system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define PREFIX_LENGTH 4
#define MAX_PASSWORD_LENGTH 6
#define ALPHABET_SIZE 26
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__host__ __device__ void md5_vfy(unsigned char* data, unsigned int length, unsigned int *a1, unsigned int *b1, unsigned int *c1, unsigned int *d1)
{
const unsigned int a0 = 0x67452301;
const unsigned int b0 = 0xEFCDAB89;
const unsigned int c0 = 0x98BADCFE;
const unsigned int d0 = 0x10325476;
unsigned int a = 0;
unsigned int b = 0;
unsigned int c = 0;
unsigned int d = 0;
unsigned int vals[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
int i = 0;
for (i = 0; i < length; i++)
{
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
vals[i / 4] |= 0x80 << ((i % 4) * 8);
unsigned int bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF(a, b, c, d, in0, S11, 3614090360); /* 1 */
FF(d, a, b, c, in1, S12, 3905402710); /* 2 */
FF(c, d, a, b, in2, S13, 606105819); /* 3 */
FF(b, c, d, a, in3, S14, 3250441966); /* 4 */
FF(a, b, c, d, in4, S11, 4118548399); /* 5 */
FF(d, a, b, c, in5, S12, 1200080426); /* 6 */
FF(c, d, a, b, in6, S13, 2821735955); /* 7 */
FF(b, c, d, a, in7, S14, 4249261313); /* 8 */
FF(a, b, c, d, in8, S11, 1770035416); /* 9 */
FF(d, a, b, c, in9, S12, 2336552879); /* 10 */
FF(c, d, a, b, in10, S13, 4294925233); /* 11 */
FF(b, c, d, a, in11, S14, 2304563134); /* 12 */
FF(a, b, c, d, in12, S11, 1804603682); /* 13 */
FF(d, a, b, c, in13, S12, 4254626195); /* 14 */
FF(c, d, a, b, in14, S13, 2792965006); /* 15 */
FF(b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG(a, b, c, d, in1, S21, 4129170786); /* 17 */
GG(d, a, b, c, in6, S22, 3225465664); /* 18 */
GG(c, d, a, b, in11, S23, 643717713); /* 19 */
GG(b, c, d, a, in0, S24, 3921069994); /* 20 */
GG(a, b, c, d, in5, S21, 3593408605); /* 21 */
GG(d, a, b, c, in10, S22, 38016083); /* 22 */
GG(c, d, a, b, in15, S23, 3634488961); /* 23 */
GG(b, c, d, a, in4, S24, 3889429448); /* 24 */
GG(a, b, c, d, in9, S21, 568446438); /* 25 */
GG(d, a, b, c, in14, S22, 3275163606); /* 26 */
GG(c, d, a, b, in3, S23, 4107603335); /* 27 */
GG(b, c, d, a, in8, S24, 1163531501); /* 28 */
GG(a, b, c, d, in13, S21, 2850285829); /* 29 */
GG(d, a, b, c, in2, S22, 4243563512); /* 30 */
GG(c, d, a, b, in7, S23, 1735328473); /* 31 */
GG(b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH(a, b, c, d, in5, S31, 4294588738); /* 33 */
HH(d, a, b, c, in8, S32, 2272392833); /* 34 */
HH(c, d, a, b, in11, S33, 1839030562); /* 35 */
HH(b, c, d, a, in14, S34, 4259657740); /* 36 */
HH(a, b, c, d, in1, S31, 2763975236); /* 37 */
HH(d, a, b, c, in4, S32, 1272893353); /* 38 */
HH(c, d, a, b, in7, S33, 4139469664); /* 39 */
HH(b, c, d, a, in10, S34, 3200236656); /* 40 */
HH(a, b, c, d, in13, S31, 681279174); /* 41 */
HH(d, a, b, c, in0, S32, 3936430074); /* 42 */
HH(c, d, a, b, in3, S33, 3572445317); /* 43 */
HH(b, c, d, a, in6, S34, 76029189); /* 44 */
HH(a, b, c, d, in9, S31, 3654602809); /* 45 */
HH(d, a, b, c, in12, S32, 3873151461); /* 46 */
HH(c, d, a, b, in15, S33, 530742520); /* 47 */
HH(b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II(a, b, c, d, in0, S41, 4096336452); /* 49 */
II(d, a, b, c, in7, S42, 1126891415); /* 50 */
II(c, d, a, b, in14, S43, 2878612391); /* 51 */
II(b, c, d, a, in5, S44, 4237533241); /* 52 */
II(a, b, c, d, in12, S41, 1700485571); /* 53 */
II(d, a, b, c, in3, S42, 2399980690); /* 54 */
II(c, d, a, b, in10, S43, 4293915773); /* 55 */
II(b, c, d, a, in1, S44, 2240044497); /* 56 */
II(a, b, c, d, in8, S41, 1873313359); /* 57 */
II(d, a, b, c, in15, S42, 4264355552); /* 58 */
II(c, d, a, b, in6, S43, 2734768916); /* 59 */
II(b, c, d, a, in13, S44, 1309151649); /* 60 */
II(a, b, c, d, in4, S41, 4149444226); /* 61 */
II(d, a, b, c, in11, S42, 3174756917); /* 62 */
II(c, d, a, b, in2, S43, 718787259); /* 63 */
II(b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
}
unsigned int unhex(unsigned char x)
{
if (x <= 'F' && x >= 'A')
{
return (unsigned int)(x - 'A' + 10);
}
else if (x <= 'f' && x >= 'a')
{
return (unsigned int)(x - 'a' + 10);
}
else if (x <= '9' && x >= '0')
{
return (unsigned int)(x - '0');
}
return 0;
}
void md5_to_ints(unsigned char* md5, unsigned int *r0, unsigned int *r1, unsigned int *r2, unsigned int *r3)
{
unsigned int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
int i = 0;
for (i = 0; i < 32; i += 2)
{
unsigned int first = unhex(md5[i]);
unsigned int second = unhex(md5[i + 1]);
unsigned int both = first * 16 + second;
both = both << 24;
if (i < 8)
{
v0 = (v0 >> 8) | both;
}
else if (i < 16)
{
v1 = (v1 >> 8) | both;
}
else if (i < 24)
{
v2 = (v2 >> 8) | both;
}
else if (i < 32)
{
v3 = (v3 >> 8) | both;
}
}
*r0 = v0;
*r1 = v1;
*r2 = v2;
*r3 = v3;
}
/*
* This method is given a test password and the unsigned int of the orginal password and will check
* to see if it is the original passowrd
*
*/
__host__ __device__ int myencrypt(unsigned char * test, unsigned int length, unsigned int a, unsigned int b, unsigned int c, unsigned int d)
{
unsigned int v1 = 0, v2 = 0, v3 = 0, v4 = 0;
md5_vfy(test, length, &v1, &v2, &v3, &v4);
if (v1 == a && v2 == b && v3 == c && v4 == d)
{
return 1;
}
else
return 0;
}//end of crypt method
__device__ bool Compare(unsigned char* guess, unsigned int* hash) {
return myencrypt(guess, MAX_PASSWORD_LENGTH, hash[0], hash[1], hash[2], hash[3]);
}
__global__ void square_array(char* a, unsigned int* hash) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned char guess[MAX_PASSWORD_LENGTH];
if (idx < pow((double)ALPHABET_SIZE, (double)PREFIX_LENGTH)) {
for (unsigned int i = 0; i < PREFIX_LENGTH; i++) {
if (i == PREFIX_LENGTH - 1) {
guess[i] = 'a' + idx % ALPHABET_SIZE;
}
else {
unsigned int offset = PREFIX_LENGTH - i - 1;
unsigned int t = idx % (int)(pow((double)ALPHABET_SIZE, (double)(offset + 1)));
guess[i] = 'a' + t / (unsigned long long int)(pow((double)ALPHABET_SIZE, (double)offset));
}
}
}
int remainingLetters = MAX_PASSWORD_LENGTH - PREFIX_LENGTH;
unsigned long i = 1;
for (int j = 0; j < remainingLetters; j++) {
i *= 26;
}
for (int pos = PREFIX_LENGTH; pos < MAX_PASSWORD_LENGTH; pos++) {
guess[pos] = 'a';
}
while (true) {
int tail;
for (tail = MAX_PASSWORD_LENGTH - 1; tail >= PREFIX_LENGTH && guess[tail] == 'z'; tail--);
if (tail < PREFIX_LENGTH) {
break;
}
else {
for (int j = MAX_PASSWORD_LENGTH - 1; j > tail; j--) {
guess[j] = 'a';
}
guess[tail] += 1;
}
if (Compare(guess, hash)) {
for (unsigned int jj = 0; jj < MAX_PASSWORD_LENGTH; jj++)
a[jj] = guess[jj];
break;
}
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main() {
unsigned char password[MAX_PASSWORD_LENGTH] = { 'p', 'a', 's', 's', 'w', 'o' };
char* a_h, *a_d;
size_t size = MAX_PASSWORD_LENGTH * sizeof(char);
unsigned int* hash_h, *hash_d;
hash_h = (unsigned int*)malloc(4 * sizeof(unsigned int));
// calculate hash and verify it works
md5_vfy(password, MAX_PASSWORD_LENGTH, &hash_h[0], &hash_h[1], &hash_h[2], &hash_h[3]);
printf("hash: 0x%08X 0x%08X 0x%08X 0x%08X\n", hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
int res = myencrypt((unsigned char*)"password", MAX_PASSWORD_LENGTH, hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
printf("%d\n", res);
// set array to a default value
a_h = (char*)malloc(size + 1);
for (unsigned int i = 0; i < size; i++) {
a_h[i] = 'a' - 1;
}
a_h[size] = '\0';
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&hash_d, 4 * sizeof(unsigned int));
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(hash_d, hash_h, 4 * sizeof(unsigned int), cudaMemcpyHostToDevice);
int threadsPerBlock = 1024;
int numBlocks = ceil(pow(ALPHABET_SIZE, PREFIX_LENGTH) / threadsPerBlock);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
square_array<<<numBlocks, threadsPerBlock>>>(a_d, hash_d);
cudaEventRecord(stop);
cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
printf("password: %s\n", a_h);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Done in: %f milliseconds\n", milliseconds);
free(a_h);
cudaFree(a_d);
} | .file "tmpxft_0003c565_00000000-6_main.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2067:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2067:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7md5_vfyPhjPjS0_S0_S0_
.type _Z7md5_vfyPhjPjS0_S0_S0_, @function
_Z7md5_vfyPhjPjS0_S0_S0_:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdx, (%rsp)
movq %rcx, 8(%rsp)
movq %r8, 16(%rsp)
movq %r9, 24(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
pxor %xmm0, %xmm0
movaps %xmm0, 32(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 64(%rsp)
movq $0, 80(%rsp)
testl %esi, %esi
je .L7
movl %esi, %r8d
.L5:
leal 3(%rax), %edx
testl %eax, %eax
cmovns %eax, %edx
sarl $2, %edx
movslq %edx, %rdx
movzbl (%rdi,%rax), %r10d
movl %eax, %r9d
sarl $31, %r9d
shrl $30, %r9d
leal (%r9,%rax), %ecx
andl $3, %ecx
subl %r9d, %ecx
sall $3, %ecx
sall %cl, %r10d
orl %r10d, 32(%rsp,%rdx,4)
addq $1, %rax
cmpq %r8, %rax
jne .L5
movl %esi, %eax
.L4:
leal 3(%rax), %edx
testl %eax, %eax
cmovns %eax, %edx
sarl $2, %edx
movslq %edx, %rdx
movl %eax, %edi
sarl $31, %edi
shrl $30, %edi
leal (%rax,%rdi), %ecx
andl $3, %ecx
subl %edi, %ecx
sall $3, %ecx
movl $128, %eax
sall %cl, %eax
orl %eax, 32(%rsp,%rdx,4)
leal 0(,%rsi,8), %r8d
movl 32(%rsp), %r14d
leal -680876937(%r14), %eax
roll $7, %eax
subl $271733879, %eax
movl %eax, %edx
andl $2004318071, %edx
xorl $-1732584194, %edx
movl 36(%rsp), %ebx
leal -117830708(%rbx,%rdx), %edx
roll $12, %edx
addl %eax, %edx
movl %eax, %ecx
xorl $-271733879, %ecx
andl %edx, %ecx
xorl $-271733879, %ecx
movl 40(%rsp), %ebx
leal -1126478375(%rbx,%rcx), %ecx
rorl $15, %ecx
addl %edx, %ecx
movl 44(%rsp), %ebx
movl %eax, %esi
xorl %edx, %esi
andl %ecx, %esi
xorl %eax, %esi
leal -1316259209(%rbx,%rsi), %esi
rorl $10, %esi
addl %ecx, %esi
movl 48(%rsp), %edi
leal -176418897(%rdi,%rax), %edi
movl %edx, %eax
xorl %ecx, %eax
andl %esi, %eax
xorl %edx, %eax
addl %edi, %eax
roll $7, %eax
addl %esi, %eax
movl 52(%rsp), %r12d
leal 1200080426(%r12,%rdx), %edi
movl %ecx, %edx
xorl %esi, %edx
andl %eax, %edx
xorl %ecx, %edx
addl %edi, %edx
roll $12, %edx
addl %eax, %edx
movl 56(%rsp), %edi
leal -1473231341(%rdi,%rcx), %edi
movl %esi, %ecx
xorl %eax, %ecx
andl %edx, %ecx
xorl %esi, %ecx
addl %edi, %ecx
rorl $15, %ecx
addl %edx, %ecx
movl 60(%rsp), %r13d
leal -45705983(%r13,%rsi), %edi
movl %eax, %esi
xorl %edx, %esi
andl %ecx, %esi
xorl %eax, %esi
addl %edi, %esi
rorl $10, %esi
addl %ecx, %esi
movl 64(%rsp), %r10d
leal 1770035416(%r10,%rax), %edi
movl %edx, %eax
xorl %ecx, %eax
andl %esi, %eax
xorl %edx, %eax
addl %edi, %eax
roll $7, %eax
addl %esi, %eax
movl 68(%rsp), %edi
leal -1958414417(%rdi,%rdx), %edi
movl %ecx, %edx
xorl %esi, %edx
andl %eax, %edx
xorl %ecx, %edx
addl %edi, %edx
roll $12, %edx
addl %eax, %edx
movl 72(%rsp), %r11d
leal -42063(%r11,%rcx), %edi
movl %esi, %ecx
xorl %eax, %ecx
andl %edx, %ecx
xorl %esi, %ecx
addl %edi, %ecx
rorl $15, %ecx
addl %edx, %ecx
movl 76(%rsp), %edi
leal -1990404162(%rdi,%rsi), %edi
movl %eax, %esi
xorl %edx, %esi
andl %ecx, %esi
xorl %eax, %esi
addl %edi, %esi
rorl $10, %esi
addl %ecx, %esi
movl 80(%rsp), %ebp
leal 1804603682(%rbp,%rax), %edi
movl %edx, %eax
xorl %ecx, %eax
andl %esi, %eax
xorl %edx, %eax
addl %edi, %eax
roll $7, %eax
addl %esi, %eax
movl 84(%rsp), %r9d
leal -40341101(%r9,%rdx), %edi
movl %ecx, %edx
xorl %esi, %edx
andl %eax, %edx
xorl %ecx, %edx
addl %edi, %edx
roll $12, %edx
addl %eax, %edx
leal -1502002290(%r8,%rcx), %edi
movl %esi, %ecx
xorl %eax, %ecx
andl %edx, %ecx
xorl %esi, %ecx
addl %edi, %ecx
rorl $15, %ecx
addl %edx, %ecx
movl %eax, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %eax, %edi
leal 1236535329(%rsi,%rdi), %esi
rorl $10, %esi
addl %ecx, %esi
movl 36(%rsp), %edi
leal -165796510(%rdi,%rax), %edi
movl %ecx, %eax
xorl %esi, %eax
andl %edx, %eax
xorl %ecx, %eax
addl %edi, %eax
roll $5, %eax
addl %esi, %eax
movl 56(%rsp), %edi
leal -1069501632(%rdi,%rdx), %edi
movl %esi, %edx
xorl %eax, %edx
andl %ecx, %edx
xorl %esi, %edx
addl %edi, %edx
roll $9, %edx
addl %eax, %edx
movl 76(%rsp), %edi
leal 643717713(%rdi,%rcx), %edi
movl %eax, %ecx
xorl %edx, %ecx
andl %esi, %ecx
xorl %eax, %ecx
addl %edi, %ecx
roll $14, %ecx
addl %edx, %ecx
leal -373897302(%r14,%rsi), %edi
movl %edx, %esi
xorl %ecx, %esi
andl %eax, %esi
xorl %edx, %esi
addl %edi, %esi
rorl $12, %esi
addl %ecx, %esi
leal -701558691(%r12,%rax), %edi
movl %ecx, %eax
xorl %esi, %eax
andl %edx, %eax
xorl %ecx, %eax
addl %edi, %eax
roll $5, %eax
addl %esi, %eax
leal 38016083(%r11,%rdx), %edi
movl %esi, %edx
xorl %eax, %edx
andl %ecx, %edx
xorl %esi, %edx
addl %edi, %edx
roll $9, %edx
addl %eax, %edx
movl %eax, %edi
xorl %edx, %edi
andl %esi, %edi
xorl %eax, %edi
leal -660478335(%rcx,%rdi), %ecx
roll $14, %ecx
addl %edx, %ecx
movl 48(%rsp), %edi
leal -405537848(%rdi,%rsi), %edi
movl %edx, %esi
xorl %ecx, %esi
andl %eax, %esi
xorl %edx, %esi
addl %edi, %esi
rorl $12, %esi
addl %ecx, %esi
movl 68(%rsp), %edi
leal 568446438(%rdi,%rax), %edi
movl %ecx, %eax
xorl %esi, %eax
andl %edx, %eax
xorl %ecx, %eax
addl %edi, %eax
roll $5, %eax
addl %esi, %eax
leal -1019803690(%r8,%rdx), %edi
movl %esi, %edx
xorl %eax, %edx
andl %ecx, %edx
xorl %esi, %edx
addl %edi, %edx
roll $9, %edx
addl %eax, %edx
leal -187363961(%rbx,%rcx), %edi
movl %eax, %ecx
xorl %edx, %ecx
andl %esi, %ecx
xorl %eax, %ecx
addl %edi, %ecx
roll $14, %ecx
addl %edx, %ecx
leal 1163531501(%r10,%rsi), %esi
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %edx, %edi
addl %esi, %edi
rorl $12, %edi
addl %ecx, %edi
leal -1444681467(%r9,%rax), %esi
movl %ecx, %eax
xorl %edi, %eax
andl %edx, %eax
xorl %ecx, %eax
addl %esi, %eax
roll $5, %eax
addl %edi, %eax
movl 40(%rsp), %esi
leal -51403784(%rsi,%rdx), %esi
movl %edi, %edx
xorl %eax, %edx
andl %ecx, %edx
xorl %edi, %edx
addl %esi, %edx
roll $9, %edx
addl %eax, %edx
leal 1735328473(%r13,%rcx), %ecx
movl %eax, %esi
xorl %edx, %esi
andl %edi, %esi
xorl %eax, %esi
addl %ecx, %esi
roll $14, %esi
addl %edx, %esi
movl %edx, %ecx
xorl %esi, %ecx
leal -1926607734(%rbp,%rdi), %r15d
movl %eax, %edi
andl %ecx, %edi
xorl %edx, %edi
addl %r15d, %edi
rorl $12, %edi
addl %esi, %edi
leal -378558(%r12,%rax), %r15d
xorl %edi, %ecx
addl %r15d, %ecx
roll $4, %ecx
leal (%rdi,%rcx), %eax
leal -2022574463(%r10,%rdx), %ecx
movl %esi, %edx
xorl %edi, %edx
xorl %eax, %edx
addl %ecx, %edx
roll $11, %edx
addl %eax, %edx
movl 76(%rsp), %ecx
leal 1839030562(%rcx,%rsi), %ecx
movl %edi, %esi
xorl %eax, %esi
xorl %edx, %esi
addl %ecx, %esi
roll $16, %esi
addl %edx, %esi
leal -35309556(%r8,%rdi), %edi
movl %eax, %ecx
xorl %edx, %ecx
xorl %esi, %ecx
addl %edi, %ecx
rorl $9, %ecx
addl %esi, %ecx
movl 36(%rsp), %edi
leal -1530992060(%rdi,%rax), %edi
movl %edx, %eax
xorl %esi, %eax
xorl %ecx, %eax
addl %edi, %eax
roll $4, %eax
addl %ecx, %eax
movl 48(%rsp), %edi
leal 1272893353(%rdi,%rdx), %edx
movl %esi, %edi
xorl %ecx, %edi
xorl %eax, %edi
addl %edx, %edi
roll $11, %edi
addl %eax, %edi
leal -155497632(%r13,%rsi), %esi
movl %ecx, %edx
xorl %eax, %edx
xorl %edi, %edx
addl %esi, %edx
roll $16, %edx
addl %edi, %edx
leal -1094730640(%r11,%rcx), %esi
movl %eax, %ecx
xorl %edi, %ecx
xorl %edx, %ecx
addl %esi, %ecx
rorl $9, %ecx
addl %edx, %ecx
leal 681279174(%r9,%rax), %eax
movl %edi, %esi
xorl %edx, %esi
xorl %ecx, %esi
addl %eax, %esi
roll $4, %esi
addl %ecx, %esi
leal -358537222(%r14,%rdi), %edi
movl %edx, %eax
xorl %ecx, %eax
xorl %esi, %eax
addl %edi, %eax
roll $11, %eax
addl %esi, %eax
leal -722521979(%rbx,%rdx), %edx
movl %ecx, %edi
xorl %esi, %edi
xorl %eax, %edi
addl %edx, %edi
roll $16, %edi
addl %eax, %edi
movl 56(%rsp), %edx
leal 76029189(%rdx,%rcx), %edx
movl %esi, %ecx
xorl %eax, %ecx
xorl %edi, %ecx
addl %edx, %ecx
rorl $9, %ecx
addl %edi, %ecx
movl 68(%rsp), %edx
leal -640364487(%rdx,%rsi), %esi
movl %eax, %edx
xorl %edi, %edx
xorl %ecx, %edx
addl %esi, %edx
roll $4, %edx
addl %ecx, %edx
leal -421815835(%rbp,%rax), %eax
movl %edi, %esi
xorl %ecx, %esi
xorl %edx, %esi
addl %eax, %esi
roll $11, %esi
addl %edx, %esi
movl %ecx, %eax
xorl %edx, %eax
xorl %esi, %eax
leal 530742520(%rdi,%rax), %edi
roll $16, %edi
addl %esi, %edi
movl 40(%rsp), %eax
leal -995338651(%rax,%rcx), %ecx
movl %edx, %eax
xorl %esi, %eax
xorl %edi, %eax
addl %ecx, %eax
rorl $9, %eax
addl %edi, %eax
leal -198630844(%r14,%rdx), %ecx
movl %esi, %edx
notl %edx
orl %eax, %edx
xorl %edi, %edx
addl %ecx, %edx
roll $6, %edx
addl %eax, %edx
leal 1126891415(%r13,%rsi), %esi
movl %edi, %ecx
notl %ecx
orl %edx, %ecx
xorl %eax, %ecx
addl %esi, %ecx
roll $10, %ecx
addl %edx, %ecx
leal -1416354905(%r8,%rdi), %edi
movl %eax, %esi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %edi, %esi
roll $15, %esi
addl %ecx, %esi
leal -57434055(%r12,%rax), %edi
movl %edx, %eax
notl %eax
orl %esi, %eax
xorl %ecx, %eax
addl %edi, %eax
rorl $11, %eax
addl %esi, %eax
leal 1700485571(%rbp,%rdx), %edi
movl %ecx, %edx
notl %edx
orl %eax, %edx
xorl %esi, %edx
addl %edi, %edx
roll $6, %edx
addl %eax, %edx
leal -1894986606(%rbx,%rcx), %edi
movl %esi, %ecx
notl %ecx
orl %edx, %ecx
xorl %eax, %ecx
addl %edi, %ecx
roll $10, %ecx
addl %edx, %ecx
leal -1051523(%r11,%rsi), %edi
movl %eax, %esi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %edi, %esi
roll $15, %esi
addl %ecx, %esi
movl 36(%rsp), %ebx
leal -2054922799(%rbx,%rax), %edi
movl %edx, %eax
notl %eax
orl %esi, %eax
xorl %ecx, %eax
addl %edi, %eax
rorl $11, %eax
addl %esi, %eax
leal 1873313359(%r10,%rdx), %edi
movl %ecx, %edx
notl %edx
orl %eax, %edx
xorl %esi, %edx
addl %edi, %edx
roll $6, %edx
addl %eax, %edx
movl %esi, %edi
notl %edi
orl %edx, %edi
xorl %eax, %edi
leal -30611744(%rcx,%rdi), %ecx
roll $10, %ecx
addl %edx, %ecx
movl 56(%rsp), %ebx
leal -1560198380(%rbx,%rsi), %edi
movl %eax, %esi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %edi, %esi
roll $15, %esi
addl %ecx, %esi
leal 1309151649(%r9,%rax), %eax
movl %edx, %edi
notl %edi
orl %esi, %edi
xorl %ecx, %edi
addl %eax, %edi
rorl $11, %edi
addl %esi, %edi
movl 48(%rsp), %eax
leal -145523070(%rax,%rdx), %edx
movl %ecx, %eax
notl %eax
orl %edi, %eax
xorl %esi, %eax
addl %edx, %eax
roll $6, %eax
addl %edi, %eax
movl 76(%rsp), %ebx
leal -1120210379(%rbx,%rcx), %ecx
movl %esi, %edx
notl %edx
orl %eax, %edx
xorl %edi, %edx
addl %ecx, %edx
roll $10, %edx
addl %eax, %edx
movl 40(%rsp), %ebx
leal 718787259(%rbx,%rsi), %esi
movl %edi, %ecx
notl %ecx
orl %edx, %ecx
xorl %eax, %ecx
addl %esi, %ecx
roll $15, %ecx
addl %edx, %ecx
movl 68(%rsp), %ebx
leal -343485551(%rbx,%rdi), %edi
movl %eax, %esi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %edi, %esi
rorl $11, %esi
addl $1732584193, %eax
movq (%rsp), %rbx
movl %eax, (%rbx)
leal -271733879(%rcx,%rsi), %eax
movq 8(%rsp), %rbx
movl %eax, (%rbx)
subl $1732584194, %ecx
movq 16(%rsp), %rax
movl %ecx, (%rax)
addl $271733878, %edx
movq 24(%rsp), %rax
movl %edx, (%rax)
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
movl $0, %eax
jmp .L4
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z7md5_vfyPhjPjS0_S0_S0_, .-_Z7md5_vfyPhjPjS0_S0_S0_
.globl _Z5unhexh
.type _Z5unhexh, @function
_Z5unhexh:
.LFB2058:
.cfi_startproc
endbr64
leal -65(%rdi), %eax
cmpb $5, %al
jbe .L16
leal -97(%rdi), %eax
cmpb $5, %al
jbe .L17
leal -48(%rdi), %edx
movzbl %dil, %eax
subl $48, %eax
cmpb $10, %dl
movl $0, %edx
cmovnb %edx, %eax
ret
.L16:
movzbl %dil, %edi
leal -55(%rdi), %eax
ret
.L17:
movzbl %dil, %edi
leal -87(%rdi), %eax
ret
.cfi_endproc
.LFE2058:
.size _Z5unhexh, .-_Z5unhexh
.globl _Z11md5_to_intsPhPjS0_S0_S0_
.type _Z11md5_to_intsPhPjS0_S0_S0_, @function
_Z11md5_to_intsPhPjS0_S0_S0_:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, %r12
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 32(%rsp)
movl $0, %ebp
movl $0, 4(%rsp)
movl $0, %r14d
movl $0, %r13d
movl $0, %r15d
jmp .L24
.L28:
shrl $8, %r15d
orl %ebx, %r15d
jmp .L20
.L21:
cmpl $23, %ebp
jg .L22
shrl $8, %r14d
orl %ebx, %r14d
.L20:
addq $2, %rbp
.L24:
movzbl (%r12,%rbp), %edi
call _Z5unhexh
movl %eax, %ebx
movzbl 1(%r12,%rbp), %edi
call _Z5unhexh
sall $4, %ebx
addl %eax, %ebx
sall $24, %ebx
cmpl $7, %ebp
jle .L28
cmpl $15, %ebp
jg .L21
shrl $8, %r13d
orl %ebx, %r13d
jmp .L20
.L22:
movl 4(%rsp), %eax
shrl $8, %eax
orl %ebx, %eax
movl %eax, 4(%rsp)
addq $2, %rbp
cmpq $32, %rbp
jne .L24
movq 8(%rsp), %rdx
movl %r15d, (%rdx)
movq 16(%rsp), %rcx
movl %r13d, (%rcx)
movq 24(%rsp), %rsi
movl %r14d, (%rsi)
movq 32(%rsp), %rdx
movl %eax, (%rdx)
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _Z11md5_to_intsPhPjS0_S0_S0_, .-_Z11md5_to_intsPhPjS0_S0_S0_
.globl _Z9myencryptPhjjjjj
.type _Z9myencryptPhjjjjj, @function
_Z9myencryptPhjjjjj:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $40, %rsp
.cfi_def_cfa_offset 80
movl %edx, %ebx
movl %ecx, %ebp
movl %r8d, %r12d
movl %r9d, %r13d
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rcx
leaq 8(%rsp), %rdx
leaq 20(%rsp), %r9
leaq 16(%rsp), %r8
call _Z7md5_vfyPhjPjS0_S0_S0_
movl $0, %eax
cmpl %ebx, 8(%rsp)
jne .L29
cmpl %ebp, 12(%rsp)
jne .L29
cmpl %r12d, 16(%rsp)
jne .L29
cmpl %r13d, 20(%rsp)
sete %al
movzbl %al, %eax
.L29:
movq 24(%rsp), %rdx
subq %fs:40, %rdx
jne .L36
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L36:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z9myencryptPhjjjjj, .-_Z9myencryptPhjjjjj
.globl _Z7ComparePhPj
.type _Z7ComparePhPj, @function
_Z7ComparePhPj:
.LFB2061:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2061:
.size _Z7ComparePhPj, .-_Z7ComparePhPj
.globl _Z34__device_stub__Z12square_arrayPcPjPcPj
.type _Z34__device_stub__Z12square_arrayPcPjPcPj, @function
_Z34__device_stub__Z12square_arrayPcPjPcPj:
.LFB2089:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L44
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12square_arrayPcPj(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2089:
.size _Z34__device_stub__Z12square_arrayPcPjPcPj, .-_Z34__device_stub__Z12square_arrayPcPjPcPj
.globl _Z12square_arrayPcPj
.type _Z12square_arrayPcPj, @function
_Z12square_arrayPcPj:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z12square_arrayPcPjPcPj
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _Z12square_arrayPcPj, .-_Z12square_arrayPcPj
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "hash: 0x%08X 0x%08X 0x%08X 0x%08X\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "password"
.LC2:
.string "%d\n"
.LC3:
.string "password: %s\n"
.LC5:
.string "Done in: %f milliseconds\n"
.text
.globl main
.type main, @function
main:
.LFB2063:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $88, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1936941424, 66(%rsp)
movw $28535, 70(%rsp)
movl $16, %edi
call malloc@PLT
movq %rax, %rbx
leaq 4(%rax), %rcx
leaq 66(%rsp), %rdi
leaq 12(%rax), %r9
leaq 8(%rax), %r8
movq %rax, %rdx
movl $6, %esi
call _Z7md5_vfyPhjPjS0_S0_S0_
movl 4(%rbx), %ecx
movl (%rbx), %edx
movl 12(%rbx), %r9d
movl 8(%rbx), %r8d
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 4(%rbx), %ecx
movl (%rbx), %edx
movl 12(%rbx), %r9d
movl 8(%rbx), %r8d
movl $6, %esi
leaq .LC1(%rip), %rdi
call _Z9myencryptPhjjjjj
movl %eax, %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $7, %edi
call malloc@PLT
movq %rax, %rbp
leaq 6(%rax), %rdx
.L48:
movb $96, (%rax)
addq $1, %rax
cmpq %rdx, %rax
jne .L48
movb $0, 6(%rbp)
leaq 8(%rsp), %rdi
movl $6, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $16, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $6, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $16, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl $1024, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $447, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L49:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $2, %ecx
movl $6, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movq %rbp, %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 52(%rsp)
leaq 52(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L54
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L53:
.cfi_restore_state
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z34__device_stub__Z12square_arrayPcPjPcPj
jmp .L49
.L54:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z12square_arrayPcPj"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2092:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z12square_arrayPcPj(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2092:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#define PREFIX_LENGTH 4
#define MAX_PASSWORD_LENGTH 6
#define ALPHABET_SIZE 26
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__host__ __device__ void md5_vfy(unsigned char* data, unsigned int length, unsigned int *a1, unsigned int *b1, unsigned int *c1, unsigned int *d1)
{
const unsigned int a0 = 0x67452301;
const unsigned int b0 = 0xEFCDAB89;
const unsigned int c0 = 0x98BADCFE;
const unsigned int d0 = 0x10325476;
unsigned int a = 0;
unsigned int b = 0;
unsigned int c = 0;
unsigned int d = 0;
unsigned int vals[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
int i = 0;
for (i = 0; i < length; i++)
{
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
vals[i / 4] |= 0x80 << ((i % 4) * 8);
unsigned int bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF(a, b, c, d, in0, S11, 3614090360); /* 1 */
FF(d, a, b, c, in1, S12, 3905402710); /* 2 */
FF(c, d, a, b, in2, S13, 606105819); /* 3 */
FF(b, c, d, a, in3, S14, 3250441966); /* 4 */
FF(a, b, c, d, in4, S11, 4118548399); /* 5 */
FF(d, a, b, c, in5, S12, 1200080426); /* 6 */
FF(c, d, a, b, in6, S13, 2821735955); /* 7 */
FF(b, c, d, a, in7, S14, 4249261313); /* 8 */
FF(a, b, c, d, in8, S11, 1770035416); /* 9 */
FF(d, a, b, c, in9, S12, 2336552879); /* 10 */
FF(c, d, a, b, in10, S13, 4294925233); /* 11 */
FF(b, c, d, a, in11, S14, 2304563134); /* 12 */
FF(a, b, c, d, in12, S11, 1804603682); /* 13 */
FF(d, a, b, c, in13, S12, 4254626195); /* 14 */
FF(c, d, a, b, in14, S13, 2792965006); /* 15 */
FF(b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG(a, b, c, d, in1, S21, 4129170786); /* 17 */
GG(d, a, b, c, in6, S22, 3225465664); /* 18 */
GG(c, d, a, b, in11, S23, 643717713); /* 19 */
GG(b, c, d, a, in0, S24, 3921069994); /* 20 */
GG(a, b, c, d, in5, S21, 3593408605); /* 21 */
GG(d, a, b, c, in10, S22, 38016083); /* 22 */
GG(c, d, a, b, in15, S23, 3634488961); /* 23 */
GG(b, c, d, a, in4, S24, 3889429448); /* 24 */
GG(a, b, c, d, in9, S21, 568446438); /* 25 */
GG(d, a, b, c, in14, S22, 3275163606); /* 26 */
GG(c, d, a, b, in3, S23, 4107603335); /* 27 */
GG(b, c, d, a, in8, S24, 1163531501); /* 28 */
GG(a, b, c, d, in13, S21, 2850285829); /* 29 */
GG(d, a, b, c, in2, S22, 4243563512); /* 30 */
GG(c, d, a, b, in7, S23, 1735328473); /* 31 */
GG(b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH(a, b, c, d, in5, S31, 4294588738); /* 33 */
HH(d, a, b, c, in8, S32, 2272392833); /* 34 */
HH(c, d, a, b, in11, S33, 1839030562); /* 35 */
HH(b, c, d, a, in14, S34, 4259657740); /* 36 */
HH(a, b, c, d, in1, S31, 2763975236); /* 37 */
HH(d, a, b, c, in4, S32, 1272893353); /* 38 */
HH(c, d, a, b, in7, S33, 4139469664); /* 39 */
HH(b, c, d, a, in10, S34, 3200236656); /* 40 */
HH(a, b, c, d, in13, S31, 681279174); /* 41 */
HH(d, a, b, c, in0, S32, 3936430074); /* 42 */
HH(c, d, a, b, in3, S33, 3572445317); /* 43 */
HH(b, c, d, a, in6, S34, 76029189); /* 44 */
HH(a, b, c, d, in9, S31, 3654602809); /* 45 */
HH(d, a, b, c, in12, S32, 3873151461); /* 46 */
HH(c, d, a, b, in15, S33, 530742520); /* 47 */
HH(b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II(a, b, c, d, in0, S41, 4096336452); /* 49 */
II(d, a, b, c, in7, S42, 1126891415); /* 50 */
II(c, d, a, b, in14, S43, 2878612391); /* 51 */
II(b, c, d, a, in5, S44, 4237533241); /* 52 */
II(a, b, c, d, in12, S41, 1700485571); /* 53 */
II(d, a, b, c, in3, S42, 2399980690); /* 54 */
II(c, d, a, b, in10, S43, 4293915773); /* 55 */
II(b, c, d, a, in1, S44, 2240044497); /* 56 */
II(a, b, c, d, in8, S41, 1873313359); /* 57 */
II(d, a, b, c, in15, S42, 4264355552); /* 58 */
II(c, d, a, b, in6, S43, 2734768916); /* 59 */
II(b, c, d, a, in13, S44, 1309151649); /* 60 */
II(a, b, c, d, in4, S41, 4149444226); /* 61 */
II(d, a, b, c, in11, S42, 3174756917); /* 62 */
II(c, d, a, b, in2, S43, 718787259); /* 63 */
II(b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
}
unsigned int unhex(unsigned char x)
{
if (x <= 'F' && x >= 'A')
{
return (unsigned int)(x - 'A' + 10);
}
else if (x <= 'f' && x >= 'a')
{
return (unsigned int)(x - 'a' + 10);
}
else if (x <= '9' && x >= '0')
{
return (unsigned int)(x - '0');
}
return 0;
}
void md5_to_ints(unsigned char* md5, unsigned int *r0, unsigned int *r1, unsigned int *r2, unsigned int *r3)
{
unsigned int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
int i = 0;
for (i = 0; i < 32; i += 2)
{
unsigned int first = unhex(md5[i]);
unsigned int second = unhex(md5[i + 1]);
unsigned int both = first * 16 + second;
both = both << 24;
if (i < 8)
{
v0 = (v0 >> 8) | both;
}
else if (i < 16)
{
v1 = (v1 >> 8) | both;
}
else if (i < 24)
{
v2 = (v2 >> 8) | both;
}
else if (i < 32)
{
v3 = (v3 >> 8) | both;
}
}
*r0 = v0;
*r1 = v1;
*r2 = v2;
*r3 = v3;
}
/*
* This method is given a test password and the unsigned int of the orginal password and will check
* to see if it is the original passowrd
*
*/
__host__ __device__ int myencrypt(unsigned char * test, unsigned int length, unsigned int a, unsigned int b, unsigned int c, unsigned int d)
{
unsigned int v1 = 0, v2 = 0, v3 = 0, v4 = 0;
md5_vfy(test, length, &v1, &v2, &v3, &v4);
if (v1 == a && v2 == b && v3 == c && v4 == d)
{
return 1;
}
else
return 0;
}//end of crypt method
__device__ bool Compare(unsigned char* guess, unsigned int* hash) {
return myencrypt(guess, MAX_PASSWORD_LENGTH, hash[0], hash[1], hash[2], hash[3]);
}
__global__ void square_array(char* a, unsigned int* hash) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned char guess[MAX_PASSWORD_LENGTH];
if (idx < pow((double)ALPHABET_SIZE, (double)PREFIX_LENGTH)) {
for (unsigned int i = 0; i < PREFIX_LENGTH; i++) {
if (i == PREFIX_LENGTH - 1) {
guess[i] = 'a' + idx % ALPHABET_SIZE;
}
else {
unsigned int offset = PREFIX_LENGTH - i - 1;
unsigned int t = idx % (int)(pow((double)ALPHABET_SIZE, (double)(offset + 1)));
guess[i] = 'a' + t / (unsigned long long int)(pow((double)ALPHABET_SIZE, (double)offset));
}
}
}
int remainingLetters = MAX_PASSWORD_LENGTH - PREFIX_LENGTH;
unsigned long i = 1;
for (int j = 0; j < remainingLetters; j++) {
i *= 26;
}
for (int pos = PREFIX_LENGTH; pos < MAX_PASSWORD_LENGTH; pos++) {
guess[pos] = 'a';
}
while (true) {
int tail;
for (tail = MAX_PASSWORD_LENGTH - 1; tail >= PREFIX_LENGTH && guess[tail] == 'z'; tail--);
if (tail < PREFIX_LENGTH) {
break;
}
else {
for (int j = MAX_PASSWORD_LENGTH - 1; j > tail; j--) {
guess[j] = 'a';
}
guess[tail] += 1;
}
if (Compare(guess, hash)) {
for (unsigned int jj = 0; jj < MAX_PASSWORD_LENGTH; jj++)
a[jj] = guess[jj];
break;
}
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main() {
unsigned char password[MAX_PASSWORD_LENGTH] = { 'p', 'a', 's', 's', 'w', 'o' };
char* a_h, *a_d;
size_t size = MAX_PASSWORD_LENGTH * sizeof(char);
unsigned int* hash_h, *hash_d;
hash_h = (unsigned int*)malloc(4 * sizeof(unsigned int));
// calculate hash and verify it works
md5_vfy(password, MAX_PASSWORD_LENGTH, &hash_h[0], &hash_h[1], &hash_h[2], &hash_h[3]);
printf("hash: 0x%08X 0x%08X 0x%08X 0x%08X\n", hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
int res = myencrypt((unsigned char*)"password", MAX_PASSWORD_LENGTH, hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
printf("%d\n", res);
// set array to a default value
a_h = (char*)malloc(size + 1);
for (unsigned int i = 0; i < size; i++) {
a_h[i] = 'a' - 1;
}
a_h[size] = '\0';
cudaMalloc((void**)&a_d, size);
cudaMalloc((void**)&hash_d, 4 * sizeof(unsigned int));
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(hash_d, hash_h, 4 * sizeof(unsigned int), cudaMemcpyHostToDevice);
int threadsPerBlock = 1024;
int numBlocks = ceil(pow(ALPHABET_SIZE, PREFIX_LENGTH) / threadsPerBlock);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
square_array<<<numBlocks, threadsPerBlock>>>(a_d, hash_d);
cudaEventRecord(stop);
cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
printf("password: %s\n", a_h);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Done in: %f milliseconds\n", milliseconds);
free(a_h);
cudaFree(a_d);
} | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define PREFIX_LENGTH 4
#define MAX_PASSWORD_LENGTH 6
#define ALPHABET_SIZE 26
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__host__ __device__ void md5_vfy(unsigned char* data, unsigned int length, unsigned int *a1, unsigned int *b1, unsigned int *c1, unsigned int *d1)
{
const unsigned int a0 = 0x67452301;
const unsigned int b0 = 0xEFCDAB89;
const unsigned int c0 = 0x98BADCFE;
const unsigned int d0 = 0x10325476;
unsigned int a = 0;
unsigned int b = 0;
unsigned int c = 0;
unsigned int d = 0;
unsigned int vals[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
int i = 0;
for (i = 0; i < length; i++)
{
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
vals[i / 4] |= 0x80 << ((i % 4) * 8);
unsigned int bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF(a, b, c, d, in0, S11, 3614090360); /* 1 */
FF(d, a, b, c, in1, S12, 3905402710); /* 2 */
FF(c, d, a, b, in2, S13, 606105819); /* 3 */
FF(b, c, d, a, in3, S14, 3250441966); /* 4 */
FF(a, b, c, d, in4, S11, 4118548399); /* 5 */
FF(d, a, b, c, in5, S12, 1200080426); /* 6 */
FF(c, d, a, b, in6, S13, 2821735955); /* 7 */
FF(b, c, d, a, in7, S14, 4249261313); /* 8 */
FF(a, b, c, d, in8, S11, 1770035416); /* 9 */
FF(d, a, b, c, in9, S12, 2336552879); /* 10 */
FF(c, d, a, b, in10, S13, 4294925233); /* 11 */
FF(b, c, d, a, in11, S14, 2304563134); /* 12 */
FF(a, b, c, d, in12, S11, 1804603682); /* 13 */
FF(d, a, b, c, in13, S12, 4254626195); /* 14 */
FF(c, d, a, b, in14, S13, 2792965006); /* 15 */
FF(b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG(a, b, c, d, in1, S21, 4129170786); /* 17 */
GG(d, a, b, c, in6, S22, 3225465664); /* 18 */
GG(c, d, a, b, in11, S23, 643717713); /* 19 */
GG(b, c, d, a, in0, S24, 3921069994); /* 20 */
GG(a, b, c, d, in5, S21, 3593408605); /* 21 */
GG(d, a, b, c, in10, S22, 38016083); /* 22 */
GG(c, d, a, b, in15, S23, 3634488961); /* 23 */
GG(b, c, d, a, in4, S24, 3889429448); /* 24 */
GG(a, b, c, d, in9, S21, 568446438); /* 25 */
GG(d, a, b, c, in14, S22, 3275163606); /* 26 */
GG(c, d, a, b, in3, S23, 4107603335); /* 27 */
GG(b, c, d, a, in8, S24, 1163531501); /* 28 */
GG(a, b, c, d, in13, S21, 2850285829); /* 29 */
GG(d, a, b, c, in2, S22, 4243563512); /* 30 */
GG(c, d, a, b, in7, S23, 1735328473); /* 31 */
GG(b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH(a, b, c, d, in5, S31, 4294588738); /* 33 */
HH(d, a, b, c, in8, S32, 2272392833); /* 34 */
HH(c, d, a, b, in11, S33, 1839030562); /* 35 */
HH(b, c, d, a, in14, S34, 4259657740); /* 36 */
HH(a, b, c, d, in1, S31, 2763975236); /* 37 */
HH(d, a, b, c, in4, S32, 1272893353); /* 38 */
HH(c, d, a, b, in7, S33, 4139469664); /* 39 */
HH(b, c, d, a, in10, S34, 3200236656); /* 40 */
HH(a, b, c, d, in13, S31, 681279174); /* 41 */
HH(d, a, b, c, in0, S32, 3936430074); /* 42 */
HH(c, d, a, b, in3, S33, 3572445317); /* 43 */
HH(b, c, d, a, in6, S34, 76029189); /* 44 */
HH(a, b, c, d, in9, S31, 3654602809); /* 45 */
HH(d, a, b, c, in12, S32, 3873151461); /* 46 */
HH(c, d, a, b, in15, S33, 530742520); /* 47 */
HH(b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II(a, b, c, d, in0, S41, 4096336452); /* 49 */
II(d, a, b, c, in7, S42, 1126891415); /* 50 */
II(c, d, a, b, in14, S43, 2878612391); /* 51 */
II(b, c, d, a, in5, S44, 4237533241); /* 52 */
II(a, b, c, d, in12, S41, 1700485571); /* 53 */
II(d, a, b, c, in3, S42, 2399980690); /* 54 */
II(c, d, a, b, in10, S43, 4293915773); /* 55 */
II(b, c, d, a, in1, S44, 2240044497); /* 56 */
II(a, b, c, d, in8, S41, 1873313359); /* 57 */
II(d, a, b, c, in15, S42, 4264355552); /* 58 */
II(c, d, a, b, in6, S43, 2734768916); /* 59 */
II(b, c, d, a, in13, S44, 1309151649); /* 60 */
II(a, b, c, d, in4, S41, 4149444226); /* 61 */
II(d, a, b, c, in11, S42, 3174756917); /* 62 */
II(c, d, a, b, in2, S43, 718787259); /* 63 */
II(b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
}
unsigned int unhex(unsigned char x)
{
if (x <= 'F' && x >= 'A')
{
return (unsigned int)(x - 'A' + 10);
}
else if (x <= 'f' && x >= 'a')
{
return (unsigned int)(x - 'a' + 10);
}
else if (x <= '9' && x >= '0')
{
return (unsigned int)(x - '0');
}
return 0;
}
void md5_to_ints(unsigned char* md5, unsigned int *r0, unsigned int *r1, unsigned int *r2, unsigned int *r3)
{
unsigned int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
int i = 0;
for (i = 0; i < 32; i += 2)
{
unsigned int first = unhex(md5[i]);
unsigned int second = unhex(md5[i + 1]);
unsigned int both = first * 16 + second;
both = both << 24;
if (i < 8)
{
v0 = (v0 >> 8) | both;
}
else if (i < 16)
{
v1 = (v1 >> 8) | both;
}
else if (i < 24)
{
v2 = (v2 >> 8) | both;
}
else if (i < 32)
{
v3 = (v3 >> 8) | both;
}
}
*r0 = v0;
*r1 = v1;
*r2 = v2;
*r3 = v3;
}
/*
* This method is given a test password and the unsigned int of the orginal password and will check
* to see if it is the original passowrd
*
*/
__host__ __device__ int myencrypt(unsigned char * test, unsigned int length, unsigned int a, unsigned int b, unsigned int c, unsigned int d)
{
unsigned int v1 = 0, v2 = 0, v3 = 0, v4 = 0;
md5_vfy(test, length, &v1, &v2, &v3, &v4);
if (v1 == a && v2 == b && v3 == c && v4 == d)
{
return 1;
}
else
return 0;
}//end of crypt method
__device__ bool Compare(unsigned char* guess, unsigned int* hash) {
return myencrypt(guess, MAX_PASSWORD_LENGTH, hash[0], hash[1], hash[2], hash[3]);
}
__global__ void square_array(char* a, unsigned int* hash) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned char guess[MAX_PASSWORD_LENGTH];
if (idx < pow((double)ALPHABET_SIZE, (double)PREFIX_LENGTH)) {
for (unsigned int i = 0; i < PREFIX_LENGTH; i++) {
if (i == PREFIX_LENGTH - 1) {
guess[i] = 'a' + idx % ALPHABET_SIZE;
}
else {
unsigned int offset = PREFIX_LENGTH - i - 1;
unsigned int t = idx % (int)(pow((double)ALPHABET_SIZE, (double)(offset + 1)));
guess[i] = 'a' + t / (unsigned long long int)(pow((double)ALPHABET_SIZE, (double)offset));
}
}
}
int remainingLetters = MAX_PASSWORD_LENGTH - PREFIX_LENGTH;
unsigned long i = 1;
for (int j = 0; j < remainingLetters; j++) {
i *= 26;
}
for (int pos = PREFIX_LENGTH; pos < MAX_PASSWORD_LENGTH; pos++) {
guess[pos] = 'a';
}
while (true) {
int tail;
for (tail = MAX_PASSWORD_LENGTH - 1; tail >= PREFIX_LENGTH && guess[tail] == 'z'; tail--);
if (tail < PREFIX_LENGTH) {
break;
}
else {
for (int j = MAX_PASSWORD_LENGTH - 1; j > tail; j--) {
guess[j] = 'a';
}
guess[tail] += 1;
}
if (Compare(guess, hash)) {
for (unsigned int jj = 0; jj < MAX_PASSWORD_LENGTH; jj++)
a[jj] = guess[jj];
break;
}
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
int main() {
unsigned char password[MAX_PASSWORD_LENGTH] = { 'p', 'a', 's', 's', 'w', 'o' };
char* a_h, *a_d;
size_t size = MAX_PASSWORD_LENGTH * sizeof(char);
unsigned int* hash_h, *hash_d;
hash_h = (unsigned int*)malloc(4 * sizeof(unsigned int));
// calculate hash and verify it works
md5_vfy(password, MAX_PASSWORD_LENGTH, &hash_h[0], &hash_h[1], &hash_h[2], &hash_h[3]);
printf("hash: 0x%08X 0x%08X 0x%08X 0x%08X\n", hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
int res = myencrypt((unsigned char*)"password", MAX_PASSWORD_LENGTH, hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
printf("%d\n", res);
// set array to a default value
a_h = (char*)malloc(size + 1);
for (unsigned int i = 0; i < size; i++) {
a_h[i] = 'a' - 1;
}
a_h[size] = '\0';
hipMalloc((void**)&a_d, size);
hipMalloc((void**)&hash_d, 4 * sizeof(unsigned int));
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
hipMemcpy(hash_d, hash_h, 4 * sizeof(unsigned int), hipMemcpyHostToDevice);
int threadsPerBlock = 1024;
int numBlocks = ceil(pow(ALPHABET_SIZE, PREFIX_LENGTH) / threadsPerBlock);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
square_array<<<numBlocks, threadsPerBlock>>>(a_d, hash_d);
hipEventRecord(stop);
hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost);
printf("password: %s\n", a_h);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Done in: %f milliseconds\n", milliseconds);
free(a_h);
hipFree(a_d);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define PREFIX_LENGTH 4
#define MAX_PASSWORD_LENGTH 6
#define ALPHABET_SIZE 26
/* F, G and H are basic MD5 functions: selection, majority, parity */
#define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
#define G(x, y, z) (((x) & (z)) | ((y) & (~z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define I(x, y, z) ((y) ^ ((x) | (~z)))
/* ROTATE_LEFT rotates x left n bits */
#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n))))
/* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4 */
/* Rotation is separate from addition to prevent recomputation */
#define FF(a, b, c, d, x, s, ac) \
{(a) += F ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define GG(a, b, c, d, x, s, ac) \
{(a) += G ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define HH(a, b, c, d, x, s, ac) \
{(a) += H ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
#define II(a, b, c, d, x, s, ac) \
{(a) += I ((b), (c), (d)) + (x) + (unsigned int)(ac); \
(a) = ROTATE_LEFT ((a), (s)); \
(a) += (b); \
}
__host__ __device__ void md5_vfy(unsigned char* data, unsigned int length, unsigned int *a1, unsigned int *b1, unsigned int *c1, unsigned int *d1)
{
const unsigned int a0 = 0x67452301;
const unsigned int b0 = 0xEFCDAB89;
const unsigned int c0 = 0x98BADCFE;
const unsigned int d0 = 0x10325476;
unsigned int a = 0;
unsigned int b = 0;
unsigned int c = 0;
unsigned int d = 0;
unsigned int vals[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
int i = 0;
for (i = 0; i < length; i++)
{
vals[i / 4] |= data[i] << ((i % 4) * 8);
}
vals[i / 4] |= 0x80 << ((i % 4) * 8);
unsigned int bitlen = length * 8;
#define in0 (vals[0])//x
#define in1 (vals[1])//y
#define in2 (vals[2])//z
#define in3 (vals[3])
#define in4 (vals[4])
#define in5 (vals[5])
#define in6 (vals[6])
#define in7 (vals[7])
#define in8 (vals[8])
#define in9 (vals[9])
#define in10 (vals[10])
#define in11 (vals[11])
#define in12 (vals[12])
#define in13 (vals[13])
#define in14 (bitlen) //w = bit length
#define in15 (0)
//Initialize hash value for this chunk:
a = a0;
b = b0;
c = c0;
d = d0;
/* Round 1 */
#define S11 7
#define S12 12
#define S13 17
#define S14 22
FF(a, b, c, d, in0, S11, 3614090360); /* 1 */
FF(d, a, b, c, in1, S12, 3905402710); /* 2 */
FF(c, d, a, b, in2, S13, 606105819); /* 3 */
FF(b, c, d, a, in3, S14, 3250441966); /* 4 */
FF(a, b, c, d, in4, S11, 4118548399); /* 5 */
FF(d, a, b, c, in5, S12, 1200080426); /* 6 */
FF(c, d, a, b, in6, S13, 2821735955); /* 7 */
FF(b, c, d, a, in7, S14, 4249261313); /* 8 */
FF(a, b, c, d, in8, S11, 1770035416); /* 9 */
FF(d, a, b, c, in9, S12, 2336552879); /* 10 */
FF(c, d, a, b, in10, S13, 4294925233); /* 11 */
FF(b, c, d, a, in11, S14, 2304563134); /* 12 */
FF(a, b, c, d, in12, S11, 1804603682); /* 13 */
FF(d, a, b, c, in13, S12, 4254626195); /* 14 */
FF(c, d, a, b, in14, S13, 2792965006); /* 15 */
FF(b, c, d, a, in15, S14, 1236535329); /* 16 */
/* Round 2 */
#define S21 5
#define S22 9
#define S23 14
#define S24 20
GG(a, b, c, d, in1, S21, 4129170786); /* 17 */
GG(d, a, b, c, in6, S22, 3225465664); /* 18 */
GG(c, d, a, b, in11, S23, 643717713); /* 19 */
GG(b, c, d, a, in0, S24, 3921069994); /* 20 */
GG(a, b, c, d, in5, S21, 3593408605); /* 21 */
GG(d, a, b, c, in10, S22, 38016083); /* 22 */
GG(c, d, a, b, in15, S23, 3634488961); /* 23 */
GG(b, c, d, a, in4, S24, 3889429448); /* 24 */
GG(a, b, c, d, in9, S21, 568446438); /* 25 */
GG(d, a, b, c, in14, S22, 3275163606); /* 26 */
GG(c, d, a, b, in3, S23, 4107603335); /* 27 */
GG(b, c, d, a, in8, S24, 1163531501); /* 28 */
GG(a, b, c, d, in13, S21, 2850285829); /* 29 */
GG(d, a, b, c, in2, S22, 4243563512); /* 30 */
GG(c, d, a, b, in7, S23, 1735328473); /* 31 */
GG(b, c, d, a, in12, S24, 2368359562); /* 32 */
/* Round 3 */
#define S31 4
#define S32 11
#define S33 16
#define S34 23
HH(a, b, c, d, in5, S31, 4294588738); /* 33 */
HH(d, a, b, c, in8, S32, 2272392833); /* 34 */
HH(c, d, a, b, in11, S33, 1839030562); /* 35 */
HH(b, c, d, a, in14, S34, 4259657740); /* 36 */
HH(a, b, c, d, in1, S31, 2763975236); /* 37 */
HH(d, a, b, c, in4, S32, 1272893353); /* 38 */
HH(c, d, a, b, in7, S33, 4139469664); /* 39 */
HH(b, c, d, a, in10, S34, 3200236656); /* 40 */
HH(a, b, c, d, in13, S31, 681279174); /* 41 */
HH(d, a, b, c, in0, S32, 3936430074); /* 42 */
HH(c, d, a, b, in3, S33, 3572445317); /* 43 */
HH(b, c, d, a, in6, S34, 76029189); /* 44 */
HH(a, b, c, d, in9, S31, 3654602809); /* 45 */
HH(d, a, b, c, in12, S32, 3873151461); /* 46 */
HH(c, d, a, b, in15, S33, 530742520); /* 47 */
HH(b, c, d, a, in2, S34, 3299628645); /* 48 */
/* Round 4 */
#define S41 6
#define S42 10
#define S43 15
#define S44 21
II(a, b, c, d, in0, S41, 4096336452); /* 49 */
II(d, a, b, c, in7, S42, 1126891415); /* 50 */
II(c, d, a, b, in14, S43, 2878612391); /* 51 */
II(b, c, d, a, in5, S44, 4237533241); /* 52 */
II(a, b, c, d, in12, S41, 1700485571); /* 53 */
II(d, a, b, c, in3, S42, 2399980690); /* 54 */
II(c, d, a, b, in10, S43, 4293915773); /* 55 */
II(b, c, d, a, in1, S44, 2240044497); /* 56 */
II(a, b, c, d, in8, S41, 1873313359); /* 57 */
II(d, a, b, c, in15, S42, 4264355552); /* 58 */
II(c, d, a, b, in6, S43, 2734768916); /* 59 */
II(b, c, d, a, in13, S44, 1309151649); /* 60 */
II(a, b, c, d, in4, S41, 4149444226); /* 61 */
II(d, a, b, c, in11, S42, 3174756917); /* 62 */
II(c, d, a, b, in2, S43, 718787259); /* 63 */
II(b, c, d, a, in9, S44, 3951481745); /* 64 */
a += a0;
b += b0;
c += c0;
d += d0;
*a1 = a;
*b1 = b;
*c1 = c;
*d1 = d;
}
unsigned int unhex(unsigned char x)
{
if (x <= 'F' && x >= 'A')
{
return (unsigned int)(x - 'A' + 10);
}
else if (x <= 'f' && x >= 'a')
{
return (unsigned int)(x - 'a' + 10);
}
else if (x <= '9' && x >= '0')
{
return (unsigned int)(x - '0');
}
return 0;
}
void md5_to_ints(unsigned char* md5, unsigned int *r0, unsigned int *r1, unsigned int *r2, unsigned int *r3)
{
unsigned int v0 = 0, v1 = 0, v2 = 0, v3 = 0;
int i = 0;
for (i = 0; i < 32; i += 2)
{
unsigned int first = unhex(md5[i]);
unsigned int second = unhex(md5[i + 1]);
unsigned int both = first * 16 + second;
both = both << 24;
if (i < 8)
{
v0 = (v0 >> 8) | both;
}
else if (i < 16)
{
v1 = (v1 >> 8) | both;
}
else if (i < 24)
{
v2 = (v2 >> 8) | both;
}
else if (i < 32)
{
v3 = (v3 >> 8) | both;
}
}
*r0 = v0;
*r1 = v1;
*r2 = v2;
*r3 = v3;
}
/*
* This method is given a test password and the unsigned int of the orginal password and will check
* to see if it is the original passowrd
*
*/
__host__ __device__ int myencrypt(unsigned char * test, unsigned int length, unsigned int a, unsigned int b, unsigned int c, unsigned int d)
{
unsigned int v1 = 0, v2 = 0, v3 = 0, v4 = 0;
md5_vfy(test, length, &v1, &v2, &v3, &v4);
if (v1 == a && v2 == b && v3 == c && v4 == d)
{
return 1;
}
else
return 0;
}//end of crypt method
__device__ bool Compare(unsigned char* guess, unsigned int* hash) {
return myencrypt(guess, MAX_PASSWORD_LENGTH, hash[0], hash[1], hash[2], hash[3]);
}
__global__ void square_array(char* a, unsigned int* hash) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned char guess[MAX_PASSWORD_LENGTH];
if (idx < pow((double)ALPHABET_SIZE, (double)PREFIX_LENGTH)) {
for (unsigned int i = 0; i < PREFIX_LENGTH; i++) {
if (i == PREFIX_LENGTH - 1) {
guess[i] = 'a' + idx % ALPHABET_SIZE;
}
else {
unsigned int offset = PREFIX_LENGTH - i - 1;
unsigned int t = idx % (int)(pow((double)ALPHABET_SIZE, (double)(offset + 1)));
guess[i] = 'a' + t / (unsigned long long int)(pow((double)ALPHABET_SIZE, (double)offset));
}
}
}
int remainingLetters = MAX_PASSWORD_LENGTH - PREFIX_LENGTH;
unsigned long i = 1;
for (int j = 0; j < remainingLetters; j++) {
i *= 26;
}
for (int pos = PREFIX_LENGTH; pos < MAX_PASSWORD_LENGTH; pos++) {
guess[pos] = 'a';
}
while (true) {
int tail;
for (tail = MAX_PASSWORD_LENGTH - 1; tail >= PREFIX_LENGTH && guess[tail] == 'z'; tail--);
if (tail < PREFIX_LENGTH) {
break;
}
else {
for (int j = MAX_PASSWORD_LENGTH - 1; j > tail; j--) {
guess[j] = 'a';
}
guess[tail] += 1;
}
if (Compare(guess, hash)) {
for (unsigned int jj = 0; jj < MAX_PASSWORD_LENGTH; jj++)
a[jj] = guess[jj];
break;
}
}
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
////Driver: hash a known 6-char password on the host, then brute-force it on the
////GPU (one thread per 4-char prefix, each thread walking the 2-char suffix space).
int main() {
	////Known plaintext whose MD5 digest becomes the search target.
	unsigned char password[MAX_PASSWORD_LENGTH] = { 'p', 'a', 's', 's', 'w', 'o' };
	char* a_h, *a_d;
	size_t size = MAX_PASSWORD_LENGTH * sizeof(char);
	unsigned int* hash_h, *hash_d;
	hash_h = (unsigned int*)malloc(4 * sizeof(unsigned int));
	// calculate hash and verify it works
	md5_vfy(password, MAX_PASSWORD_LENGTH, &hash_h[0], &hash_h[1], &hash_h[2], &hash_h[3]);
	printf("hash: 0x%08X 0x%08X 0x%08X 0x%08X\n", hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
	int res = myencrypt((unsigned char*)"password", MAX_PASSWORD_LENGTH, hash_h[0], hash_h[1], hash_h[2], hash_h[3]);
	printf("%d\n", res);
	// set array to a default value ('a'-1 sentinel), NUL-terminated so it prints as a string
	a_h = (char*)malloc(size + 1);
	for (unsigned int i = 0; i < size; i++) {
		a_h[i] = 'a' - 1;
	}
	a_h[size] = '\0';
	////All runtime calls are now checked with gpuErrchk (previously their error
	////codes were silently ignored).
	gpuErrchk(hipMalloc((void**)&a_d, size));
	gpuErrchk(hipMalloc((void**)&hash_d, 4 * sizeof(unsigned int)));
	gpuErrchk(hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice));
	gpuErrchk(hipMemcpy(hash_d, hash_h, 4 * sizeof(unsigned int), hipMemcpyHostToDevice));
	////One thread per candidate prefix: ceil(26^PREFIX_LENGTH / threadsPerBlock) blocks.
	int threadsPerBlock = 1024;
	int numBlocks = ceil(pow(ALPHABET_SIZE, PREFIX_LENGTH) / threadsPerBlock);
	hipEvent_t start, stop;
	gpuErrchk(hipEventCreate(&start));
	gpuErrchk(hipEventCreate(&stop));
	gpuErrchk(hipEventRecord(start));
	square_array<<<numBlocks, threadsPerBlock>>>(a_d, hash_d);
	gpuErrchk(hipGetLastError()); ////kernel launches do not return errors directly
	gpuErrchk(hipEventRecord(stop));
	////This blocking copy also waits for the kernel to finish.
	gpuErrchk(hipMemcpy(a_h, a_d, size, hipMemcpyDeviceToHost));
	printf("password: %s\n", a_h);
	gpuErrchk(hipEventSynchronize(stop));
	float milliseconds = 0;
	gpuErrchk(hipEventElapsedTime(&milliseconds, start, stop));
	printf("Done in: %f milliseconds\n", milliseconds);
	////Release everything; events, hash_h and hash_d were previously leaked.
	gpuErrchk(hipEventDestroy(start));
	gpuErrchk(hipEventDestroy(stop));
	free(a_h);
	free(hash_h);
	gpuErrchk(hipFree(a_d));
	gpuErrchk(hipFree(hash_d));
	return 0;
}
.file "main.hip"
.globl _Z7md5_vfyPhjPjS0_S0_S0_ # -- Begin function _Z7md5_vfyPhjPjS0_S0_S0_
.p2align 4, 0x90
.type _Z7md5_vfyPhjPjS0_S0_S0_,@function
_Z7md5_vfyPhjPjS0_S0_S0_: # @_Z7md5_vfyPhjPjS0_S0_S0_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $56, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 48(%rsp) # 8-byte Spill
movq %r8, 40(%rsp) # 8-byte Spill
movq %rcx, 32(%rsp) # 8-byte Spill
movq %rdx, 24(%rsp) # 8-byte Spill
movl %esi, %r12d
xorps %xmm0, %xmm0
movaps %xmm0, -32(%rsp)
movaps %xmm0, -48(%rsp)
movaps %xmm0, -64(%rsp)
movq $0, -16(%rsp)
testl %esi, %esi
je .LBB0_1
# %bb.2: # %.lr.ph.preheader
movl %r12d, %edx
xorl %esi, %esi
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_3: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movzbl (%rdi,%rax), %r8d
movl %esi, %ecx
andb $24, %cl
shll %cl, %r8d
movl %eax, %ecx
andl $-4, %ecx
orl %r8d, -64(%rsp,%rcx)
incq %rax
addl $8, %esi
cmpq %rax, %rdx
jne .LBB0_3
jmp .LBB0_4
.LBB0_1:
xorl %eax, %eax
.LBB0_4: # %._crit_edge
leal (,%rax,8), %ecx
movl $128, %edx
# kill: def $cl killed $cl killed $ecx
shll %cl, %edx
andl $-4, %eax
orl %edx, -64(%rsp,%rax)
movl -64(%rsp), %r13d
movl -60(%rsp), %ebx
leal -680876937(%r13), %ecx
movq %r13, -8(%rsp) # 8-byte Spill
roll $7, %ecx
leal -271733879(%rcx), %eax
movl %eax, %edx
andl $-271733879, %edx # imm = 0xEFCDAB89
movl $271733878, %esi # imm = 0x10325476
subl %ecx, %esi
andl $-1732584194, %esi # imm = 0x98BADCFE
orl %edx, %esi
leal (%rbx,%rsi), %edx
addl $-117830708, %edx # imm = 0xF8FA0BCC
movq %rbx, -72(%rsp) # 8-byte Spill
roll $12, %edx
addl %ecx, %edx
addl $-271733879, %edx # imm = 0xEFCDAB89
movl %eax, %esi
xorl $-271733879, %esi # imm = 0xEFCDAB89
andl %edx, %esi
xorl $-271733879, %esi # imm = 0xEFCDAB89
movl -56(%rsp), %edi
movq %rdi, 16(%rsp) # 8-byte Spill
addl %edi, %esi
addl $-1126478375, %esi # imm = 0xBCDB4DD9
roll $17, %esi
addl %edx, %esi
movl %edx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
movl -52(%rsp), %eax
movq %rax, (%rsp) # 8-byte Spill
addl %edi, %eax
addl $-1316259209, %eax # imm = 0xB18B7A77
roll $22, %eax
addl %esi, %eax
movl %esi, %edi
xorl %edx, %edi
andl %eax, %edi
xorl %edx, %edi
movl -48(%rsp), %r8d
movq %r8, -88(%rsp) # 8-byte Spill
addl %r8d, %ecx
addl %edi, %ecx
addl $-448152776, %ecx # imm = 0xE549BB38
roll $7, %ecx
addl %eax, %ecx
movl %eax, %edi
xorl %esi, %edi
andl %ecx, %edi
xorl %esi, %edi
movl -44(%rsp), %r8d
addl %r8d, %edx
movl %r8d, %ebp
addl %edi, %edx
addl $1200080426, %edx # imm = 0x4787C62A
roll $12, %edx
addl %ecx, %edx
movl %ecx, %edi
xorl %eax, %edi
andl %edx, %edi
xorl %eax, %edi
movl -40(%rsp), %r8d
addl %r8d, %esi
movq %r8, %r11
movq %r8, 8(%rsp) # 8-byte Spill
addl %esi, %edi
addl $-1473231341, %edi # imm = 0xA8304613
roll $17, %edi
addl %edx, %edi
movl %edx, %esi
xorl %ecx, %esi
andl %edi, %esi
xorl %ecx, %esi
movl -36(%rsp), %r8d
movl %r8d, -100(%rsp) # 4-byte Spill
addl %r8d, %eax
addl %esi, %eax
addl $-45705983, %eax # imm = 0xFD469501
roll $22, %eax
addl %edi, %eax
movl %edi, %esi
xorl %edx, %esi
andl %eax, %esi
xorl %edx, %esi
movl -32(%rsp), %r8d
movl %r8d, -124(%rsp) # 4-byte Spill
addl %r8d, %ecx
addl %esi, %ecx
addl $1770035416, %ecx # imm = 0x698098D8
roll $7, %ecx
addl %eax, %ecx
movl %eax, %esi
xorl %edi, %esi
andl %ecx, %esi
xorl %edi, %esi
movl -28(%rsp), %r8d
movq %r8, -120(%rsp) # 8-byte Spill
addl %r8d, %edx
addl %esi, %edx
addl $-1958414417, %edx # imm = 0x8B44F7AF
roll $12, %edx
addl %ecx, %edx
movl %ecx, %esi
xorl %eax, %esi
andl %edx, %esi
xorl %eax, %esi
movl -24(%rsp), %r8d
addl %r8d, %edi
movl %r8d, %r14d
movl %r8d, -112(%rsp) # 4-byte Spill
addl %edi, %esi
addl $-42063, %esi # imm = 0xFFFF5BB1
roll $17, %esi
addl %edx, %esi
movl %edx, %edi
xorl %ecx, %edi
andl %esi, %edi
xorl %ecx, %edi
movl -20(%rsp), %r10d
addl %r10d, %eax
movq %r10, -80(%rsp) # 8-byte Spill
addl %edi, %eax
addl $-1990404162, %eax # imm = 0x895CD7BE
roll $22, %eax
addl %esi, %eax
movl %esi, %edi
xorl %edx, %edi
andl %eax, %edi
xorl %edx, %edi
movl -16(%rsp), %r8d
movl %r8d, -104(%rsp) # 4-byte Spill
addl %r8d, %ecx
addl %ecx, %edi
addl $1804603682, %edi # imm = 0x6B901122
roll $7, %edi
addl %eax, %edi
movl %eax, %ecx
xorl %esi, %ecx
andl %edi, %ecx
xorl %esi, %ecx
movl -12(%rsp), %r8d
movq %r8, -96(%rsp) # 8-byte Spill
addl %r8d, %edx
addl %edx, %ecx
addl $-40341101, %ecx # imm = 0xFD987193
roll $12, %ecx
addl %edi, %ecx
movl %ecx, %edx
andl %edi, %edx
movl %ecx, %r15d
notl %r15d
movl %eax, %r8d
andl %r15d, %r8d
orl %edx, %r8d
leal (%rsi,%r12,8), %edx
addl %r8d, %edx
addl $-1502002290, %edx # imm = 0xA679438E
roll $17, %edx
addl %ecx, %edx
movl %edx, %esi
andl %ecx, %esi
movl %edx, %r8d
notl %r8d
leal (%rbx,%rdi), %r9d
andl %r8d, %edi
orl %esi, %edi
leal (%rax,%rdi), %esi
addl $1236535329, %esi # imm = 0x49B40821
roll $22, %esi
addl %edx, %esi
movl %esi, %eax
andl %ecx, %eax
andl %edx, %r15d
orl %eax, %r15d
leal (%r9,%r15), %eax
addl $-165796510, %eax # imm = 0xF61E2562
roll $5, %eax
addl %esi, %eax
movl %eax, %edi
andl %edx, %edi
andl %esi, %r8d
orl %edi, %r8d
addl %r11d, %ecx
addl %r8d, %ecx
addl $-1069501632, %ecx # imm = 0xC040B340
roll $9, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
addl %r10d, %edx
addl %edi, %edx
addl $643717713, %edx # imm = 0x265E5A51
roll $14, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
addl %r13d, %esi
addl %edi, %esi
addl $-373897302, %esi # imm = 0xE9B6C7AA
roll $20, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
addl %ebp, %eax
movl %ebp, %r13d
movl %ebp, -108(%rsp) # 4-byte Spill
leal (%rdi,%rax), %r15d
addl $-701558691, %r15d # imm = 0xD62F105D
roll $5, %r15d
addl %esi, %r15d
movl %r15d, %eax
xorl %esi, %eax
andl %edx, %eax
xorl %esi, %eax
addl %r14d, %ecx
leal (%rax,%rcx), %edi
addl $38016083, %edi # imm = 0x2441453
roll $9, %edi
addl %r15d, %edi
movl %edi, %eax
xorl %r15d, %eax
andl %esi, %eax
xorl %r15d, %eax
addl %edx, %eax
addl $-660478335, %eax # imm = 0xD8A1E681
roll $14, %eax
addl %edi, %eax
movl %eax, %ecx
xorl %edi, %ecx
andl %r15d, %ecx
xorl %edi, %ecx
movq -88(%rsp), %r10 # 8-byte Reload
addl %r10d, %esi
addl %esi, %ecx
addl $-405537848, %ecx # imm = 0xE7D3FBC8
roll $20, %ecx
addl %eax, %ecx
movl %ecx, %edx
xorl %eax, %edx
andl %edi, %edx
xorl %eax, %edx
addl -120(%rsp), %r15d # 4-byte Folded Reload
addl %r15d, %edx
addl $568446438, %edx # imm = 0x21E1CDE6
roll $5, %edx
addl %ecx, %edx
movl %edx, %esi
xorl %ecx, %esi
andl %eax, %esi
xorl %ecx, %esi
leal (%rdi,%r12,8), %edi
addl %edi, %esi
addl $-1019803690, %esi # imm = 0xC33707D6
roll $9, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
movq (%rsp), %r14 # 8-byte Reload
addl %r14d, %eax
addl %edi, %eax
addl $-187363961, %eax # imm = 0xF4D50D87
roll $14, %eax
addl %esi, %eax
movl %eax, %edi
xorl %esi, %edi
andl %edx, %edi
xorl %esi, %edi
movl -124(%rsp), %ebp # 4-byte Reload
addl %ebp, %ecx
addl %edi, %ecx
addl $1163531501, %ecx # imm = 0x455A14ED
roll $20, %ecx
addl %eax, %ecx
movl %ecx, %edi
xorl %eax, %edi
andl %esi, %edi
xorl %eax, %edi
movq -96(%rsp), %r8 # 8-byte Reload
addl %r8d, %edx
addl %edi, %edx
addl $-1444681467, %edx # imm = 0xA9E3E905
roll $5, %edx
addl %ecx, %edx
movl %edx, %edi
xorl %ecx, %edi
andl %eax, %edi
xorl %ecx, %edi
movq 16(%rsp), %rbx # 8-byte Reload
addl %ebx, %esi
addl %edi, %esi
addl $-51403784, %esi # imm = 0xFCEFA3F8
roll $9, %esi
addl %edx, %esi
movl %esi, %edi
xorl %edx, %edi
andl %ecx, %edi
xorl %edx, %edi
movl -100(%rsp), %r11d # 4-byte Reload
addl %r11d, %eax
addl %edi, %eax
addl $1735328473, %eax # imm = 0x676F02D9
roll $14, %eax
addl %esi, %eax
movl %eax, %edi
xorl %esi, %edi
andl %edx, %edi
xorl %esi, %edi
movl -104(%rsp), %r9d # 4-byte Reload
addl %r9d, %ecx
leal (%rdi,%rcx), %r15d
addl $-1926607734, %r15d # imm = 0x8D2A4C8A
roll $20, %r15d
addl %eax, %r15d
movl %r15d, %ecx
xorl %eax, %ecx
movl %ecx, %edi
xorl %esi, %edi
addl %r13d, %edx
addl %edx, %edi
addl $-378558, %edi # imm = 0xFFFA3942
roll $4, %edi
addl %r15d, %edi
xorl %edi, %ecx
addl %ebp, %esi
addl %esi, %ecx
addl $-2022574463, %ecx # imm = 0x8771F681
roll $11, %ecx
addl %edi, %ecx
movl %edi, %edx
xorl %r15d, %edx
xorl %ecx, %edx
addl -80(%rsp), %eax # 4-byte Folded Reload
addl %edx, %eax
addl $1839030562, %eax # imm = 0x6D9D6122
roll $16, %eax
addl %ecx, %eax
movl %ecx, %edx
xorl %edi, %edx
xorl %eax, %edx
leal (%r15,%r12,8), %esi
addl %esi, %edx
addl $-35309556, %edx # imm = 0xFDE5380C
roll $23, %edx
addl %eax, %edx
movl %eax, %esi
xorl %ecx, %esi
xorl %edx, %esi
addl -72(%rsp), %edi # 4-byte Folded Reload
addl %edi, %esi
addl $-1530992060, %esi # imm = 0xA4BEEA44
roll $4, %esi
addl %edx, %esi
movl %edx, %edi
xorl %eax, %edi
xorl %esi, %edi
addl %r10d, %ecx
addl %edi, %ecx
addl $1272893353, %ecx # imm = 0x4BDECFA9
roll $11, %ecx
addl %esi, %ecx
movl %esi, %edi
xorl %edx, %edi
xorl %ecx, %edi
addl %r11d, %eax
addl %edi, %eax
addl $-155497632, %eax # imm = 0xF6BB4B60
roll $16, %eax
addl %ecx, %eax
movl %ecx, %edi
xorl %esi, %edi
xorl %eax, %edi
movl -112(%rsp), %ebp # 4-byte Reload
addl %ebp, %edx
addl %edi, %edx
addl $-1094730640, %edx # imm = 0xBEBFBC70
roll $23, %edx
addl %eax, %edx
movl %eax, %edi
xorl %ecx, %edi
xorl %edx, %edi
addl %r8d, %esi
addl %edi, %esi
addl $681279174, %esi # imm = 0x289B7EC6
roll $4, %esi
addl %edx, %esi
movl %edx, %edi
xorl %eax, %edi
xorl %esi, %edi
movq -8(%rsp), %r8 # 8-byte Reload
addl %r8d, %ecx
addl %edi, %ecx
addl $-358537222, %ecx # imm = 0xEAA127FA
roll $11, %ecx
addl %esi, %ecx
movl %esi, %edi
xorl %edx, %edi
xorl %ecx, %edi
addl %r14d, %eax
addl %eax, %edi
addl $-722521979, %edi # imm = 0xD4EF3085
roll $16, %edi
addl %ecx, %edi
movl %ecx, %eax
xorl %esi, %eax
xorl %edi, %eax
movq 8(%rsp), %r10 # 8-byte Reload
addl %r10d, %edx
leal (%rax,%rdx), %r15d
addl $76029189, %r15d # imm = 0x4881D05
roll $23, %r15d
addl %edi, %r15d
movl %edi, %eax
xorl %ecx, %eax
xorl %r15d, %eax
addl -120(%rsp), %esi # 4-byte Folded Reload
addl %eax, %esi
addl $-640364487, %esi # imm = 0xD9D4D039
roll $4, %esi
addl %r15d, %esi
movl %r15d, %eax
xorl %edi, %eax
xorl %esi, %eax
addl %r9d, %ecx
addl %ecx, %eax
addl $-421815835, %eax # imm = 0xE6DB99E5
roll $11, %eax
addl %esi, %eax
movl %esi, %ecx
xorl %r15d, %ecx
xorl %eax, %ecx
leal (%rcx,%rdi), %edx
addl $530742520, %edx # imm = 0x1FA27CF8
roll $16, %edx
addl %eax, %edx
movl %eax, %ecx
xorl %esi, %ecx
xorl %edx, %ecx
addl %ebx, %r15d
addl %r15d, %ecx
addl $-995338651, %ecx # imm = 0xC4AC5665
addl %r8d, %esi
roll $23, %ecx
addl %edx, %ecx
movl %eax, %edi
notl %edi
orl %ecx, %edi
xorl %edx, %edi
leal (%rdi,%rsi), %r13d
addl $-198630844, %r13d # imm = 0xF4292244
addl %r11d, %eax
roll $6, %r13d
addl %ecx, %r13d
movl %edx, %esi
notl %esi
orl %r13d, %esi
xorl %ecx, %esi
leal (%rsi,%rax), %r8d
addl $1126891415, %r8d # imm = 0x432AFF97
roll $10, %r8d
leal (%rdx,%r12,8), %eax
addl %r13d, %r8d
movl %ecx, %edx
notl %edx
orl %r8d, %edx
xorl %r13d, %edx
leal (%rdx,%rax), %esi
addl $-1416354905, %esi # imm = 0xAB9423A7
addl -108(%rsp), %ecx # 4-byte Folded Reload
roll $15, %esi
addl %r8d, %esi
movl %r13d, %eax
notl %eax
orl %esi, %eax
xorl %r8d, %eax
addl %ecx, %eax
addl $-57434055, %eax # imm = 0xFC93A039
addl %r9d, %r13d
roll $21, %eax
addl %esi, %eax
movl %r8d, %ecx
notl %ecx
orl %eax, %ecx
xorl %esi, %ecx
addl %r13d, %ecx
addl $1700485571, %ecx # imm = 0x655B59C3
addl %r14d, %r8d
roll $6, %ecx
addl %eax, %ecx
movl %esi, %edx
notl %edx
orl %ecx, %edx
xorl %eax, %edx
addl %edx, %r8d
addl $-1894986606, %r8d # imm = 0x8F0CCC92
roll $10, %r8d
addl %ebp, %esi
addl %ecx, %r8d
movl %eax, %edx
notl %edx
orl %r8d, %edx
xorl %ecx, %edx
addl %esi, %edx
addl $-1051523, %edx # imm = 0xFFEFF47D
addl -72(%rsp), %eax # 4-byte Folded Reload
roll $15, %edx
addl %r8d, %edx
movl %ecx, %esi
notl %esi
orl %edx, %esi
xorl %r8d, %esi
addl %esi, %eax
addl $-2054922799, %eax # imm = 0x85845DD1
roll $21, %eax
addl %edx, %eax
addl -124(%rsp), %ecx # 4-byte Folded Reload
movl %r8d, %esi
notl %esi
orl %eax, %esi
xorl %edx, %esi
addl %esi, %ecx
addl $1873313359, %ecx # imm = 0x6FA87E4F
roll $6, %ecx
addl %eax, %ecx
addl %edx, %r10d
notl %edx
orl %ecx, %edx
xorl %eax, %edx
leal (%rdx,%r8), %esi
addl $-30611744, %esi # imm = 0xFE2CE6E0
roll $10, %esi
addl %ecx, %esi
movq -96(%rsp), %r8 # 8-byte Reload
addl %eax, %r8d
notl %eax
orl %esi, %eax
xorl %ecx, %eax
leal (%rax,%r10), %edx
addl $-1560198380, %edx # imm = 0xA3014314
roll $15, %edx
addl %esi, %edx
movq -88(%rsp), %rax # 8-byte Reload
addl %ecx, %eax
notl %ecx
orl %edx, %ecx
xorl %esi, %ecx
addl %r8d, %ecx
addl $1309151649, %ecx # imm = 0x4E0811A1
roll $21, %ecx
addl %edx, %ecx
movq -80(%rsp), %rdi # 8-byte Reload
addl %esi, %edi
notl %esi
orl %ecx, %esi
xorl %edx, %esi
addl %eax, %esi
addl $-145523070, %esi # imm = 0xF7537E82
roll $6, %esi
leal (%rsi,%rcx), %eax
addl %edx, %ebx
notl %edx
orl %eax, %edx
xorl %ecx, %edx
addl %edi, %edx
addl $-1120210379, %edx # imm = 0xBD3AF235
roll $10, %edx
leal (%rdx,%rax), %edi
movl %ecx, %r8d
notl %r8d
orl %edi, %r8d
xorl %eax, %r8d
addl %ebx, %r8d
addl $718787259, %r8d # imm = 0x2AD7D2BB
movq -120(%rsp), %r9 # 8-byte Reload
addl %ecx, %r9d
roll $15, %r8d
addl %esi, %ecx
addl $1732584193, %ecx # imm = 0x67452301
movq 24(%rsp), %rsi # 8-byte Reload
movl %ecx, (%rsi)
leal (%r8,%rdi), %ecx
movl %eax, %esi
notl %esi
orl %ecx, %esi
xorl %edi, %esi
addl %r9d, %esi
addl $-343485551, %esi # imm = 0xEB86D391
roll $21, %esi
addl %esi, %ecx
addl $-271733879, %ecx # imm = 0xEFCDAB89
movq 32(%rsp), %rsi # 8-byte Reload
movl %ecx, (%rsi)
leal -1732584194(%r8,%rdi), %ecx
movq 40(%rsp), %rsi # 8-byte Reload
movl %ecx, (%rsi)
addl %edx, %eax
addl $271733878, %eax # imm = 0x10325476
movq 48(%rsp), %rcx # 8-byte Reload
movl %eax, (%rcx)
addq $56, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7md5_vfyPhjPjS0_S0_S0_, .Lfunc_end0-_Z7md5_vfyPhjPjS0_S0_S0_
.cfi_endproc
# -- End function
.globl _Z5unhexh # -- Begin function _Z5unhexh
.p2align 4, 0x90
.type _Z5unhexh,@function
# unhex(unsigned char c): decode one hex digit to its 0..15 value.
# 'A'..'F' -> 10..15, 'a'..'f' -> 10..15, '0'..'9' -> 0..9, anything else -> 0.
_Z5unhexh: # @_Z5unhexh
.cfi_startproc
# %bb.0:
movl %edi, %eax
leal -65(%rax), %ecx # ecx = c - 'A'
cmpb $5, %cl # in 'A'..'F'?
ja .LBB1_2
# %bb.1:
addl $-55, %eax # c - 'A' + 10
# kill: def $eax killed $eax killed $rax
retq
.LBB1_2:
leal -97(%rax), %ecx # ecx = c - 'a'
cmpb $5, %cl # in 'a'..'f'?
ja .LBB1_4
# %bb.3:
addl $-87, %eax # c - 'a' + 10
# kill: def $eax killed $eax killed $rax
retq
.LBB1_4:
leal -48(%rax), %ecx # ecx = c - '0'
addl $-48, %eax
xorl %edx, %edx
cmpb $10, %cl # decimal digit? keep c-'0', else 0
cmovbl %eax, %edx
movl %edx, %eax
# kill: def $eax killed $eax killed $rax
retq
.Lfunc_end1:
.size _Z5unhexh, .Lfunc_end1-_Z5unhexh
.cfi_endproc
# -- End function
.globl _Z11md5_to_intsPhPjS0_S0_S0_ # -- Begin function _Z11md5_to_intsPhPjS0_S0_S0_
.p2align 4, 0x90
.type _Z11md5_to_intsPhPjS0_S0_S0_,@function
_Z11md5_to_intsPhPjS0_S0_S0_: # @_Z11md5_to_intsPhPjS0_S0_S0_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %eax, %eax
xorl %r10d, %r10d
xorl %ebp, %ebp
xorl %ebx, %ebx
xorl %r9d, %r9d
xorl %r11d, %r11d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_12: # in Loop: Header=BB2_1 Depth=1
shrl $8, %ebp
orl %ebp, %r15d
movl %r15d, %ebp
.LBB2_18: # in Loop: Header=BB2_1 Depth=1
leaq 2(%r10), %r14
cmpq $30, %r10
movq %r14, %r10
jae .LBB2_19
.LBB2_1: # =>This Inner Loop Header: Depth=1
movzbl (%rdi,%r10), %r14d
leal -65(%r14), %r15d
cmpb $5, %r15b
ja .LBB2_3
# %bb.2: # in Loop: Header=BB2_1 Depth=1
addl $-55, %r14d
jmp .LBB2_6
.p2align 4, 0x90
.LBB2_3: # in Loop: Header=BB2_1 Depth=1
leal -97(%r14), %r15d
cmpb $5, %r15b
ja .LBB2_5
# %bb.4: # in Loop: Header=BB2_1 Depth=1
addl $-87, %r14d
jmp .LBB2_6
.p2align 4, 0x90
.LBB2_5: # in Loop: Header=BB2_1 Depth=1
leal -48(%r14), %r15d
addl $-48, %r14d
cmpb $10, %r15b
cmovael %eax, %r14d
.LBB2_6: # %_Z5unhexh.exit
# in Loop: Header=BB2_1 Depth=1
movzbl 1(%rdi,%r10), %r15d
leal -65(%r15), %r12d
cmpb $5, %r12b
ja .LBB2_8
# %bb.7: # in Loop: Header=BB2_1 Depth=1
addl $-55, %r15d
jmp .LBB2_11
.p2align 4, 0x90
.LBB2_8: # in Loop: Header=BB2_1 Depth=1
leal -97(%r15), %r12d
cmpb $5, %r12b
ja .LBB2_10
# %bb.9: # in Loop: Header=BB2_1 Depth=1
addl $-87, %r15d
jmp .LBB2_11
.p2align 4, 0x90
.LBB2_10: # in Loop: Header=BB2_1 Depth=1
leal -48(%r15), %r12d
addl $-48, %r15d
cmpb $10, %r12b
cmovael %eax, %r15d
.LBB2_11: # %_Z5unhexh.exit40
# in Loop: Header=BB2_1 Depth=1
shll $28, %r14d
shll $24, %r15d
addl %r14d, %r15d
cmpq $7, %r10
jbe .LBB2_12
# %bb.13: # in Loop: Header=BB2_1 Depth=1
cmpq $15, %r10
ja .LBB2_15
# %bb.14: # in Loop: Header=BB2_1 Depth=1
shrl $8, %ebx
orl %ebx, %r15d
movl %r15d, %ebx
jmp .LBB2_18
.p2align 4, 0x90
.LBB2_15: # in Loop: Header=BB2_1 Depth=1
cmpq $23, %r10
ja .LBB2_17
# %bb.16: # in Loop: Header=BB2_1 Depth=1
shrl $8, %r11d
orl %r11d, %r15d
movl %r15d, %r11d
jmp .LBB2_18
.LBB2_17: # in Loop: Header=BB2_1 Depth=1
shrl $8, %r9d
orl %r9d, %r15d
movl %r15d, %r9d
jmp .LBB2_18
.LBB2_19:
movl %ebp, (%rsi)
movl %ebx, (%rdx)
movl %r11d, (%rcx)
movl %r9d, (%r8)
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z11md5_to_intsPhPjS0_S0_S0_, .Lfunc_end2-_Z11md5_to_intsPhPjS0_S0_S0_
.cfi_endproc
# -- End function
.globl _Z9myencryptPhjjjjj # -- Begin function _Z9myencryptPhjjjjj
.p2align 4, 0x90
.type _Z9myencryptPhjjjjj,@function
_Z9myencryptPhjjjjj: # @_Z9myencryptPhjjjjj
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $24, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %ebp
movl %ecx, %r14d
movl %edx, %r15d
movl $0, 20(%rsp)
movl $0, 16(%rsp)
movl $0, 12(%rsp)
movl $0, 8(%rsp)
leaq 20(%rsp), %rdx
leaq 16(%rsp), %rcx
leaq 12(%rsp), %r8
leaq 8(%rsp), %r9
callq _Z7md5_vfyPhjPjS0_S0_S0_
xorl 20(%rsp), %r15d
xorl 16(%rsp), %r14d
xorl 12(%rsp), %ebp
orl %r15d, %r14d
orl %r14d, %ebp
xorl 8(%rsp), %ebx
xorl %eax, %eax
orl %ebp, %ebx
sete %al
addq $24, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z9myencryptPhjjjjj, .Lfunc_end3-_Z9myencryptPhjjjjj
.cfi_endproc
# -- End function
.globl _Z27__device_stub__square_arrayPcPj # -- Begin function _Z27__device_stub__square_arrayPcPj
.p2align 4, 0x90
.type _Z27__device_stub__square_arrayPcPj,@function
_Z27__device_stub__square_arrayPcPj: # @_Z27__device_stub__square_arrayPcPj
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z12square_arrayPcPj, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end4:
.size _Z27__device_stub__square_arrayPcPj, .Lfunc_end4-_Z27__device_stub__square_arrayPcPj
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $136, %rsp
.cfi_def_cfa_offset 192
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $1936941424, 90(%rsp) # imm = 0x73736170
movw $28535, 94(%rsp) # imm = 0x6F77
movl $16, %edi
callq malloc
movq %rax, %r14
leaq 4(%rax), %rcx
leaq 8(%rax), %r8
leaq 12(%rax), %r9
leaq 90(%rsp), %rdi
movl $6, %esi
movq %rax, %rdx
callq _Z7md5_vfyPhjPjS0_S0_S0_
movl (%r14), %esi
movl 4(%r14), %edx
movl 8(%r14), %ecx
movl 12(%r14), %r8d
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl (%r14), %r13d
movl 4(%r14), %r12d
movl 8(%r14), %ebx
movl 12(%r14), %ebp
movl $0, 16(%rsp)
movl $0, 64(%rsp)
movl $0, 48(%rsp)
movl $0, 80(%rsp)
leaq 16(%rsp), %rdx
leaq 64(%rsp), %rcx
leaq 48(%rsp), %r8
leaq 80(%rsp), %r15
movl $.L.str.1, %edi
movl $6, %esi
movq %r15, %r9
callq _Z7md5_vfyPhjPjS0_S0_S0_
xorl 16(%rsp), %r13d
xorl 64(%rsp), %r12d
orl %r13d, %r12d
xorl 48(%rsp), %ebx
xorl 80(%rsp), %ebp
orl %r12d, %ebx
xorl %esi, %esi
orl %ebx, %ebp
sete %sil
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl $7, %edi
callq malloc
movq %rax, %rbx
movl $1616928864, (%rax) # imm = 0x60606060
movw $24672, 4(%rax) # imm = 0x6060
movb $0, 6(%rax)
leaq 8(%rsp), %rdi
movl $6, %esi
callq hipMalloc
leaq 104(%rsp), %rdi
movl $16, %esi
callq hipMalloc
movq 8(%rsp), %rdi
movl $6, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 104(%rsp), %rdi
movl $16, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 96(%rsp), %rdi
callq hipEventCreate
leaq 40(%rsp), %rdi
callq hipEventCreate
movq 96(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967743, %rdi # imm = 0x1000001BF
leaq 577(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB5_2
# %bb.1:
movq 8(%rsp), %rax
movq 104(%rsp), %rcx
movq %rax, 80(%rsp)
movq %rcx, 128(%rsp)
movq %r15, 16(%rsp)
leaq 128(%rsp), %rax
movq %rax, 24(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z12square_arrayPcPj, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB5_2:
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rsi
movl $6, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movl $.L.str.3, %edi
movq %rbx, %rsi
xorl %eax, %eax
callq printf
movq 40(%rsp), %rdi
callq hipEventSynchronize
movl $0, 16(%rsp)
movq 96(%rsp), %rsi
movq 40(%rsp), %rdx
leaq 16(%rsp), %rdi
callq hipEventElapsedTime
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $136, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size main, .Lfunc_end5-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12square_arrayPcPj, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
# atexit handler: unregister the HIP fat binary at most once.
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi # handle still registered?
je .LBB7_2
# %bb.1:
pushq %rax # realign stack for the call
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip) # null the handle so a second run is a no-op
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12square_arrayPcPj,@object # @_Z12square_arrayPcPj
.section .rodata,"a",@progbits
.globl _Z12square_arrayPcPj
.p2align 3, 0x0
_Z12square_arrayPcPj:
.quad _Z27__device_stub__square_arrayPcPj
.size _Z12square_arrayPcPj, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "hash: 0x%08X 0x%08X 0x%08X 0x%08X\n"
.size .L.str, 35
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "password"
.size .L.str.1, 9
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d\n"
.size .L.str.2, 4
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "password: %s\n"
.size .L.str.3, 14
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Done in: %f milliseconds\n"
.size .L.str.4, 26
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12square_arrayPcPj"
.size .L__unnamed_1, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__square_arrayPcPj
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12square_arrayPcPj
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //////////////////////////////////////////////////////////////////////////
////This is the code implementation for GPU Premier League Round 1
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>
using namespace std;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
////Submission identification (team and author), reported with the assignment.
namespace name
{
std::string team="using_namespace_std;"; ////team name (underscores instead of spaces)
std::string author_1="Jeff Liu"; ////author name
};
////This is a matrix class to carry out linear algebra operations on both GPU and CPU
////It is the same as the sample code I showed in class on Week 3.
////NOTICE: You do not have to change the implementation in this class.
////But if you do want to change part of it for performance reasons, please let us known by writting a submission note on Canvas.
////Matrix with element storage on either host (std::vector) or device (raw CUDA
////pointer). operator= copies across all four host/device src/dst combinations.
class Matrix{
public:
	int m=0;	////number of rows
	int n=0;	////number of columns
	vector<float> elements_on_host;	////element array on host
	float* elements_on_dev=0;	////element array on device (owned iff !on_host)
	bool on_host=true;

	////constructors
	__host__ Matrix(){}
	__host__ Matrix(const int _m,const int _n,bool _on_host=true)
	{
		on_host=_on_host;
		if(on_host)Resize_On_Host(_m,_n);
		else Resize_On_Device(_m,_n);
	}

	////Deep-copy constructor. Without it the compiler-generated shallow copy
	////duplicates elements_on_dev and the destructor then cudaFree's it twice.
	__host__ Matrix(const Matrix& mtx)
	{
		on_host=mtx.on_host;
		*this=mtx;	////reuse the four-way copy logic in operator=
	}

	////destructor: only device storage needs an explicit release
	__host__ ~Matrix()
	{
		if(!on_host&&elements_on_dev!=0) cudaFree(elements_on_dev);
	}

	////Resize on host or device. NOTE: existing element values are not preserved
	////when the size changes on device (fresh cudaMalloc).
	__host__ void Resize_On_Host(const int _m,const int _n)
	{
		if(m==_m&&n==_n)return;
		m=_m;
		n=_n;
		elements_on_host.resize(m*n);
	}

	__host__ void Resize_On_Device(const int _m,const int _n)
	{
		if(m==_m&&n==_n)return;
		m=_m;
		n=_n;
		if(elements_on_dev!=0)cudaFree(elements_on_dev);
		cudaMalloc((void**)&elements_on_dev,m*n*sizeof(float));
	}

	////random access a matrix element (valid for host storage only)
	inline __host__ float& operator() (const int i,const int j)
	{
		return elements_on_host[i*n+j];
	}

	inline __host__ const float& operator() (const int i,const int j) const
	{
		return elements_on_host[i*n+j];
	}

	////copy data with four cases (CPU->CPU, GPU->CPU, GPU->GPU, CPU->GPU)
	__host__ Matrix& operator= (const Matrix& mtx)
	{
		if(on_host&&mtx.on_host){
			Resize_On_Host(mtx.m,mtx.n);
			elements_on_host=mtx.elements_on_host;
		}
		else if(on_host&&!mtx.on_host){
			Resize_On_Host(mtx.m,mtx.n);
			cudaMemcpy(&elements_on_host[0],mtx.elements_on_dev,m*n*sizeof(float),cudaMemcpyDeviceToHost);
		}
		else if(!on_host&&!mtx.on_host){
			Resize_On_Device(mtx.m,mtx.n);
			////was mtx.m*n: identical value after the resize, but spell the
			////source size consistently as mtx.m*mtx.n
			cudaMemcpy(elements_on_dev,mtx.elements_on_dev,mtx.m*mtx.n*sizeof(float),cudaMemcpyDeviceToDevice);
		}
		else if(!on_host&&mtx.on_host){
			Resize_On_Device(mtx.m,mtx.n);
			cudaMemcpy(elements_on_dev,&mtx.elements_on_host[0],m*n*sizeof(float),cudaMemcpyHostToDevice);
		}
		return *this;
	}

	////print matrix elements on screen (host matrices only)
	__host__ friend ostream & operator << (ostream &out,const Matrix &mtx)
	{
		if(!mtx.on_host)
			cout<<"Print for matrix on device is not supported."<<endl;
		for(int i=0;i<mtx.m;i++){
			for(int j=0;j<mtx.n;j++){
				out<<mtx(i,j)<<", ";
			}
			out<<std::endl;
		}
		return out;
	}
};
//////////////////////////////////////////////////////////////////////////
////Your tasks start!
////This is a sample implementation without using any memory hierarchy
////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm
////Baseline C=A*B kernel using only global memory, one thread per C element.
////Expects a 2D launch with x covering rows (Am) and y covering columns (Bn).
__global__ void Matrix_Multiplication_AB_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn)
{
	int i=blockIdx.x*blockDim.x+threadIdx.x;	////row index into A and C
	int j=blockIdx.y*blockDim.y+threadIdx.y;	////column index into B and C
	if(i>=Am||j>=Bn)return;	////guard: grid may not divide the matrix evenly (was OOB)
	float val=0.f;
	for(int k=0;k<An;k++)
		val+=Ae[i*An+k]*Be[k*Bn+j];
	Ce[i*Bn+j]=val;
}
//////////////////////////////////////////////////////////////////////////
////Task 1: implement your fast matrix-matrix multiplication in the following kernel function.
////The function parameters are the same as the sample function:
////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm
//////////////////////////////////////////////////////////////////////////
__global__ void Matrix_Multiplication_AB_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn)
{
	////Tiled C=A*B using 32x32 shared-memory tiles.
	////Expected launch (see Test_Matrix_Multiplication_AB_On_GPU):
	////grid=dim3(Bn/32,Am/32), block=dim3(32,32); Am, An, Bn multiples of 32.
	const int block_size = 32;
	const int num_tiles = An / block_size; ////tiles along the shared (inner) dimension
	__shared__ float a_shared[block_size][block_size];
	__shared__ float b_shared[block_size][block_size];
	////Per-thread accumulator: kept in a register instead of the original
	////c_shared array — only the owning thread ever touched its c_shared cell,
	////so shared storage was wasted and cost occupancy.
	float val = 0.f;
	// calculate 1d index of this thread's element of C (row-major over the grid)
	int thr_per_block = blockDim.y * blockDim.x;
	int c_idx = blockIdx.y * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
	for (int tile = 0; tile < num_tiles; ++tile) {
		// A's tile walks right along the block-row; B's tile walks down the block-column
		int a_idx = blockIdx.y * num_tiles * thr_per_block + threadIdx.y * num_tiles * blockDim.x + tile * blockDim.x + threadIdx.x;
		int b_idx = tile * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
		a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx];
		b_shared[threadIdx.y][threadIdx.x] = Be[b_idx];
		__syncthreads(); ////both tiles fully loaded before any thread reads them
		////Same math, same order as the 32 hand-unrolled statements this
		////replaces; the fixed trip count lets the compiler unroll fully.
		#pragma unroll
		for (int k = 0; k < block_size; ++k)
			val += a_shared[threadIdx.y][k] * b_shared[k][threadIdx.x];
		__syncthreads(); ////all reads done before the next tile overwrites shared memory
	}
	// save to global
	Ce[c_idx] = val;
}
////This is a sample implementation without using any memory hierarchy
////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An]
__global__ void Matrix_Multiplication_ATBA_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An)
{
	////Reference implementation of C=A^T*B*A: each thread forms one C element
	////via a double loop over the Am x Am inner dimensions, all in global memory.
	const int ci=blockIdx.x*blockDim.x+threadIdx.x; ////row of C (a column index of A)
	const int cj=blockIdx.y*blockDim.y+threadIdx.y; ////column of C
	float acc=0.f;
	for(int l=0;l<Am;l++){
		for(int k=0;k<Am;k++){
			acc+=Ae[l*An+ci]*Be[l*Am+k]*Ae[k*An+cj];
		}
	}
	Ce[ci*An+cj]=acc;
}
//////////////////////////////////////////////////////////////////////////
////Task 2: calculate the matrix multiplication in the following kernel function.
////The function parameters are the same as the sample function:
////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An]
//////////////////////////////////////////////////////////////////////////
__global__ void Matrix_Multiplication_ATBA_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An)
{
	////Tiled C=A^T*B*A with 32x32 tiles and block=dim3(32,32).
	////Preconditions: Am and An are multiples of 32.
	////For each tile-row index ay, the inner loop accumulates one tile of
	////(A^T*B) into accum_shared, which is then multiplied by the matching
	////tile of A and folded into this thread's C accumulator.
	// memory setup
	const int num_tiles = Am / 32;
	__shared__ float aTT_shared[32][32]; ////tile of A read for the A^T factor (accessed column-wise)
	__shared__ float b_shared[32][32];
	__shared__ float a_shared[32][32];
	////accum_shared must stay in shared memory: threads read each other's
	////entries (row access across threadIdx.x) in the second multiply.
	__shared__ float accum_shared[32][32];
	// coordinate setup
	int thr_per_block = blockDim.y * blockDim.x;
	int c_idx = blockIdx.y*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
	////Per-thread C accumulator: only the owning thread ever touched its
	////c_shared cell in the original, so a register replaces that array.
	float c_val = 0.f;
	// following pseudocode coordinates are (y,x)
	for (int ay = 0; ay < num_tiles; ++ay) { //ay = bx
		// load a(ay,blockIdx.x)
		int a_idx = ay*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
		a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx];
		// clear accumulator
		accum_shared[threadIdx.y][threadIdx.x] = 0;
		__syncthreads();
		for (int by = 0; by < num_tiles; ++by) { // by = aTx = aTTy
			// load aTT(by, blockIdx.y) (loads A; the transpose happens at read time) and b(by,ay)
			int b_idx = by*num_tiles*thr_per_block + threadIdx.y*num_tiles*blockDim.x + ay*blockDim.x + threadIdx.x;
			int aTT_idx = by*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.y*blockDim.x + threadIdx.x;
			b_shared[threadIdx.y][threadIdx.x] = Be[b_idx];
			aTT_shared[threadIdx.y][threadIdx.x] = Ae[aTT_idx];
			__syncthreads(); ////tiles fully loaded before reads
			////A^T*B partial product; same math and order as the 32
			////hand-unrolled statements this replaces.
			#pragma unroll
			for (int k = 0; k < 32; ++k)
				accum_shared[threadIdx.y][threadIdx.x] += aTT_shared[k][threadIdx.y] * b_shared[k][threadIdx.x];
			__syncthreads(); ////reads done before next iteration overwrites tiles
		}
		////(A^T*B) tile times the A tile, folded into this thread's C value.
		#pragma unroll
		for (int k = 0; k < 32; ++k)
			c_val += accum_shared[threadIdx.y][k] * a_shared[k][threadIdx.x];
		__syncthreads(); ////accum/a_shared reads done before next ay iteration rewrites them
	}
	// save c to global
	Ce[c_idx] = c_val;
}
//////////////////////////////////////////////////////////////////////////
////Task 3: calculate the Frobenius norm of a matrix
////The definition of F-norm for a matrix is square root of (the sum of squares of all the matrix elements), i.e., F=sqrt(sum_(A_ij^2))
////See the definition: https://mathworld.wolfram.com/FrobeniusNorm.html
//////////////////////////////////////////////////////////////////////////
////Please write your own kernel function here, and call it in the function Test_F_Norm_On_GPU to test its correctness and performance
__global__ void F_Norm_On_GPU_Lazy(const float* Ae, float* sum)
{
	////Naive F-norm accumulation kept for reference: every thread squares its
	////element and atomically adds it into the single global accumulator sum[0].
	////The original staged the square through a 16x16 shared array that only the
	////writing thread ever read back, so that array is dropped — same result,
	////no wasted shared memory.
	int thr_per_block = blockDim.y * blockDim.x;
	int idx = blockIdx.y*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
	float element = Ae[idx];
	////Heavily contended single-address atomic — intentionally the "lazy" baseline.
	atomicAdd(&sum[0], element * element);
}
__global__ void F_Norm_On_GPU(const float* Ae, float* Be, bool round1)
{
	////Block-wise sum reduction. Each thread folds two input elements
	////(blockDim.x apart) into dynamic shared memory, then a tree reduction
	////leaves the block total in sdata[0], written to Be[blockIdx.x].
	////round1==true: inputs are raw matrix entries and are squared first;
	////round1==false: inputs are already partial sums (second pass).
	////Dynamic shared memory: blockDim.x floats (3rd launch parameter).
	extern __shared__ float sdata[];
	const unsigned int tid = threadIdx.x;
	const int base = blockIdx.x*blockDim.x*2 + tid;
	float lhs = Ae[base];
	float rhs = Ae[base + blockDim.x]; ////stride offset keeps both loads coalesced
	if (round1) {
		lhs *= lhs;
		rhs *= rhs;
	}
	////Pair-wise first add halves the number of reduction threads needed.
	sdata[tid] = lhs + rhs;
	__syncthreads();
	////Standard shared-memory tree reduction (reduce4 from the class notes).
	for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
		if (tid < stride) sdata[tid] += sdata[tid + stride];
		__syncthreads();
	}
	if (tid == 0) Be[blockIdx.x] = sdata[0];
}
////Congratulations, your tasks are all finished!
//////////////////////////////////////////////////////////////////////////
////Here are the test functions for your three kernel implementations
////Global output stream: the three test functions append timings/results here;
////opened in main() as "<team>_competition_1_matrix.dat".
ofstream out;
////Times the C=A*B kernel: uploads A and B, launches the selected kernel,
////downloads C, and records the elapsed GPU time to stdout and the report file.
////Preconditions: C is pre-sized to [A.m, B.n] and dims are multiples of 32.
__host__ void Test_Matrix_Multiplication_AB_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
	//// Load A and B to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	Matrix B_on_dev(B.m,B.n,false);
	B_on_dev=B;
	//// Allocate C in device memory
	Matrix C_on_dev(A_on_dev.m,B_on_dev.n,false);
	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);
	//// Invoke kernel
	const int block_size=32;
	const int block_num_x=C.m/block_size;
	const int block_num_y=C.n/block_size;
#ifdef POORMAN
	Matrix_Multiplication_AB_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif
#ifndef POORMAN
	////NOTE: grid x/y intentionally swapped vs. the poorman launch — the tiled kernel indexes C with blockIdx.y as the row.
	Matrix_Multiplication_AB_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif
	////Surface launch-configuration errors that would otherwise fail silently.
	cudaError_t launch_err=cudaGetLastError();
	if(launch_err!=cudaSuccess)
		printf("\nAB kernel launch failed: %s\n",cudaGetErrorString(launch_err));
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for matrix multiplication AB: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	//// Transfer data back to CPU
	C=C_on_dev;
	out<<"T1: "<<gpu_time<<endl;
}
////Times the C=A^T*B*A kernel: uploads A and B, launches the selected kernel,
////downloads C, and records the elapsed GPU time to stdout and the report file.
////Preconditions: C is pre-sized to [A.n, A.n], B is [A.m, A.m], dims multiples of 32.
__host__ void Test_Matrix_Multiplication_ATBA_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
	//// Load A and B to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	Matrix B_on_dev(B.m,B.n,false);
	B_on_dev=B;
	//// Allocate C in device memory
	Matrix C_on_dev(A_on_dev.n,A_on_dev.n,false);
	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);
	//// Invoke kernel
	const int block_size=32;
	const int block_num_x=C.m/block_size;
	const int block_num_y=C.n/block_size;
#ifdef POORMAN
	Matrix_Multiplication_ATBA_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif
#ifndef POORMAN
	////NOTICE: You do not have to use the block_size I specified here. You may customize the size of your grid and blocks for better performance.
	Matrix_Multiplication_ATBA_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif
	////Surface launch-configuration errors that would otherwise fail silently.
	cudaError_t launch_err=cudaGetLastError();
	if(launch_err!=cudaSuccess)
		printf("\nATBA kernel launch failed: %s\n",cudaGetErrorString(launch_err));
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for matrix multiplication ATBA: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	//// Transfer data back to CPU
	C=C_on_dev;
	out<<"T2: "<<gpu_time<<endl;
}
////Times the Frobenius-norm computation: uploads A, reduces the sum of squares
////on the GPU (atomic-add baseline under POORMAN, two-pass parallel reduction
////otherwise), and returns sqrt of the total through `norm`.
////Preconditions for the reduction path: A.n and A.m are even; A.m partial
////sums fit in a single second-pass block.
__host__ void Test_Matrix_F_Norm_On_GPU(const Matrix& A, float& norm)
{
	//// Load A to device memory
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	cudaEvent_t start,end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	float gpu_time=0.0f;
	cudaDeviceSynchronize();
	cudaEventRecord(start);
#ifdef POORMAN // atomic add
	//// Invoke kernel
	const int block_size=16;
	const int block_num_x=A.n/block_size;
	const int block_num_y=A.m/block_size;
	float *sum_dev = nullptr;
	cudaMalloc((void**)&sum_dev, sizeof(float));
	////Accumulator starts undefined on the device; zero it before the kernel.
	cudaMemset(sum_dev, 0, sizeof(float));
	F_Norm_On_GPU_Lazy<<<dim3(block_num_x,block_num_y), dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev, sum_dev);
	float *sum_host = (float *)malloc(sizeof(float)); ////was a hard-coded 4
	cudaMemcpy(sum_host, sum_dev, sizeof(float), cudaMemcpyDeviceToHost);
	cudaFree(sum_dev);
	norm = sqrt(*sum_host);
	free(sum_host);
#endif // ifdef
#ifndef POORMAN // parallel reduction
	////Pass 1: one block per matrix row, each thread folds two elements → A.m partial sums.
	const int r1_blocks = A.m;
	const int r1_threads = A.n / 2;
	////Pass 2: a single block reduces the A.m partial sums.
	const int r2_threads = A.m / 2;
	float *B_dev = nullptr;
	cudaMalloc((void**)&B_dev, A.m * sizeof(float));
	F_Norm_On_GPU<<<r1_blocks, r1_threads, r1_threads*sizeof(float)>>>(A_on_dev.elements_on_dev, B_dev, true);
	////In-place second pass is safe: only B_dev[0] is written, at the very end.
	F_Norm_On_GPU<<<1, r2_threads, r2_threads*sizeof(float)>>>(B_dev, B_dev, false);
	////Surface launch-configuration errors that would otherwise fail silently.
	cudaError_t launch_err=cudaGetLastError();
	if(launch_err!=cudaSuccess)
		printf("\nF-norm kernel launch failed: %s\n",cudaGetErrorString(launch_err));
	float result = 0;
	cudaMemcpy(&result,B_dev,sizeof(float),cudaMemcpyDeviceToHost);
	norm = sqrt(result);
	cudaFree(B_dev);
#endif // ifndef
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for F norm: %.4f ms\n",gpu_time);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	out<<"T3: "<<gpu_time<<endl;
}
int main()
{
	////Entry point: validates the team name, opens the result file, builds the
	////all-ones test matrices, and runs the three timed GPU tests.
	if(name::team=="Team_X"){
		printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
		return 0;
	}
	std::string file_name=name::team+"_competition_1_matrix.dat";
	out.open(file_name.c_str());
	if(out.fail()){
		printf("\ncannot open file %s to record results\n",file_name.c_str());
		return 0;
	}
	//////////////////////////////////////////////////////////////////////////
	////NOTICE: We may use a different set of parameters to evaluate your code.
	////So please test your functions with different size and initial values.
	//////////////////////////////////////////////////////////////////////////
	const int m=512;
	const int n=2048;
	const int p=1024;
	////Fill every entry of the input matrices with ones.
	Matrix h_A(m,n);
	for(int r=0;r<m;r++)
		for(int c=0;c<n;c++)
			h_A(r,c)=1;
	Matrix h_B(n,p);
	for(int r=0;r<n;r++)
		for(int c=0;c<p;c++)
			h_B(r,c)=1;
	Matrix h_C(m,p);
	Matrix h_B2(m,m);
	for(int r=0;r<m;r++)
		for(int c=0;c<m;c++)
			h_B2(r,c)=1;
	Matrix h_C2(n,n);
	////Task 1: C=A*B
	Test_Matrix_Multiplication_AB_On_GPU(h_A,h_B,h_C);
	cout<<"AB result: "<<h_C(h_C.m/2,h_C.n/2)<<endl;
	out<<"R1: "<<h_C(h_C.m/2,h_C.n/2)<<endl;
	////Task 2: C=A^T*B*A
	Test_Matrix_Multiplication_ATBA_On_GPU(h_A,h_B2,h_C2);
	cout<<"ATBA result: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;
	out<<"R2: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;
	////Task 3: Frobenius norm of A
	float f_norm=0.f;
	Test_Matrix_F_Norm_On_GPU(h_A,f_norm);
	cout<<"F-norm result: "<<f_norm<<endl;
	out<<"R3: "<<f_norm<<endl;
	return 0;
}
////This is the code implementation for GPU Premier League Round 1
//////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <hip/hip_runtime.h>
using namespace std;
//////////////////////////////////////////////////////////////////////////
////TODO 0: Please replace the following strings with your team name and author names
////Note: Please do not use space in the string, use "_" instead
//////////////////////////////////////////////////////////////////////////
namespace name
{
std::string team="using_namespace_std;";
std::string author_1="Jeff Liu";
};
////This is a matrix class to carry out linear algebra operations on both GPU and CPU
////It is the same as the sample code I showed in class on Week 3.
////NOTICE: You do not have to change the implementation in this class.
////But if you do want to change part of it for performance reasons, please let us known by writting a submission note on Canvas.
class Matrix{
public:
int m=0; ////number of rows
int n=0; ////number of columns
vector<float> elements_on_host; ////we use a std::vector for the element array on host
float* elements_on_dev=0; ////we use a pointer for the element array on device
bool on_host=true;
////constructors
__host__ Matrix(){}
__host__ Matrix(const int _m,const int _n,bool _on_host=true)
{
on_host=_on_host;
if(on_host)Resize_On_Host(_m,_n);
else Resize_On_Device(_m,_n);
}
////destructor
__host__ ~Matrix()
{
if(!on_host&&elements_on_dev!=0) hipFree(elements_on_dev);
}
////Resize on host or device
__host__ void Resize_On_Host(const int _m,const int _n)
{
if(m==_m&&n==_n)return;
m=_m;
n=_n;
elements_on_host.resize(m*n);
}
__host__ void Resize_On_Device(const int _m,const int _n)
{
if(m==_m&&n==_n)return;
m=_m;
n=_n;
if(elements_on_dev!=0)hipFree(elements_on_dev);
hipMalloc((void**)&elements_on_dev,m*n*sizeof(float));
}
////random access a matrix element
inline __host__ float& operator() (const int i,const int j)
{
return elements_on_host[i*n+j];
}
inline __host__ const float& operator() (const int i,const int j) const
{
return elements_on_host[i*n+j];
}
////copy data with four cases (CPU->CPU, GPU->CPU, GPU->GPU, CPU->GPU)
__host__ Matrix& operator= (const Matrix& mtx)
{
if(on_host&&mtx.on_host){
Resize_On_Host(mtx.m,mtx.n);
elements_on_host=mtx.elements_on_host;
}
else if(on_host&&!mtx.on_host){
Resize_On_Host(mtx.m,mtx.n);
hipMemcpy(&elements_on_host[0],mtx.elements_on_dev,m*n*sizeof(float),hipMemcpyDeviceToHost);
}
else if(!on_host&&!mtx.on_host){
Resize_On_Device(mtx.m,mtx.n);
hipMemcpy(elements_on_dev,mtx.elements_on_dev,mtx.m*n*sizeof(float),hipMemcpyDeviceToDevice);
}
else if(!on_host&&mtx.on_host){
Resize_On_Device(mtx.m,mtx.n);
hipMemcpy(elements_on_dev,&mtx.elements_on_host[0],m*n*sizeof(float),hipMemcpyHostToDevice);
}
return *this;
}
////print matrix elements on screen
__host__ friend ostream & operator << (ostream &out,const Matrix &mtx)
{
if(!mtx.on_host)
cout<<"Print for matrix on device is not supported."<<endl;
for(int i=0;i<mtx.m;i++){
for(int j=0;j<mtx.n;j++){
out<<mtx(i,j)<<", ";
}
out<<std::endl;
}
return out;
}
};
//////////////////////////////////////////////////////////////////////////
////Your tasks start!
////This is a sample implementation without using any memory hierarchy
////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm
__global__ void Matrix_Multiplication_AB_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
float val=0.f;
for(int k=0;k<An;k++)
val+=Ae[i*An+k]*Be[k*Bn+j];
Ce[i*Bn+j]=val;
}
//////////////////////////////////////////////////////////////////////////
////Task 1: implement your fast matrix-matrix multiplication in the following kernel function.
////The function parameters are the same as the sample function:
////The function calculates C=A*B, with dimA=[Am,An], dimB=[Bm,Bn], dimC=[Am,bn], and An=Bm
//////////////////////////////////////////////////////////////////////////
__global__ void Matrix_Multiplication_AB_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An,const int Bn)
{
// initialize memory
const int block_size = 32;
const int num_tiles = An / block_size;
__shared__ float a_shared[block_size][block_size];
__shared__ float b_shared[block_size][block_size];
__shared__ float c_shared[block_size][block_size];
// calculate 1d index of correct item on A, B, C
int thr_per_block = blockDim.y * blockDim.x;
int c_idx = blockIdx.y * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
c_shared[threadIdx.y][threadIdx.x] = 0; // set everything to zero just the first time
int a_idx, b_idx;
for (int tile = 0; tile < num_tiles; ++tile) {
// want blockIdx.x to increment
a_idx = blockIdx.y * num_tiles * thr_per_block + threadIdx.y * num_tiles * blockDim.x + tile * blockDim.x + threadIdx.x;
// want blockIdx.y to increment
b_idx = tile * gridDim.x * thr_per_block + threadIdx.y * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x;
a_shared[threadIdx.y][threadIdx.x] = Ae[a_idx];
b_shared[threadIdx.y][threadIdx.x] = Be[b_idx];
__syncthreads();
// lmao loop unrolling time my dudes
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][0] * b_shared[0][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][1] * b_shared[1][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][2] * b_shared[2][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][3] * b_shared[3][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][4] * b_shared[4][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][5] * b_shared[5][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][6] * b_shared[6][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][7] * b_shared[7][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][8] * b_shared[8][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][9] * b_shared[9][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][10] * b_shared[10][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][11] * b_shared[11][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][12] * b_shared[12][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][13] * b_shared[13][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][14] * b_shared[14][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][15] * b_shared[15][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][16] * b_shared[16][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][17] * b_shared[17][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][18] * b_shared[18][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][19] * b_shared[19][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][20] * b_shared[20][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][21] * b_shared[21][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][22] * b_shared[22][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][23] * b_shared[23][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][24] * b_shared[24][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][25] * b_shared[25][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][26] * b_shared[26][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][27] * b_shared[27][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][28] * b_shared[28][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][29] * b_shared[29][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][30] * b_shared[30][threadIdx.x];
c_shared[threadIdx.y][threadIdx.x] += a_shared[threadIdx.y][31] * b_shared[31][threadIdx.x];
__syncthreads();
}
// save to global
Ce[c_idx] = c_shared[threadIdx.y][threadIdx.x];
}
////This is a sample implementation without using any memory hierarchy
////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An]
__global__ void Matrix_Multiplication_ATBA_Kernel_Poorman(const float* Ae,const float* Be,float* Ce,const int Am,const int An)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
int j=blockIdx.y*blockDim.y+threadIdx.y;
float val=0.f;
for(int l=0;l<Am;l++)
for(int k=0;k<Am;k++)
val+=Ae[l*An+i]*Be[l*Am+k]*Ae[k*An+j];
Ce[i*An+j]=val;
}
//////////////////////////////////////////////////////////////////////////
////Task 2: calculate the matrix multiplication in the following kernel function.
////The function parameters are the same as the sample function:
////The function calculates the matrix multiplication, with C=A^T*B*A, A^T is the transpose of A, dimA=[Am,An], dimB=[Am,Am], and dimC=[An,An]
//////////////////////////////////////////////////////////////////////////
__global__ void Matrix_Multiplication_ATBA_Kernel_Your_Version(const float* Ae,const float* Be,float* Ce,const int Am,const int An)
{
    // Computes C = A^T * B * A with 32x32 shared-memory tiles.
    // Expects blockDim == (32,32) and a grid covering the An x An output;
    // assumes Am and An are multiples of 32 -- TODO confirm the launch site guarantees this.
    const int num_tiles = Am / 32;
    __shared__ float aTT_shared[32][32];   // tile of A, read column-wise below (acts as A^T)
    __shared__ float b_shared[32][32];     // tile of B
    __shared__ float a_shared[32][32];     // tile of A (right-hand factor)
    __shared__ float accum_shared[32][32]; // running tile of (A^T * B)
    __shared__ float c_shared[32][32];     // running tile of C
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Flattened global index of this thread's output element.
    const int thr_per_block = blockDim.y * blockDim.x;
    const int c_idx = blockIdx.y*gridDim.x*thr_per_block + ty*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + tx;
    c_shared[ty][tx] = 0;
    // Tile coordinates below are written (row, col) == (y, x).
    for (int ay = 0; ay < num_tiles; ++ay) { // ay = bx
        // Load a(ay, blockIdx.x) and reset the (A^T*B) accumulator for this tile row.
        const int a_idx = ay*gridDim.x*thr_per_block + ty*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + tx;
        a_shared[ty][tx] = Ae[a_idx];
        accum_shared[ty][tx] = 0;
        __syncthreads();
        for (int by = 0; by < num_tiles; ++by) { // by = aTx = aTTy
            // Load b(by, ay) and aTT(by, blockIdx.y); A is stored row-wise but
            // indexed [k][ty] below, which yields the transpose access.
            const int b_idx = by*num_tiles*thr_per_block + ty*num_tiles*blockDim.x + ay*blockDim.x + tx;
            const int aTT_idx = by*gridDim.x*thr_per_block + ty*gridDim.x*blockDim.x + blockIdx.y*blockDim.x + tx;
            b_shared[ty][tx] = Be[b_idx];
            aTT_shared[ty][tx] = Ae[aTT_idx];
            __syncthreads();
            // accum += (tile of A)^T * (tile of B); the loop reproduces the same
            // k = 0..31 accumulation order as a fully unrolled sequence.
            #pragma unroll
            for (int k = 0; k < 32; ++k)
                accum_shared[ty][tx] += aTT_shared[k][ty] * b_shared[k][tx];
            __syncthreads();
        }
        // c += accum * (tile of A). accum_shared[ty][k] is read across threads,
        // so it must stay in shared memory (a register accumulator would be wrong).
        #pragma unroll
        for (int k = 0; k < 32; ++k)
            c_shared[ty][tx] += accum_shared[ty][k] * a_shared[k][tx];
        __syncthreads();
    }
    // Write the finished 32x32 output tile back to global memory.
    Ce[c_idx] = c_shared[ty][tx];
}
//////////////////////////////////////////////////////////////////////////
////Task 3: calculate the Frobenius norm of a matrix
////The definition of F-norm for a matrix is square root of (the sum of squares of all the matrix elements), i.e., F=sqrt(sum_(A_ij^2))
////See the definition: https://mathworld.wolfram.com/FrobeniusNorm.html
//////////////////////////////////////////////////////////////////////////
////Please write your own kernel function here, and call it in the function Test_F_Norm_On_GPU to test its correctness and performance
__global__ void F_Norm_On_GPU_Lazy(const float* Ae, float* sum)
{
    // Reference "lazy" implementation: each thread squares one element of Ae and
    // atomically accumulates it into sum[0].
    // Assumes the grid exactly covers the matrix (dims are multiples of blockDim)
    // and that sum[0] was zeroed by the caller before launch -- TODO confirm both
    // at the call site (hipMalloc does not zero memory).
    int thr_per_block = blockDim.y * blockDim.x;
    int idx = blockIdx.y*gridDim.x*thr_per_block + threadIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x;
    float element = Ae[idx];
    // The original staged element*element through a 16x16 __shared__ array before
    // the atomicAdd; no other thread ever read that slot, so the round-trip (and
    // the implicit 16x16 blockDim coupling) is removed.
    atomicAdd(&sum[0], element * element);
}
// One pass of a shared-memory tree reduction. Each block consumes 2*blockDim.x
// consecutive floats of Ae and writes one partial sum to Be[blockIdx.x].
// When round1 is true the inputs are squared first (F-norm pass 1); a second
// launch with round1=false reduces the partials without re-squaring.
// NOTE(review): the halving loop assumes blockDim.x is a power of two and that
// the grid exactly tiles the input -- confirm at the launch sites.
__global__ void F_Norm_On_GPU(const float* Ae, float* Be, bool round1)
{
// Dynamic shared memory: blockDim.x floats, sized via the 3rd launch argument.
extern __shared__ float data[];
// Each block owns a 2*blockDim.x-wide slice; each thread loads two elements.
int idx = blockIdx.x*blockDim.x*2 + threadIdx.x;
// use 2 registers
float num1 = Ae[idx];
float num2 = Ae[idx + blockDim.x]; // offset by stride is better for alignment
// only square first time
if (round1) {
num1 *= num1;
num2 *= num2;
}
// add two elements into one shared index
data[threadIdx.x] = num1 + num2;
__syncthreads();
// from reduce4 in class notes
// Pairwise tree reduction: active thread count halves each iteration; the
// barrier sits outside the divergent if so every thread reaches it.
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if(threadIdx.x < s){
data[threadIdx.x]+=data[threadIdx.x+s];
}
__syncthreads();
}
// Thread 0 publishes the block's partial sum.
if (threadIdx.x == 0) Be[blockIdx.x] = data[0];
}
////Congratulations, your tasks are all finished!
//////////////////////////////////////////////////////////////////////////
////Here are the test functions for your three kernel implementations
ofstream out;
// Runs C = A*B on the GPU, times the kernel with HIP events, copies C back,
// and logs the elapsed milliseconds to the global `out` stream as "T1".
// NOTE(review): no error checking after the launch; a failed kernel would be
// silent here -- consider hipGetLastError in a debug build.
__host__ void Test_Matrix_Multiplication_AB_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
//// Load A and B to device memory
// Matrix assignment performs the host->device transfer.
Matrix A_on_dev(A.m,A.n,false);
A_on_dev=A;
Matrix B_on_dev(B.m,B.n,false);
B_on_dev=B;
//// Allocate C in device memory
Matrix C_on_dev(A_on_dev.m,B_on_dev.n,false);
// Bracket the launch with events so only kernel time is measured.
hipEvent_t start,end;
hipEventCreate(&start);
hipEventCreate(&end);
float gpu_time=0.0f;
hipDeviceSynchronize();
hipEventRecord(start);
//// Invoke kernel
// Assumes C.m and C.n are multiples of 32 -- TODO confirm for non-default sizes.
const int block_size=32;
const int block_num_x=C.m/block_size;
const int block_num_y=C.n/block_size;
#ifdef POORMAN
Matrix_Multiplication_AB_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif
#ifndef POORMAN
// Grid is (block_num_y, block_num_x) -- swapped relative to the Poorman launch;
// presumably the custom kernel indexes blockIdx accordingly -- verify against it.
Matrix_Multiplication_AB_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n,B_on_dev.n);
#endif
hipEventRecord(end);
hipEventSynchronize(end);
hipEventElapsedTime(&gpu_time,start,end);
printf("\nGPU runtime for matrix multiplication AB: %.4f ms\n",gpu_time);
hipEventDestroy(start);
hipEventDestroy(end);
//// Transfer data back to CPU
C=C_on_dev;
out<<"T1: "<<gpu_time<<endl;
}
// Runs C = A^T*B*A on the GPU, times the kernel with HIP events, copies the
// An x An result back, and logs the elapsed milliseconds to `out` as "T2".
__host__ void Test_Matrix_Multiplication_ATBA_On_GPU(const Matrix& A,const Matrix& B,Matrix& C)
{
    // Upload the operands; Matrix assignment performs the host->device copy.
    Matrix A_on_dev(A.m,A.n,false);
    A_on_dev=A;
    Matrix B_on_dev(B.m,B.n,false);
    B_on_dev=B;
    // A^T*B*A is An x An.
    Matrix C_on_dev(A_on_dev.n,A_on_dev.n,false);
    // Event pair brackets just the kernel launch for timing.
    hipEvent_t ev_begin,ev_done;
    hipEventCreate(&ev_begin);
    hipEventCreate(&ev_done);
    float elapsed_ms=0.0f;
    hipDeviceSynchronize();
    hipEventRecord(ev_begin);
    //// Invoke kernel
    const int block_size=32;
    const int block_num_x=C.m/block_size;
    const int block_num_y=C.n/block_size;
#ifdef POORMAN
    Matrix_Multiplication_ATBA_Kernel_Poorman<<<dim3(block_num_x,block_num_y),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif
#ifndef POORMAN
    ////NOTICE: You do not have to use the block_size I specified here. You may customize the size of your grid and blocks for better performance.
    Matrix_Multiplication_ATBA_Kernel_Your_Version<<<dim3(block_num_y,block_num_x),dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev,B_on_dev.elements_on_dev,C_on_dev.elements_on_dev,A_on_dev.m,A_on_dev.n);
#endif
    hipEventRecord(ev_done);
    hipEventSynchronize(ev_done);
    hipEventElapsedTime(&elapsed_ms,ev_begin,ev_done);
    printf("\nGPU runtime for matrix multiplication ATBA: %.4f ms\n",elapsed_ms);
    hipEventDestroy(ev_begin);
    hipEventDestroy(ev_done);
    // Copy the result back and log the timing.
    C=C_on_dev;
    out<<"T2: "<<elapsed_ms<<endl;
}
// Computes the Frobenius norm of A on the GPU and returns it via `norm`;
// logs the elapsed milliseconds to `out` as "T3".
// POORMAN build: single atomicAdd reduction. Otherwise: two-pass tree reduction.
__host__ void Test_Matrix_F_Norm_On_GPU(const Matrix& A, float& norm)
{
	//// Load A to device memory (Matrix assignment performs the transfer)
	Matrix A_on_dev(A.m,A.n,false);
	A_on_dev=A;
	hipEvent_t start,end;
	hipEventCreate(&start);
	hipEventCreate(&end);
	float gpu_time=0.0f;
	hipDeviceSynchronize();
	hipEventRecord(start);
#ifdef POORMAN // atomic add
	//// Invoke kernel
	// Assumes A.m and A.n are multiples of 16 -- TODO confirm for non-default sizes.
	const int block_size=16;
	const int block_num_x=A.n/block_size;
	const int block_num_y=A.m/block_size;
	float *sum_dev = nullptr;
	hipMalloc((void**)&sum_dev, sizeof(float));
	// BUGFIX: hipMalloc does not zero memory, so the atomicAdd accumulation
	// previously started from garbage. Zero the accumulator before the launch.
	hipMemset(sum_dev, 0, sizeof(float));
	F_Norm_On_GPU_Lazy<<<dim3(block_num_x,block_num_y), dim3(block_size,block_size)>>>(A_on_dev.elements_on_dev, sum_dev);
	// Stack float replaces the hard-coded malloc(4) heap round trip.
	float sum_host = 0.0f;
	hipMemcpy(&sum_host, sum_dev, sizeof(float), hipMemcpyDeviceToHost);
	hipFree(sum_dev);
	norm = sqrt(sum_host);
#endif // ifdef
#ifndef POORMAN // parallel reduction
	// Pass 1: one block per row squares and reduces A.n elements to one partial.
	// Pass 2: a single block reduces the A.m partials in place in B_dev.
	// NOTE(review): F_Norm_On_GPU's halving loop assumes blockDim.x is a power
	// of two, so A.n/2 and A.m/2 should be powers of two -- confirm for other sizes.
	const int r1_blocks = A.m;
	const int r1_threads = A.n / 2;
	const int r2_threads = A.m / 2;
	float *B_dev = nullptr;
	hipMalloc((void**)&B_dev, A.m * sizeof(float));
	F_Norm_On_GPU<<<r1_blocks, r1_threads, r1_threads*sizeof(float)>>>(A_on_dev.elements_on_dev, B_dev, true);
	F_Norm_On_GPU<<<1, r2_threads, r2_threads*sizeof(float)>>>(B_dev, B_dev, false);
	float result = 0;
	hipMemcpy(&result,B_dev,sizeof(float),hipMemcpyDeviceToHost);
	norm = sqrt(result);
	hipFree(B_dev);
#endif // ifndef
	hipEventRecord(end);
	hipEventSynchronize(end);
	hipEventElapsedTime(&gpu_time,start,end);
	printf("\nGPU runtime for F norm: %.4f ms\n",gpu_time);
	hipEventDestroy(start);
	hipEventDestroy(end);
	out<<"T3: "<<gpu_time<<endl;
}
// Entry point: builds all-ones test matrices, runs the three GPU tests
// (AB, A^T*B*A, F-norm), prints a sample of each result, and records
// timings/results to "<team>_competition_1_matrix.dat".
int main()
{
// Refuse to run until the team name placeholder has been replaced.
if(name::team=="Team_X"){
printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
return 0;
}
std::string file_name=name::team+"_competition_1_matrix.dat";
out.open(file_name.c_str());
if(out.fail()){
printf("\ncannot open file %s to record results\n",file_name.c_str());
return 0;
}
//////////////////////////////////////////////////////////////////////////
////NOTICE: We may use a different set of parameters to evaluate your code.
////So please test your functions with different size and initial values.
//////////////////////////////////////////////////////////////////////////
// Problem sizes: A is m x n, B is n x p, B2 is m x m.
const int m=512;
const int n=2048;
const int p=1024;
// A filled with ones.
Matrix h_A(m,n);
for(int i=0;i<m;i++){
for(int j=0;j<n;j++){
h_A(i,j) = 1;
}
}
// B filled with ones.
Matrix h_B(n,p);
for(int i=0;i<n;i++){
for(int j=0;j<p;j++){
h_B(i,j) = 1;
}
}
Matrix h_C(m,p);
// B2 (for the ATBA test) filled with ones.
Matrix h_B2(m,m);
for(int i=0;i<m;i++){
for(int j=0;j<m;j++){
h_B2(i,j) = 1;
}
}
Matrix h_C2(n,n);
// Task 1: C = A*B; sample one element as a sanity check.
Test_Matrix_Multiplication_AB_On_GPU(h_A,h_B,h_C);
cout<<"AB result: "<<h_C(h_C.m/2,h_C.n/2)<<endl;
out<<"R1: "<<h_C(h_C.m/2,h_C.n/2)<<endl;
// Task 2: C2 = A^T*B2*A.
Test_Matrix_Multiplication_ATBA_On_GPU(h_A,h_B2,h_C2);
cout<<"ATBA result: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;
out<<"R2: "<<h_C2(h_C2.m/3,h_C2.n/3)<<endl;
// Task 3: Frobenius norm of A.
float f_norm=0.f;
Test_Matrix_F_Norm_On_GPU(h_A,f_norm);
cout<<"F-norm result: "<<f_norm<<endl;
out<<"R3: "<<f_norm<<endl;
return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} | code for sm_80
Function : _Z6vecAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} | .file "tmpxft_000283f7_00000000-6_vecAdd1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10vecAdd_cpuPiS_S_
.type _Z10vecAdd_cpuPiS_S_, @function
_Z10vecAdd_cpuPiS_S_:
.LFB2057:
.cfi_startproc
endbr64
movl $0, %eax
.L4:
movl (%rsi,%rax), %ecx
addl (%rdi,%rax), %ecx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq $1024, %rax
jne .L4
ret
.cfi_endproc
.LFE2057:
.size _Z10vecAdd_cpuPiS_S_, .-_Z10vecAdd_cpuPiS_S_
.globl _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6vecAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6vecAddPiS_S_PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z29__device_stub__Z6vecAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6vecAddPiS_S_PiS_S_
.globl _Z6vecAddPiS_S_
.type _Z6vecAddPiS_S_, @function
_Z6vecAddPiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6vecAddPiS_S_, .-_Z6vecAddPiS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Time for sending initial data from host to device : %f\t sec\n"
.align 8
.LC3:
.string "Cuda program launched with 1 block and %d threads\n"
.align 8
.LC4:
.string "Time for sending calculated data from device to host : %f\t sec\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "GPU Time:%f seconds\n"
.LC6:
.string "CPU Time:%f seconds\n"
.LC7:
.string "TEST PASSED\n"
.LC8:
.string "SPEED UP:%f\n"
.LC9:
.string "TEST FAILED\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $4096, %rsp
.cfi_def_cfa_offset 4112
orq $0, (%rsp)
subq $144, %rsp
.cfi_def_cfa_offset 4256
movq %fs:40, %rax
movq %rax, 4232(%rsp)
xorl %eax, %eax
.L15:
leal (%rax,%rax), %edx
movl %edx, 128(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 1152(%rsp,%rax,4)
addq $1, %rax
cmpq $256, %rax
jne .L15
leaq 40(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
leaq 128(%rsp), %rsi
movl $1, %ecx
movl $1024, %edx
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq 1152(%rsp), %rsi
movl $1, %ecx
movl $1024, %edx
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 80(%rsp), %rdi
call cudaEventCreate@PLT
leaq 88(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 80(%rsp), %rdi
call cudaEventRecord@PLT
movl $256, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 88(%rsp), %rdi
call cudaEventRecord@PLT
movq 88(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 28(%rsp), %rdi
movq 88(%rsp), %rdx
movq 80(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 96(%rsp), %rdi
call cudaEventCreate@PLT
leaq 112(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 96(%rsp), %rdi
call cudaEventRecord@PLT
leaq 2176(%rsp), %rdi
movl $2, %ecx
movl $1024, %edx
movq 56(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %esi
movq 112(%rsp), %rdi
call cudaEventRecord@PLT
movq 112(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 112(%rsp), %rdx
movq 96(%rsp), %rsi
call cudaEventElapsedTime@PLT
call clock@PLT
movq %rax, %rbx
leaq 3200(%rsp), %rdx
leaq 1152(%rsp), %rsi
leaq 128(%rsp), %rdi
call _Z10vecAdd_cpuPiS_S_
call clock@PLT
movss .LC0(%rip), %xmm2
movss 36(%rsp), %xmm0
divss %xmm2, %xmm0
movss %xmm0, 36(%rsp)
movss 32(%rsp), %xmm1
divss %xmm2, %xmm1
movss %xmm1, 32(%rsp)
movss 28(%rsp), %xmm1
divss %xmm2, %xmm1
movss %xmm1, 28(%rsp)
subq %rbx, %rax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
divss .LC1(%rip), %xmm1
movss %xmm1, 12(%rsp)
cvtss2sd %xmm0, %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $256, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 32(%rsp), %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %eax
leaq 3200(%rsp), %rdx
.L18:
movl (%rdx,%rax), %ebx
cmpl %ebx, 2176(%rsp,%rax)
jne .L17
addq $4, %rax
cmpq $1024, %rax
jne .L18
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss 12(%rsp), %xmm0
divss 28(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L20:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movl $0, %edi
call exit@PLT
.L26:
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
jmp .L16
.L17:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L20
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z6vecAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1148846080
.align 4
.LC1:
.long 1232348160
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b,size);
hipMalloc((void**) &dev_c,size);
hipEvent_t startinit,endinit;
hipEventCreate(&startinit);
hipEventCreate(&endinit);
hipEventRecord(startinit, 0);
hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice);
hipEventRecord(endinit, 0);
hipEventSynchronize(endinit);
hipEventElapsedTime(&timeinit, startinit, endinit);
hipEvent_t gpu_start,gpu_end;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_end);
hipEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
hipDeviceSynchronize();
hipEventRecord(gpu_end, 0);
hipEventSynchronize(gpu_end);
hipEventElapsedTime(&time_gpu, gpu_start, gpu_end);
hipEvent_t startindex,endindex;
hipEventCreate(&startindex);
hipEventCreate(&endindex);
hipEventRecord(startindex, 0);
hipMemcpy(gpu_add,dev_c,size,hipMemcpyDeviceToHost);
hipEventRecord(endindex, 0);
hipEventSynchronize(endindex);
hipEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b,size);
hipMalloc((void**) &dev_c,size);
hipEvent_t startinit,endinit;
hipEventCreate(&startinit);
hipEventCreate(&endinit);
hipEventRecord(startinit, 0);
hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice);
hipEventRecord(endinit, 0);
hipEventSynchronize(endinit);
hipEventElapsedTime(&timeinit, startinit, endinit);
hipEvent_t gpu_start,gpu_end;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_end);
hipEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
hipDeviceSynchronize();
hipEventRecord(gpu_end, 0);
hipEventSynchronize(gpu_end);
hipEventElapsedTime(&time_gpu, gpu_start, gpu_end);
hipEvent_t startindex,endindex;
hipEventCreate(&startindex);
hipEventCreate(&endindex);
hipEventRecord(startindex, 0);
hipMemcpy(gpu_add,dev_c,size,hipMemcpyDeviceToHost);
hipEventRecord(endindex, 0);
hipEventSynchronize(endindex);
hipEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPiS_S_
.globl _Z6vecAddPiS_S_
.p2align 8
.type _Z6vecAddPiS_S_,@function
_Z6vecAddPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_S_, .Lfunc_end0-_Z6vecAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b,size);
hipMalloc((void**) &dev_c,size);
hipEvent_t startinit,endinit;
hipEventCreate(&startinit);
hipEventCreate(&endinit);
hipEventRecord(startinit, 0);
hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice);
hipEventRecord(endinit, 0);
hipEventSynchronize(endinit);
hipEventElapsedTime(&timeinit, startinit, endinit);
hipEvent_t gpu_start,gpu_end;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_end);
hipEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
hipDeviceSynchronize();
hipEventRecord(gpu_end, 0);
hipEventSynchronize(gpu_end);
hipEventElapsedTime(&time_gpu, gpu_start, gpu_end);
hipEvent_t startindex,endindex;
hipEventCreate(&startindex);
hipEventCreate(&endindex);
hipEventRecord(startindex, 0);
hipMemcpy(gpu_add,dev_c,size,hipMemcpyDeviceToHost);
hipEventRecord(endindex, 0);
hipEventSynchronize(endindex);
hipEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
} | .text
.file "vecAdd1.hip"
.globl _Z10vecAdd_cpuPiS_S_ # -- Begin function _Z10vecAdd_cpuPiS_S_
.p2align 4, 0x90
.type _Z10vecAdd_cpuPiS_S_,@function
_Z10vecAdd_cpuPiS_S_: # @_Z10vecAdd_cpuPiS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl (%rsi,%rax,4), %ecx
addl (%rdi,%rax,4), %ecx
movl %ecx, (%rdx,%rax,4)
incq %rax
cmpq $256, %rax # imm = 0x100
jne .LBB0_1
# %bb.2:
retq
.Lfunc_end0:
.size _Z10vecAdd_cpuPiS_S_, .Lfunc_end0-_Z10vecAdd_cpuPiS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x447a0000 # float 1000
.LCPI1_1:
.long 0x49742400 # float 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $4232, %rsp # imm = 0x1088
.cfi_def_cfa_offset 4256
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 3200(%rsp,%rcx,4)
movl %ecx, %edx
imull %ecx, %edx
movl %edx, 2176(%rsp,%rcx,4)
incq %rcx
addl $2, %eax
cmpq $256, %rcx # imm = 0x100
jne .LBB1_1
# %bb.2:
leaq 72(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 64(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 56(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 104(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movq 104(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 72(%rsp), %rdi
leaq 3200(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
leaq 2176(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movq 104(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
leaq 96(%rsp), %rdi
callq hipEventCreate
leaq 40(%rsp), %rdi
callq hipEventCreate
movq 96(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq 56(%rsp), %rdx
movq %rax, 24(%rsp)
movq %rcx, 8(%rsp)
movq %rdx, 16(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 1152(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 1152(%rsp), %rsi
movl 1160(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6vecAddPiS_S_, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
movq 40(%rsp), %rdi
xorl %r14d, %r14d
xorl %esi, %esi
callq hipEventRecord
movq 40(%rsp), %rdi
callq hipEventSynchronize
movq 96(%rsp), %rsi
movq 40(%rsp), %rdx
leaq 8(%rsp), %rdi
callq hipEventElapsedTime
leaq 80(%rsp), %rdi
callq hipEventCreate
leaq 24(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $1024, %edx # imm = 0x400
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 24(%rsp), %rdi
callq hipEventSynchronize
movq 80(%rsp), %rsi
movq 24(%rsp), %rdx
leaq 16(%rsp), %rdi
callq hipEventElapsedTime
callq clock
movq %rax, %rbx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 2176(%rsp,%r14,4), %eax
addl 3200(%rsp,%r14,4), %eax
movl %eax, 1152(%rsp,%r14,4)
incq %r14
cmpq $256, %r14 # imm = 0x100
jne .LBB1_5
# %bb.6: # %_Z10vecAdd_cpuPiS_S_.exit
callq clock
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, 36(%rsp)
movss 16(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
movss %xmm2, 16(%rsp)
movss 8(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
subq %rbx, %rax
xorps %xmm1, %xmm1
cvtsi2ss %rax, %xmm1
movss %xmm2, 8(%rsp)
divss .LCPI1_1(%rip), %xmm1
movss %xmm1, 32(%rsp) # 4-byte Spill
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
xorl %ebx, %ebx
movl $.L.str.1, %edi
movl $256, %esi # imm = 0x100
xorl %eax, %eax
callq printf
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movss 8(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
.p2align 4, 0x90
.LBB1_8: # =>This Inner Loop Header: Depth=1
movl 128(%rsp,%rbx,4), %eax
cmpl 1152(%rsp,%rbx,4), %eax
jne .LBB1_9
# %bb.7: # in Loop: Header=BB1_8 Depth=1
incq %rbx
cmpq $256, %rbx # imm = 0x100
jne .LBB1_8
# %bb.11: # %.critedge
movl $.Lstr.1, %edi
callq puts@PLT
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 8(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
jmp .LBB1_10
.LBB1_9:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_10:
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
xorl %edi, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z21__device_stub__vecAddPiS_S_ # -- Begin function _Z21__device_stub__vecAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_S_,@function
_Z21__device_stub__vecAddPiS_S_: # @_Z21__device_stub__vecAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z21__device_stub__vecAddPiS_S_, .Lfunc_end2-_Z21__device_stub__vecAddPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPiS_S_,@object # @_Z6vecAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_S_
.p2align 3, 0x0
_Z6vecAddPiS_S_:
.quad _Z21__device_stub__vecAddPiS_S_
.size _Z6vecAddPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Time for sending initial data from host to device : %f\t sec\n"
.size .L.str, 61
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Cuda program launched with 1 block and %d threads\n"
.size .L.str.1, 51
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Time for sending calculated data from device to host : %f\t sec\n"
.size .L.str.2, 64
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPU Time:%f seconds\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "CPU Time:%f seconds\n"
.size .L.str.4, 21
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "SPEED UP:%f\n"
.size .L.str.6, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST FAILED"
.size .Lstr, 12
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "TEST PASSED"
.size .Lstr.1, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6vecAddPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0050*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*0090*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPiS_S_
.globl _Z6vecAddPiS_S_
.p2align 8
.type _Z6vecAddPiS_S_,@function
_Z6vecAddPiS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_S_, .Lfunc_end0-_Z6vecAddPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000283f7_00000000-6_vecAdd1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10vecAdd_cpuPiS_S_
.type _Z10vecAdd_cpuPiS_S_, @function
_Z10vecAdd_cpuPiS_S_:
.LFB2057:
.cfi_startproc
endbr64
movl $0, %eax
.L4:
movl (%rsi,%rax), %ecx
addl (%rdi,%rax), %ecx
movl %ecx, (%rdx,%rax)
addq $4, %rax
cmpq $1024, %rax
jne .L4
ret
.cfi_endproc
.LFE2057:
.size _Z10vecAdd_cpuPiS_S_, .-_Z10vecAdd_cpuPiS_S_
.globl _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
.type _Z29__device_stub__Z6vecAddPiS_S_PiS_S_, @function
_Z29__device_stub__Z6vecAddPiS_S_PiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L10
.L6:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L11
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L6
.L11:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z29__device_stub__Z6vecAddPiS_S_PiS_S_, .-_Z29__device_stub__Z6vecAddPiS_S_PiS_S_
.globl _Z6vecAddPiS_S_
.type _Z6vecAddPiS_S_, @function
_Z6vecAddPiS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6vecAddPiS_S_, .-_Z6vecAddPiS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "Time for sending initial data from host to device : %f\t sec\n"
.align 8
.LC3:
.string "Cuda program launched with 1 block and %d threads\n"
.align 8
.LC4:
.string "Time for sending calculated data from device to host : %f\t sec\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "GPU Time:%f seconds\n"
.LC6:
.string "CPU Time:%f seconds\n"
.LC7:
.string "TEST PASSED\n"
.LC8:
.string "SPEED UP:%f\n"
.LC9:
.string "TEST FAILED\n"
.text
.globl main
.type main, @function
main:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
subq $4096, %rsp
.cfi_def_cfa_offset 4112
orq $0, (%rsp)
subq $144, %rsp
.cfi_def_cfa_offset 4256
movq %fs:40, %rax
movq %rax, 4232(%rsp)
xorl %eax, %eax
.L15:
leal (%rax,%rax), %edx
movl %edx, 128(%rsp,%rax,4)
movl %eax, %edx
imull %eax, %edx
movl %edx, 1152(%rsp,%rax,4)
addq $1, %rax
cmpq $256, %rax
jne .L15
leaq 40(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movl $1024, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
call cudaEventCreate@PLT
leaq 72(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 64(%rsp), %rdi
call cudaEventRecord@PLT
leaq 128(%rsp), %rsi
movl $1, %ecx
movl $1024, %edx
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
leaq 1152(%rsp), %rsi
movl $1, %ecx
movl $1024, %edx
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 72(%rsp), %rdi
call cudaEventRecord@PLT
movq 72(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 36(%rsp), %rdi
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 80(%rsp), %rdi
call cudaEventCreate@PLT
leaq 88(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 80(%rsp), %rdi
call cudaEventRecord@PLT
movl $256, 112(%rsp)
movl $1, 116(%rsp)
movl $1, 120(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 112(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L16:
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 88(%rsp), %rdi
call cudaEventRecord@PLT
movq 88(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 28(%rsp), %rdi
movq 88(%rsp), %rdx
movq 80(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq 96(%rsp), %rdi
call cudaEventCreate@PLT
leaq 112(%rsp), %rdi
call cudaEventCreate@PLT
movl $0, %esi
movq 96(%rsp), %rdi
call cudaEventRecord@PLT
leaq 2176(%rsp), %rdi
movl $2, %ecx
movl $1024, %edx
movq 56(%rsp), %rsi
call cudaMemcpy@PLT
movl $0, %esi
movq 112(%rsp), %rdi
call cudaEventRecord@PLT
movq 112(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 32(%rsp), %rdi
movq 112(%rsp), %rdx
movq 96(%rsp), %rsi
call cudaEventElapsedTime@PLT
call clock@PLT
movq %rax, %rbx
leaq 3200(%rsp), %rdx
leaq 1152(%rsp), %rsi
leaq 128(%rsp), %rdi
call _Z10vecAdd_cpuPiS_S_
call clock@PLT
movss .LC0(%rip), %xmm2
movss 36(%rsp), %xmm0
divss %xmm2, %xmm0
movss %xmm0, 36(%rsp)
movss 32(%rsp), %xmm1
divss %xmm2, %xmm1
movss %xmm1, 32(%rsp)
movss 28(%rsp), %xmm1
divss %xmm2, %xmm1
movss %xmm1, 28(%rsp)
subq %rbx, %rax
pxor %xmm1, %xmm1
cvtsi2ssq %rax, %xmm1
divss .LC1(%rip), %xmm1
movss %xmm1, 12(%rsp)
cvtss2sd %xmm0, %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $256, %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 32(%rsp), %xmm0
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 28(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $0, %eax
leaq 3200(%rsp), %rdx
.L18:
movl (%rdx,%rax), %ebx
cmpl %ebx, 2176(%rsp,%rax)
jne .L17
addq $4, %rax
cmpq $1024, %rax
jne .L18
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movss 12(%rsp), %xmm0
divss 28(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
.L20:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movl $0, %edi
call exit@PLT
.L26:
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z29__device_stub__Z6vecAddPiS_S_PiS_S_
jmp .L16
.L17:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L20
.cfi_endproc
.LFE2058:
.size main, .-main
.section .rodata.str1.1
.LC10:
.string "_Z6vecAddPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1148846080
.align 4
.LC1:
.long 1232348160
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "vecAdd1.hip"
.globl _Z10vecAdd_cpuPiS_S_ # -- Begin function _Z10vecAdd_cpuPiS_S_
.p2align 4, 0x90
.type _Z10vecAdd_cpuPiS_S_,@function
_Z10vecAdd_cpuPiS_S_: # @_Z10vecAdd_cpuPiS_S_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
.p2align 4, 0x90
.LBB0_1: # =>This Inner Loop Header: Depth=1
movl (%rsi,%rax,4), %ecx
addl (%rdi,%rax,4), %ecx
movl %ecx, (%rdx,%rax,4)
incq %rax
cmpq $256, %rax # imm = 0x100
jne .LBB0_1
# %bb.2:
retq
.Lfunc_end0:
.size _Z10vecAdd_cpuPiS_S_, .Lfunc_end0-_Z10vecAdd_cpuPiS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI1_0:
.long 0x447a0000 # float 1000
.LCPI1_1:
.long 0x49742400 # float 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $4232, %rsp # imm = 0x1088
.cfi_def_cfa_offset 4256
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %eax, 3200(%rsp,%rcx,4)
movl %ecx, %edx
imull %ecx, %edx
movl %edx, 2176(%rsp,%rcx,4)
incq %rcx
addl $2, %eax
cmpq $256, %rcx # imm = 0x100
jne .LBB1_1
# %bb.2:
leaq 72(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 64(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 56(%rsp), %rdi
movl $1024, %esi # imm = 0x400
callq hipMalloc
leaq 104(%rsp), %rdi
callq hipEventCreate
leaq 48(%rsp), %rdi
callq hipEventCreate
movq 104(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 72(%rsp), %rdi
leaq 3200(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movl $1, %ecx
callq hipMemcpy
movq 64(%rsp), %rdi
leaq 2176(%rsp), %rsi
movl $1024, %edx # imm = 0x400
movl $1, %ecx
callq hipMemcpy
movq 48(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 48(%rsp), %rdi
callq hipEventSynchronize
movq 104(%rsp), %rsi
movq 48(%rsp), %rdx
leaq 36(%rsp), %rdi
callq hipEventElapsedTime
leaq 96(%rsp), %rdi
callq hipEventCreate
leaq 40(%rsp), %rdi
callq hipEventCreate
movq 96(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 255(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq 56(%rsp), %rdx
movq %rax, 24(%rsp)
movq %rcx, 8(%rsp)
movq %rdx, 16(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 1152(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 120(%rsp), %rdx
leaq 112(%rsp), %rcx
callq __hipPopCallConfiguration
movq 1152(%rsp), %rsi
movl 1160(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z6vecAddPiS_S_, %edi
pushq 112(%rsp)
.cfi_adjust_cfa_offset 8
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
callq hipDeviceSynchronize
movq 40(%rsp), %rdi
xorl %r14d, %r14d
xorl %esi, %esi
callq hipEventRecord
movq 40(%rsp), %rdi
callq hipEventSynchronize
movq 96(%rsp), %rsi
movq 40(%rsp), %rdx
leaq 8(%rsp), %rdi
callq hipEventElapsedTime
leaq 80(%rsp), %rdi
callq hipEventCreate
leaq 24(%rsp), %rdi
callq hipEventCreate
movq 80(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 56(%rsp), %rsi
leaq 128(%rsp), %rdi
movl $1024, %edx # imm = 0x400
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 24(%rsp), %rdi
callq hipEventSynchronize
movq 80(%rsp), %rsi
movq 24(%rsp), %rdx
leaq 16(%rsp), %rdi
callq hipEventElapsedTime
callq clock
movq %rax, %rbx
.p2align 4, 0x90
.LBB1_5: # =>This Inner Loop Header: Depth=1
movl 2176(%rsp,%r14,4), %eax
addl 3200(%rsp,%r14,4), %eax
movl %eax, 1152(%rsp,%r14,4)
incq %r14
cmpq $256, %r14 # imm = 0x100
jne .LBB1_5
# %bb.6: # %_Z10vecAdd_cpuPiS_S_.exit
callq clock
movss 36(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, 36(%rsp)
movss 16(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
movss %xmm2, 16(%rsp)
movss 8(%rsp), %xmm2 # xmm2 = mem[0],zero,zero,zero
divss %xmm1, %xmm2
subq %rbx, %rax
xorps %xmm1, %xmm1
cvtsi2ss %rax, %xmm1
movss %xmm2, 8(%rsp)
divss .LCPI1_1(%rip), %xmm1
movss %xmm1, 32(%rsp) # 4-byte Spill
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
xorl %ebx, %ebx
movl $.L.str.1, %edi
movl $256, %esi # imm = 0x100
xorl %eax, %eax
callq printf
movss 16(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movss 8(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.3, %edi
movb $1, %al
callq printf
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
.p2align 4, 0x90
.LBB1_8: # =>This Inner Loop Header: Depth=1
movl 128(%rsp,%rbx,4), %eax
cmpl 1152(%rsp,%rbx,4), %eax
jne .LBB1_9
# %bb.7: # in Loop: Header=BB1_8 Depth=1
incq %rbx
cmpq $256, %rbx # imm = 0x100
jne .LBB1_8
# %bb.11: # %.critedge
movl $.Lstr.1, %edi
callq puts@PLT
movss 32(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
divss 8(%rsp), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
jmp .LBB1_10
.LBB1_9:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_10:
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
xorl %edi, %edi
callq exit
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z21__device_stub__vecAddPiS_S_ # -- Begin function _Z21__device_stub__vecAddPiS_S_
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_S_,@function
_Z21__device_stub__vecAddPiS_S_: # @_Z21__device_stub__vecAddPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z21__device_stub__vecAddPiS_S_, .Lfunc_end2-_Z21__device_stub__vecAddPiS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPiS_S_,@object # @_Z6vecAddPiS_S_
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_S_
.p2align 3, 0x0
_Z6vecAddPiS_S_:
.quad _Z21__device_stub__vecAddPiS_S_
.size _Z6vecAddPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Time for sending initial data from host to device : %f\t sec\n"
.size .L.str, 61
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Cuda program launched with 1 block and %d threads\n"
.size .L.str.1, 51
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Time for sending calculated data from device to host : %f\t sec\n"
.size .L.str.2, 64
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "GPU Time:%f seconds\n"
.size .L.str.3, 21
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "CPU Time:%f seconds\n"
.size .L.str.4, 21
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "SPEED UP:%f\n"
.size .L.str.6, 13
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_S_"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST FAILED"
.size .Lstr, 12
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "TEST PASSED"
.size .Lstr.1, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
} | code for sm_80
Function : _Z12__extractmatPdPiPxi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fc60000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fc60007ffe0ff */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0070*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ MOV R13, c[0x0][0x0] ; /* 0x00000000000d7a02 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00b0*/ IMAD R13, R13, c[0x0][0xc], RZ ; /* 0x000003000d0d7a24 */
/* 0x000fe400078e02ff */
/*00c0*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */
/* 0x001fd400000001ff */
/*00d0*/ IMAD.WIDE R10, R0, R7, c[0x0][0x170] ; /* 0x00005c00000a7625 */
/* 0x000fca00078e0207 */
/*00e0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ ISETP.GE.AND P0, PT, R2.reuse, RZ, PT ; /* 0x000000ff0200720c */
/* 0x044fe40003f06270 */
/*0100*/ LOP3.LUT R3, R2, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff02037812 */
/* 0x000fd600078ec0ff */
/*0110*/ @!P0 IADD3 R2, -R3, RZ, RZ ; /* 0x000000ff03028210 */
/* 0x000fca0007ffe1ff */
/*0120*/ STL [R1], R2 ; /* 0x0000000201007387 */
/* 0x0001e80000100800 */
/*0130*/ LDL.64 R4, [R1] ; /* 0x0000000001047983 */
/* 0x000ea20000100a00 */
/*0140*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fe200078e0207 */
/*0150*/ MOV R9, 0x4 ; /* 0x0000000400097802 */
/* 0x000fc80000000f00 */
/*0160*/ STG.E.64 [R6.64], R4 ; /* 0x0000000406007986 */
/* 0x0041e8000c101b04 */
/*0170*/ LDG.E R3, [R10.64+0x4] ; /* 0x000004040a037981 */
/* 0x000ea2000c1e1900 */
/*0180*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fc800078e0209 */
/*0190*/ IMAD R0, R13, c[0x0][0x10], R0 ; /* 0x000004000d007a24 */
/* 0x000fca00078e0200 */
/*01a0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*01b0*/ STG.E [R8.64], R3 ; /* 0x0000000308007986 */
/* 0x0041d8000c101904 */
/*01c0*/ @!P0 BRA 0xc0 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
} | .file "tmpxft_0002cf4d_00000000-6___extractmat.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
.type _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi, @function
_Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12__extractmatPdPiPxi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi, .-_Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
.globl _Z12__extractmatPdPiPxi
.type _Z12__extractmatPdPiPxi, @function
_Z12__extractmatPdPiPxi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z12__extractmatPdPiPxi, .-_Z12__extractmatPdPiPxi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12__extractmatPdPiPxi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12__extractmatPdPiPxi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
int vi = *((int *)&c[i]);
if (vi & signbit) {
vi = -(vi & mag);
}
a[i] = *((double *)&vi);
b[i] = *(((int *)&c[i])+1);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12__extractmatPdPiPxi
.globl _Z12__extractmatPdPiPxi
.p2align 8
.type _Z12__extractmatPdPiPxi,@function
_Z12__extractmatPdPiPxi:
s_clause 0x2
s_load_b32 s9, s[2:3], 0x20
s_load_b32 s6, s[2:3], 0x2c
s_load_b32 s8, s[2:3], 0x18
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s4, s2, 32
s_addc_u32 s5, s3, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s7, s9, s15
s_and_b32 s10, s6, 0xffff
s_add_i32 s7, s7, s14
s_mov_b32 s6, exec_lo
v_mad_u64_u32 v[1:2], null, s7, s10, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x4
s_load_b32 s11, s[4:5], 0x4
s_clause 0x1
s_load_b128 s[4:7], s[2:3], 0x0
s_load_b64 s[2:3], s[2:3], 0x10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s0, s0, s1
v_mul_lo_u32 v2, s0, v3
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u32_u24 v2, v3, s1, v2
s_mul_i32 s1, s9, s10
s_mov_b32 s9, 0
s_mul_i32 s1, s1, s11
v_add_lshl_u32 v0, v2, v0, 2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_lshlrev_b64 v[9:10], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v4, vcc_lo
v_add_co_u32 v2, s0, s4, v3
v_add_co_ci_u32_e64 v3, s0, s5, v4, s0
global_load_b64 v[5:6], v[5:6], off
v_add_co_u32 v4, s0, s6, v9
s_waitcnt vmcnt(0)
v_and_b32_e32 v7, 0x7fffffff, v5
v_cmp_gt_i32_e32 vcc_lo, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v7, 0, v7
v_cndmask_b32_e32 v5, v5, v7, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s8, v1
ds_store_b32 v0, v5
ds_load_2addr_b32 v[7:8], v0 offset1:1
v_add_co_ci_u32_e64 v5, s0, s7, v10, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt lgkmcnt(0)
global_store_b64 v[2:3], v[7:8], off
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12__extractmatPdPiPxi
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12__extractmatPdPiPxi, .Lfunc_end0-_Z12__extractmatPdPiPxi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12__extractmatPdPiPxi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12__extractmatPdPiPxi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// Splits each 64-bit element of c[] into two 32-bit halves: the
// (sign-adjusted) low word is bit-copied into a[i], the high word into b[i].
// No numeric conversion takes place — only raw reinterpretation.
// Thread index is flattened over a 2-D grid of 1-D blocks, and the for-loop
// is a grid-stride loop, so any launch shape covers all n elements.
__global__ void __extractmat(double *a, int *b, long long *c, int n) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
// Bit masks applied to the low 32-bit word of c[i].
const int signbit = 0x80000000;
const int mag = 0x7fffffff;
for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) {
// Low 32 bits of c[i] (assumes little-endian layout — TODO confirm).
int vi = *((int *)&c[i]);
if (vi & signbit) {
// Reinterpret a sign-magnitude-style low word as a negative
// two's-complement value.
vi = -(vi & mag);
}
// NOTE(review): this reads 8 bytes through a pointer to the 4-byte local
// `vi`, so the upper half of a[i] is indeterminate, and the cast violates
// strict aliasing. Verify this bit-level behavior is intentional.
a[i] = *((double *)&vi);
// High 32 bits of c[i] go to b[i] unchanged.
b[i] = *(((int *)&c[i])+1);
}
} | .text
.file "__extractmat.hip"
.globl _Z27__device_stub____extractmatPdPiPxi # -- Begin function _Z27__device_stub____extractmatPdPiPxi
.p2align 4, 0x90
.type _Z27__device_stub____extractmatPdPiPxi,@function
_Z27__device_stub____extractmatPdPiPxi: # @_Z27__device_stub____extractmatPdPiPxi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12__extractmatPdPiPxi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub____extractmatPdPiPxi, .Lfunc_end0-_Z27__device_stub____extractmatPdPiPxi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12__extractmatPdPiPxi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12__extractmatPdPiPxi,@object # @_Z12__extractmatPdPiPxi
.section .rodata,"a",@progbits
.globl _Z12__extractmatPdPiPxi
.p2align 3, 0x0
_Z12__extractmatPdPiPxi:
.quad _Z27__device_stub____extractmatPdPiPxi
.size _Z12__extractmatPdPiPxi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12__extractmatPdPiPxi"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub____extractmatPdPiPxi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12__extractmatPdPiPxi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12__extractmatPdPiPxi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fc60000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ IADD3 R1, R1, -0x8, RZ ; /* 0xfffffff801017810 */
/* 0x000fc60007ffe0ff */
/*0030*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R5 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0205 */
/*0070*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fda0003f06270 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ MOV R13, c[0x0][0x0] ; /* 0x00000000000d7a02 */
/* 0x000fe20000000f00 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00b0*/ IMAD R13, R13, c[0x0][0xc], RZ ; /* 0x000003000d0d7a24 */
/* 0x000fe400078e02ff */
/*00c0*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff077435 */
/* 0x001fd400000001ff */
/*00d0*/ IMAD.WIDE R10, R0, R7, c[0x0][0x170] ; /* 0x00005c00000a7625 */
/* 0x000fca00078e0207 */
/*00e0*/ LDG.E R2, [R10.64] ; /* 0x000000040a027981 */
/* 0x000ea4000c1e1900 */
/*00f0*/ ISETP.GE.AND P0, PT, R2.reuse, RZ, PT ; /* 0x000000ff0200720c */
/* 0x044fe40003f06270 */
/*0100*/ LOP3.LUT R3, R2, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff02037812 */
/* 0x000fd600078ec0ff */
/*0110*/ @!P0 IADD3 R2, -R3, RZ, RZ ; /* 0x000000ff03028210 */
/* 0x000fca0007ffe1ff */
/*0120*/ STL [R1], R2 ; /* 0x0000000201007387 */
/* 0x0001e80000100800 */
/*0130*/ LDL.64 R4, [R1] ; /* 0x0000000001047983 */
/* 0x000ea20000100a00 */
/*0140*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fe200078e0207 */
/*0150*/ MOV R9, 0x4 ; /* 0x0000000400097802 */
/* 0x000fc80000000f00 */
/*0160*/ STG.E.64 [R6.64], R4 ; /* 0x0000000406007986 */
/* 0x0041e8000c101b04 */
/*0170*/ LDG.E R3, [R10.64+0x4] ; /* 0x000004040a037981 */
/* 0x000ea2000c1e1900 */
/*0180*/ IMAD.WIDE R8, R0, R9, c[0x0][0x168] ; /* 0x00005a0000087625 */
/* 0x000fc800078e0209 */
/*0190*/ IMAD R0, R13, c[0x0][0x10], R0 ; /* 0x000004000d007a24 */
/* 0x000fca00078e0200 */
/*01a0*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x178], PT ; /* 0x00005e0000007a0c */
/* 0x000fe20003f06270 */
/*01b0*/ STG.E [R8.64], R3 ; /* 0x0000000308007986 */
/* 0x0041d8000c101904 */
/*01c0*/ @!P0 BRA 0xc0 ; /* 0xfffffef000008947 */
/* 0x000fea000383ffff */
/*01d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01e0*/ BRA 0x1e0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12__extractmatPdPiPxi
.globl _Z12__extractmatPdPiPxi
.p2align 8
.type _Z12__extractmatPdPiPxi,@function
_Z12__extractmatPdPiPxi:
s_clause 0x2
s_load_b32 s9, s[2:3], 0x20
s_load_b32 s6, s[2:3], 0x2c
s_load_b32 s8, s[2:3], 0x18
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s4, s2, 32
s_addc_u32 s5, s3, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s7, s9, s15
s_and_b32 s10, s6, 0xffff
s_add_i32 s7, s7, s14
s_mov_b32 s6, exec_lo
v_mad_u64_u32 v[1:2], null, s7, s10, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s8, v1
s_cbranch_execz .LBB0_3
s_load_b64 s[0:1], s[0:1], 0x4
s_load_b32 s11, s[4:5], 0x4
s_clause 0x1
s_load_b128 s[4:7], s[2:3], 0x0
s_load_b64 s[2:3], s[2:3], 0x10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s0, s0, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s0, s0, s1
v_mul_lo_u32 v2, s0, v3
v_bfe_u32 v3, v0, 10, 10
v_bfe_u32 v0, v0, 20, 10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u32_u24 v2, v3, s1, v2
s_mul_i32 s1, s9, s10
s_mov_b32 s9, 0
s_mul_i32 s1, s1, s11
v_add_lshl_u32 v0, v2, v0, 2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[3:4], 3, v[1:2]
v_lshlrev_b64 v[9:10], 2, v[1:2]
v_add_nc_u32_e32 v1, s1, v1
v_add_co_u32 v5, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v6, vcc_lo, s3, v4, vcc_lo
v_add_co_u32 v2, s0, s4, v3
v_add_co_ci_u32_e64 v3, s0, s5, v4, s0
global_load_b64 v[5:6], v[5:6], off
v_add_co_u32 v4, s0, s6, v9
s_waitcnt vmcnt(0)
v_and_b32_e32 v7, 0x7fffffff, v5
v_cmp_gt_i32_e32 vcc_lo, 0, v5
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v7, 0, v7
v_cndmask_b32_e32 v5, v5, v7, vcc_lo
v_cmp_le_i32_e32 vcc_lo, s8, v1
ds_store_b32 v0, v5
ds_load_2addr_b32 v[7:8], v0 offset1:1
v_add_co_ci_u32_e64 v5, s0, s7, v10, s0
s_or_b32 s9, vcc_lo, s9
s_waitcnt lgkmcnt(0)
global_store_b64 v[2:3], v[7:8], off
global_store_b32 v[4:5], v6, off
s_and_not1_b32 exec_lo, exec_lo, s9
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_set_inst_prefetch_distance 0x2
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12__extractmatPdPiPxi
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 1
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 2
.amdhsa_next_free_vgpr 11
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12__extractmatPdPiPxi, .Lfunc_end0-_Z12__extractmatPdPiPxi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12__extractmatPdPiPxi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12__extractmatPdPiPxi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 11
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002cf4d_00000000-6___extractmat.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
.type _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi, @function
_Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12__extractmatPdPiPxi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi, .-_Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
.globl _Z12__extractmatPdPiPxi
.type _Z12__extractmatPdPiPxi, @function
_Z12__extractmatPdPiPxi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12__extractmatPdPiPxiPdPiPxi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z12__extractmatPdPiPxi, .-_Z12__extractmatPdPiPxi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z12__extractmatPdPiPxi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z12__extractmatPdPiPxi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "__extractmat.hip"
.globl _Z27__device_stub____extractmatPdPiPxi # -- Begin function _Z27__device_stub____extractmatPdPiPxi
.p2align 4, 0x90
.type _Z27__device_stub____extractmatPdPiPxi,@function
_Z27__device_stub____extractmatPdPiPxi: # @_Z27__device_stub____extractmatPdPiPxi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12__extractmatPdPiPxi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z27__device_stub____extractmatPdPiPxi, .Lfunc_end0-_Z27__device_stub____extractmatPdPiPxi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12__extractmatPdPiPxi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12__extractmatPdPiPxi,@object # @_Z12__extractmatPdPiPxi
.section .rodata,"a",@progbits
.globl _Z12__extractmatPdPiPxi
.p2align 3, 0x0
_Z12__extractmatPdPiPxi:
.quad _Z27__device_stub____extractmatPdPiPxi
.size _Z12__extractmatPdPiPxi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z12__extractmatPdPiPxi"
.size .L__unnamed_1, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub____extractmatPdPiPxi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12__extractmatPdPiPxi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// Element-wise sum C[i] = A[i] + B[i], staged through block shared memory.
// Expects a 1-D grid of 1-D blocks; each thread handles one element,
// guarded by the i < N bounds check.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Only slots 0..15 are ever written (index is threadIdx.x % 16); the
// remaining 240 floats stay uninitialized.
__shared__ float d[256];
if (i < N) {
// NOTE(review): all lanes sharing the same threadIdx.x % 16 race on one
// slot, and the read below uses a different index (% 8) with no
// __syncthreads() in between — C[i] may receive another thread's sum.
// Presumably a deliberate demonstration of shared-memory hazards; confirm.
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} | code for sm_80
Function : _Z14sumArraysOnGPUPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R7 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0207 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R13, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0d7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R13, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e020d */
/*0090*/ IMAD.WIDE R2, R6, R13, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fe400078e020d */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ SHF.L.U32 R7, R7, 0x2, RZ ; /* 0x0000000207077819 */
/* 0x000fc800000006ff */
/*00d0*/ LOP3.LUT R9, R7.reuse, 0x3c, RZ, 0xc0, !PT ; /* 0x0000003c07097812 */
/* 0x040fe400078ec0ff */
/*00e0*/ LOP3.LUT R8, R7, 0x1c, RZ, 0xc0, !PT ; /* 0x0000001c07087812 */
/* 0x000fe200078ec0ff */
/*00f0*/ IMAD.WIDE R6, R6, R13, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e020d */
/*0100*/ FADD R0, R4, R3 ; /* 0x0000000304007221 */
/* 0x004fca0000000000 */
/*0110*/ STS [R9], R0 ; /* 0x0000000009007388 */
/* 0x000fe80000000800 */
/*0120*/ LDS R11, [R8] ; /* 0x00000000080b7984 */
/* 0x000e280000000800 */
/*0130*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x001fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// Element-wise sum C[i] = A[i] + B[i], routed through a shared-memory
// scratch array. 1-D launch; each in-bounds thread processes one element.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Scratch buffer: writes only touch indices 0..15, so most of the 256
// slots remain uninitialized.
__shared__ float d[256];
if (i < N) {
// NOTE(review): write index (% 16) and read index (% 8) differ, multiple
// lanes write the same slot, and there is no __syncthreads() between the
// store and the load — the value stored into C[i] is racy. Flagging in
// case this is not an intentional hazard demo.
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} | .file "tmpxft_0011f503_00000000-6_sumArraysOnGPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
.type _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i, @function
_Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14sumArraysOnGPUPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
.globl _Z14sumArraysOnGPUPfS_S_i
.type _Z14sumArraysOnGPUPfS_S_i, @function
_Z14sumArraysOnGPUPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14sumArraysOnGPUPfS_S_i, .-_Z14sumArraysOnGPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14sumArraysOnGPUPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14sumArraysOnGPUPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// Adds A and B element-wise into C, bouncing each sum through block-local
// shared memory. Assumes a 1-D grid/block layout; out-of-range threads
// (i >= N) do nothing.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// 256-float shared scratch; only the first 16 slots are ever stored to.
__shared__ float d[256];
if (i < N) {
// NOTE(review): concurrent lanes alias the same shared slot (% 16) and
// the readback uses % 8 without an intervening __syncthreads(), so C[i]
// can pick up a different lane's sum — verify this race is intended.
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
// HIP port of the CUDA kernel: element-wise C[i] = A[i] + B[i] staged
// through shared memory. 1-D launch; bounds-checked against N.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// Shared scratch; stores hit only indices 0..15 of the 256 slots.
__shared__ float d[256];
if (i < N) {
// NOTE(review): the store index (% 16) and load index (% 8) disagree,
// several threads share each slot, and no barrier separates write from
// read — the result in C[i] is nondeterministic. Confirm this shared-
// memory hazard is deliberate.
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d[256];
if (i < N) {
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14sumArraysOnGPUPfS_S_i
.globl _Z14sumArraysOnGPUPfS_S_i
.p2align 8
.type _Z14sumArraysOnGPUPfS_S_i,@function
_Z14sumArraysOnGPUPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[5:6], off
v_and_b32_e32 v5, 15, v0
v_and_b32_e32 v0, 7, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt vmcnt(0)
v_dual_add_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v4, 2, v5
ds_store_b32 v4, v3
ds_load_b32 v3, v0
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14sumArraysOnGPUPfS_S_i
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14sumArraysOnGPUPfS_S_i, .Lfunc_end0-_Z14sumArraysOnGPUPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14sumArraysOnGPUPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14sumArraysOnGPUPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float d[256];
if (i < N) {
d[threadIdx.x%16]= A[i] + B[i];
C[i] = d[threadIdx.x%8];
}
} | .text
.file "sumArraysOnGPU.hip"
.globl _Z29__device_stub__sumArraysOnGPUPfS_S_i # -- Begin function _Z29__device_stub__sumArraysOnGPUPfS_S_i
.p2align 4, 0x90
.type _Z29__device_stub__sumArraysOnGPUPfS_S_i,@function
_Z29__device_stub__sumArraysOnGPUPfS_S_i: # @_Z29__device_stub__sumArraysOnGPUPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14sumArraysOnGPUPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__sumArraysOnGPUPfS_S_i, .Lfunc_end0-_Z29__device_stub__sumArraysOnGPUPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14sumArraysOnGPUPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14sumArraysOnGPUPfS_S_i,@object # @_Z14sumArraysOnGPUPfS_S_i
.section .rodata,"a",@progbits
.globl _Z14sumArraysOnGPUPfS_S_i
.p2align 3, 0x0
_Z14sumArraysOnGPUPfS_S_i:
.quad _Z29__device_stub__sumArraysOnGPUPfS_S_i
.size _Z14sumArraysOnGPUPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14sumArraysOnGPUPfS_S_i"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__sumArraysOnGPUPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14sumArraysOnGPUPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14sumArraysOnGPUPfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R7 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0207 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R13, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0d7435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R13, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e020d */
/*0090*/ IMAD.WIDE R2, R6, R13, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fe400078e020d */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ SHF.L.U32 R7, R7, 0x2, RZ ; /* 0x0000000207077819 */
/* 0x000fc800000006ff */
/*00d0*/ LOP3.LUT R9, R7.reuse, 0x3c, RZ, 0xc0, !PT ; /* 0x0000003c07097812 */
/* 0x040fe400078ec0ff */
/*00e0*/ LOP3.LUT R8, R7, 0x1c, RZ, 0xc0, !PT ; /* 0x0000001c07087812 */
/* 0x000fe200078ec0ff */
/*00f0*/ IMAD.WIDE R6, R6, R13, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e020d */
/*0100*/ FADD R0, R4, R3 ; /* 0x0000000304007221 */
/* 0x004fca0000000000 */
/*0110*/ STS [R9], R0 ; /* 0x0000000009007388 */
/* 0x000fe80000000800 */
/*0120*/ LDS R11, [R8] ; /* 0x00000000080b7984 */
/* 0x000e280000000800 */
/*0130*/ STG.E [R6.64], R11 ; /* 0x0000000b06007986 */
/* 0x001fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14sumArraysOnGPUPfS_S_i
.globl _Z14sumArraysOnGPUPfS_S_i
.p2align 8
.type _Z14sumArraysOnGPUPfS_S_i,@function
_Z14sumArraysOnGPUPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v5, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v6, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[5:6], off
v_and_b32_e32 v5, 15, v0
v_and_b32_e32 v0, 7, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_lshlrev_b32_e32 v0, 2, v0
s_waitcnt vmcnt(0)
v_dual_add_f32 v3, v3, v4 :: v_dual_lshlrev_b32 v4, 2, v5
ds_store_b32 v4, v3
ds_load_b32 v3, v0
v_add_co_u32 v0, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v2, vcc_lo
s_waitcnt lgkmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14sumArraysOnGPUPfS_S_i
.amdhsa_group_segment_fixed_size 1024
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14sumArraysOnGPUPfS_S_i, .Lfunc_end0-_Z14sumArraysOnGPUPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 1024
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14sumArraysOnGPUPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14sumArraysOnGPUPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011f503_00000000-6_sumArraysOnGPU.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
.type _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i, @function
_Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14sumArraysOnGPUPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i, .-_Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
.globl _Z14sumArraysOnGPUPfS_S_i
.type _Z14sumArraysOnGPUPfS_S_i, @function
_Z14sumArraysOnGPUPfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z39__device_stub__Z14sumArraysOnGPUPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z14sumArraysOnGPUPfS_S_i, .-_Z14sumArraysOnGPUPfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z14sumArraysOnGPUPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14sumArraysOnGPUPfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sumArraysOnGPU.hip"
.globl _Z29__device_stub__sumArraysOnGPUPfS_S_i # -- Begin function _Z29__device_stub__sumArraysOnGPUPfS_S_i
.p2align 4, 0x90
.type _Z29__device_stub__sumArraysOnGPUPfS_S_i,@function
_Z29__device_stub__sumArraysOnGPUPfS_S_i: # @_Z29__device_stub__sumArraysOnGPUPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14sumArraysOnGPUPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z29__device_stub__sumArraysOnGPUPfS_S_i, .Lfunc_end0-_Z29__device_stub__sumArraysOnGPUPfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14sumArraysOnGPUPfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14sumArraysOnGPUPfS_S_i,@object # @_Z14sumArraysOnGPUPfS_S_i
.section .rodata,"a",@progbits
.globl _Z14sumArraysOnGPUPfS_S_i
.p2align 3, 0x0
_Z14sumArraysOnGPUPfS_S_i:
.quad _Z29__device_stub__sumArraysOnGPUPfS_S_i
.size _Z14sumArraysOnGPUPfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z14sumArraysOnGPUPfS_S_i"
.size .L__unnamed_1, 26
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__sumArraysOnGPUPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14sumArraysOnGPUPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
__global__ void rpe_q_forward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
output += index * nhead + head_idx;
atomicAdd(
output,
query_features[0] * lookup_table[0]);
}
void rpe_q_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_forward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt,
relpos, lookup_table, query_features,
output);
}
__global__ void rpe_q_backward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float * grad_lookup_table, float * grad_query_features) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
grad_out += index * nhead + head_idx;
atomicAdd(
grad_query_features,
grad_out[0] * lookup_table[0]);
atomicAdd(
grad_lookup_table,
grad_out[0] * query_features[0]);
}
void rpe_q_grad_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float* grad_lookup_table, float* grad_query_features){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_backward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt, relpos, lookup_table, query_features,
grad_out, grad_lookup_table, grad_query_features);
} | code for sm_80
Function : _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC UR5, c[0x0][0x164] ; /* 0x0000590000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R6, SR_CTAID.Y ; /* 0x0000000000067919 */
/* 0x000e680000002600 */
/*0070*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea20000002700 */
/*0080*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x001fca00078e0200 */
/*0090*/ ISETP.GE.AND P0, PT, R7, UR4, PT ; /* 0x0000000407007c0c */
/* 0x000fc8000bf06270 */
/*00a0*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x16c], P0 ; /* 0x00005b0006007a0c */
/* 0x002fc80000706670 */
/*00b0*/ ISETP.GE.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x004fda0000706670 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*00f0*/ IMAD.WIDE R8, R7, R0, c[0x0][0x180] ; /* 0x0000600007087625 */
/* 0x000fcc00078e0200 */
/*0100*/ LDG.E R9, [R8.64] ; /* 0x0000000608097981 */
/* 0x000ea2000c1e1900 */
/*0110*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */
/* 0x000fe20000000800 */
/*0120*/ IMAD R11, R7, c[0x0][0x16c], R6 ; /* 0x00005b00070b7a24 */
/* 0x000fe200078e0206 */
/*0130*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0140*/ IABS R14, c[0x0][0x168] ; /* 0x00005a00000e7a13 */
/* 0x000fe20000000000 */
/*0150*/ F2I.FLOOR.NTZ R2, R9 ; /* 0x0000000900027305 */
/* 0x004e240000207100 */
/*0160*/ IMNMX R2, RZ, R2, !PT ; /* 0x00000002ff027217 */
/* 0x001fc80007800200 */
/*0170*/ IMNMX R3, R2, UR4, PT ; /* 0x0000000402037c17 */
/* 0x000fca000b800200 */
/*0180*/ IMAD R3, R3, c[0x0][0x16c], R6 ; /* 0x00005b0003037a24 */
/* 0x000fc800078e0206 */
/*0190*/ IMAD R5, R3, c[0x0][0x170], R4 ; /* 0x00005c0003057a24 */
/* 0x000fe400078e0204 */
/*01a0*/ IMAD.WIDE R2, R11, R0, c[0x0][0x198] ; /* 0x000066000b027625 */
/* 0x000fc800078e0200 */
/*01b0*/ IMAD.WIDE R10, R5, R0, c[0x0][0x188] ; /* 0x00006200050a7625 */
/* 0x000fe200078e0200 */
/*01c0*/ LDG.E R8, [R2.64] ; /* 0x0000000602087981 */
/* 0x000eaa000c1e1900 */
/*01d0*/ LDG.E R11, [R10.64] ; /* 0x000000060a0b7981 */
/* 0x0000a2000c1e1900 */
/*01e0*/ I2F.RP R9, R14 ; /* 0x0000000e00097306 */
/* 0x000e620000209400 */
/*01f0*/ IABS R10, R7 ; /* 0x00000007000a7213 */
/* 0x001fe40000000000 */
/*0200*/ LOP3.LUT R7, R7, c[0x0][0x168], RZ, 0x3c, !PT ; /* 0x00005a0007077a12 */
/* 0x000fc800078e3cff */
/*0210*/ ISETP.GE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f26270 */
/*0220*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x002e240000001000 */
/*0230*/ IADD3 R12, R9, 0xffffffe, RZ ; /* 0x0ffffffe090c7810 */
/* 0x001fcc0007ffe0ff */
/*0240*/ F2I.FTZ.U32.TRUNC.NTZ R13, R12 ; /* 0x0000000c000d7305 */
/* 0x000064000021f000 */
/*0250*/ HFMA2.MMA R12, -RZ, RZ, 0, 0 ; /* 0x00000000ff0c7435 */
/* 0x001fe200000001ff */
/*0260*/ IMAD.MOV R15, RZ, RZ, -R13 ; /* 0x000000ffff0f7224 */
/* 0x002fc800078e0a0d */
/*0270*/ IMAD R15, R15, R14, RZ ; /* 0x0000000e0f0f7224 */
/* 0x000fca00078e02ff */
/*0280*/ IMAD.HI.U32 R13, R13, R15, R12 ; /* 0x0000000f0d0d7227 */
/* 0x000fcc00078e000c */
/*0290*/ IMAD.HI.U32 R13, R13, R10, RZ ; /* 0x0000000a0d0d7227 */
/* 0x000fc800078e00ff */
/*02a0*/ IMAD.MOV R9, RZ, RZ, -R13 ; /* 0x000000ffff097224 */
/* 0x000fc800078e0a0d */
/*02b0*/ IMAD R9, R14, R9, R10 ; /* 0x000000090e097224 */
/* 0x000fca00078e020a */
/*02c0*/ ISETP.GT.U32.AND P2, PT, R14, R9, PT ; /* 0x000000090e00720c */
/* 0x000fda0003f44070 */
/*02d0*/ @!P2 IADD3 R9, R9, -R14.reuse, RZ ; /* 0x8000000e0909a210 */
/* 0x080fe40007ffe0ff */
/*02e0*/ @!P2 IADD3 R13, R13, 0x1, RZ ; /* 0x000000010d0da810 */
/* 0x000fe40007ffe0ff */
/*02f0*/ ISETP.GE.U32.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720c */
/* 0x000fe40003f06070 */
/*0300*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fd60003f45270 */
/*0310*/ @P0 IADD3 R13, R13, 0x1, RZ ; /* 0x000000010d0d0810 */
/* 0x000fc80007ffe0ff */
/*0320*/ @!P1 IADD3 R13, -R13, RZ, RZ ; /* 0x000000ff0d0d9210 */
/* 0x000fe40007ffe1ff */
/*0330*/ @!P2 LOP3.LUT R13, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff0daa12 */
/* 0x000fca00078e33ff */
/*0340*/ IMAD R13, R13, c[0x0][0x16c], R6 ; /* 0x00005b000d0d7a24 */
/* 0x000fc800078e0206 */
/*0350*/ IMAD R13, R13, c[0x0][0x170], R4 ; /* 0x00005c000d0d7a24 */
/* 0x000fc800078e0204 */
/*0360*/ IMAD.WIDE R6, R13, R0, c[0x0][0x190] ; /* 0x000064000d067625 */
/* 0x000fc800078e0200 */
/*0370*/ FMUL R11, R11, R8 ; /* 0x000000080b0b7220 */
/* 0x004fe40000400000 */
/*0380*/ IMAD.WIDE R8, R13, R0, c[0x0][0x1a8] ; /* 0x00006a000d087625 */
/* 0x000fca00078e0200 */
/*0390*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R8.64], R11 ; /* 0x0000000b0800798e */
/* 0x000fe8000c10e786 */
/*03a0*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*03c0*/ IMAD.WIDE R4, R5, R0, c[0x0][0x1a0] ; /* 0x0000680005047625 */
/* 0x000fc800078e0200 */
/*03d0*/ FMUL R13, R6, R3 ; /* 0x00000003060d7220 */
/* 0x004fca0000400000 */
/*03e0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R4.64], R13 ; /* 0x0000000d0400798e */
/* 0x000fe2000c10e786 */
/*03f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0400*/ BRA 0x400; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC UR5, c[0x0][0x164] ; /* 0x0000590000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0070*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea20000002700 */
/*0080*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0090*/ ISETP.GE.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */
/* 0x000fc8000bf06270 */
/*00a0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x16c], P0 ; /* 0x00005b0002007a0c */
/* 0x002fc80000706670 */
/*00b0*/ ISETP.GE.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x004fda0000706670 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*00f0*/ IMAD.WIDE R6, R3, R0, c[0x0][0x180] ; /* 0x0000600003067625 */
/* 0x000fcc00078e0200 */
/*0100*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x0000a2000c1e1900 */
/*0110*/ IABS R10, c[0x0][0x168] ; /* 0x00005a00000a7a13 */
/* 0x000fe20000000000 */
/*0120*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */
/* 0x000fe40000000800 */
/*0130*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0140*/ I2F.RP R5, R10 ; /* 0x0000000a00057306 */
/* 0x000e620000209400 */
/*0150*/ IABS R7, R3 ; /* 0x0000000300077213 */
/* 0x001fce0000000000 */
/*0160*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x002e240000001000 */
/*0170*/ IADD3 R8, R5, 0xffffffe, RZ ; /* 0x0ffffffe05087810 */
/* 0x001fcc0007ffe0ff */
/*0180*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0190*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */
/* 0x001fe200000001ff */
/*01a0*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x002fc800078e0a09 */
/*01b0*/ IMAD R11, R11, R10, RZ ; /* 0x0000000a0b0b7224 */
/* 0x000fca00078e02ff */
/*01c0*/ IMAD.HI.U32 R9, R9, R11, R8 ; /* 0x0000000b09097227 */
/* 0x000fcc00078e0008 */
/*01d0*/ IMAD.HI.U32 R9, R9, R7, RZ ; /* 0x0000000709097227 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.MOV R5, RZ, RZ, -R9 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a09 */
/*01f0*/ IMAD R5, R10, R5, R7 ; /* 0x000000050a057224 */
/* 0x000fca00078e0207 */
/*0200*/ ISETP.GT.U32.AND P2, PT, R10, R5, PT ; /* 0x000000050a00720c */
/* 0x000fda0003f44070 */
/*0210*/ @!P2 IADD3 R5, R5, -R10.reuse, RZ ; /* 0x8000000a0505a210 */
/* 0x080fe40007ffe0ff */
/*0220*/ @!P2 IADD3 R9, R9, 0x1, RZ ; /* 0x000000010909a810 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.GE.U32.AND P0, PT, R5, R10, PT ; /* 0x0000000a0500720c */
/* 0x000fe40003f06070 */
/*0240*/ LOP3.LUT R5, R3, c[0x0][0x168], RZ, 0x3c, !PT ; /* 0x00005a0003057a12 */
/* 0x000fe400078e3cff */
/*0250*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe40003f45270 */
/*0260*/ ISETP.GE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fce0003f26270 */
/*0270*/ @P0 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109090810 */
/* 0x000fcc0007ffe0ff */
/*0280*/ @!P1 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff099224 */
/* 0x000fe200078e0a09 */
/*0290*/ @!P2 LOP3.LUT R9, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff09aa12 */
/* 0x000fca00078e33ff */
/*02a0*/ IMAD R9, R9, c[0x0][0x16c], R2 ; /* 0x00005b0009097a24 */
/* 0x000fe200078e0202 */
/*02b0*/ F2I.FLOOR.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x004e240000207100 */
/*02c0*/ IMNMX R5, RZ, R6, !PT ; /* 0x00000006ff057217 */
/* 0x001fc80007800200 */
/*02d0*/ IMNMX R5, R5, UR4, PT ; /* 0x0000000405057c17 */
/* 0x000fca000b800200 */
/*02e0*/ IMAD R5, R5, c[0x0][0x16c], R2 ; /* 0x00005b0005057a24 */
/* 0x000fc800078e0202 */
/*02f0*/ IMAD R7, R5, c[0x0][0x170], R4.reuse ; /* 0x00005c0005077a24 */
/* 0x100fe400078e0204 */
/*0300*/ IMAD R5, R9, c[0x0][0x170], R4 ; /* 0x00005c0009057a24 */
/* 0x000fe400078e0204 */
/*0310*/ IMAD.WIDE R6, R7, R0, c[0x0][0x188] ; /* 0x0000620007067625 */
/* 0x000fc800078e0200 */
/*0320*/ IMAD.WIDE R4, R5, R0, c[0x0][0x190] ; /* 0x0000640005047625 */
/* 0x000fe400078e0200 */
/*0330*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0340*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*0350*/ IMAD R3, R3, c[0x0][0x16c], R2 ; /* 0x00005b0003037a24 */
/* 0x000fc800078e0202 */
/*0360*/ IMAD.WIDE R2, R3, R0, c[0x0][0x198] ; /* 0x0000660003027625 */
/* 0x000fc800078e0200 */
/*0370*/ FMUL R9, R6, R5 ; /* 0x0000000506097220 */
/* 0x004fca0000400000 */
/*0380*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R9 ; /* 0x000000090200798e */
/* 0x000fe2000c10e786 */
/*0390*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03a0*/ BRA 0x3a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward kernel: accumulates one relative-position-encoded attention
// partial product per hidden channel.
// One thread handles one (query, local-neighbor) pair = `index`; grid.y
// selects the head, grid.z selects the hidden channel. Each thread adds
// query_features[q,h,c] * lookup_table[quantized_relpos,h,c] into
// output[q,local,h]. The output element does not depend on hdim_idx, so
// all `hdim` z-blocks hit the same address — hence the atomicAdd below.
// NOTE(review): `b` and `query_batch_cnt` are never read in this kernel —
// presumably kept for signature uniformity; confirm with callers.
__global__ void rpe_q_forward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
// Flattened (query, neighbor) index over total_query_num * local_size.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
// Bounds guard: the x-grid is rounded up to a multiple of the block size.
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Clamp floor(relpos) into [0, l-1] so it is a valid lookup_table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
output += index * nhead + head_idx;
// Atomic: every hdim_idx z-block contributes to this same output element.
atomicAdd(
output,
query_features[0] * lookup_table[0]);
}
// Host launcher for rpe_q_forward.
// Grid: x covers all (query, neighbor) pairs in 256-thread blocks
// (ceil-div via DIVUP), y spans heads, z spans hidden channels.
// NOTE(review): no error check (cudaGetLastError / sync) after the launch;
// kernel failures will only surface at a later synchronizing call.
void rpe_q_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_forward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt,
relpos, lookup_table, query_features,
output);
}
// Backward kernel: mirrors rpe_q_forward's indexing to scatter gradients.
// For each (query, neighbor, head, channel) thread it reads the upstream
// gradient grad_out[q,local,h] and accumulates:
//   grad_query_features += grad_out * lookup_table   (product rule, lhs)
//   grad_lookup_table   += grad_out * query_features (product rule, rhs)
// Both destinations are shared across many threads (grad_query_features
// across local_size neighbors, grad_lookup_table across all pairs that
// quantize to the same row), hence both updates are atomic.
// NOTE(review): `b` and `query_batch_cnt` are unused here as well.
__global__ void rpe_q_backward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float * grad_lookup_table, float * grad_query_features) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
// Flattened (query, neighbor) index; guarded against the rounded-up grid.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Same clamp as the forward pass so fwd/bwd index the same table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
grad_out += index * nhead + head_idx;
atomicAdd(
grad_query_features,
grad_out[0] * lookup_table[0]);
atomicAdd(
grad_lookup_table,
grad_out[0] * query_features[0]);
}
// Host launcher for rpe_q_backward; identical launch geometry to
// rpe_q_launcher (x: pairs / 256, y: heads, z: channels).
// NOTE(review): grad buffers are assumed pre-zeroed by the caller since the
// kernel only accumulates; no post-launch error check here either.
void rpe_q_grad_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float* grad_lookup_table, float* grad_query_features){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_backward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt, relpos, lookup_table, query_features,
grad_out, grad_lookup_table, grad_query_features);
}
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
.type _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf, @function
_Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf:
.LFB2083:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movl %edx, 52(%rsp)
movl %ecx, 48(%rsp)
movl %r8d, 44(%rsp)
movl %r9d, 40(%rsp)
movq 240(%rsp), %rax
movq %rax, 32(%rsp)
movq 248(%rsp), %rax
movq %rax, 24(%rsp)
movq 256(%rsp), %rax
movq %rax, 16(%rsp)
movq 264(%rsp), %rax
movq %rax, 8(%rsp)
movq 272(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 24(%rsp), %rax
movq %rax, 184(%rsp)
leaq 16(%rsp), %rax
movq %rax, 192(%rsp)
leaq 8(%rsp), %rax
movq %rax, 200(%rsp)
movq %rsp, %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf, .-_Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, @function
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
.LFB2084:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .-_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.globl _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.type _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, @function
_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r14d
movl %esi, %ebx
movl %edx, %ebp
movl %ecx, %r12d
movl %r8d, %r13d
movl %r9d, %r15d
movl %esi, %edx
imull %ebp, %edx
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %cl
movzbl %cl, %ecx
leal 255(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $8, %eax
addl %ecx, %eax
movl %eax, 8(%rsp)
movl %r12d, 12(%rsp)
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl %r13d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 136(%rsp)
.cfi_def_cfa_offset 112
pushq 136(%rsp)
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq 136(%rsp)
.cfi_def_cfa_offset 136
pushq 136(%rsp)
.cfi_def_cfa_offset 144
movl %r15d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
movl %ebx, %esi
movl %r14d, %edi
call _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
addq $48, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.cfi_endproc
.LFE2057:
.size _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, .-_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.globl _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2085:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movl %ecx, 64(%rsp)
movl %r8d, 60(%rsp)
movl %r9d, 56(%rsp)
movq 272(%rsp), %rax
movq %rax, 48(%rsp)
movq 280(%rsp), %rax
movq %rax, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq 312(%rsp), %rax
movq %rax, 8(%rsp)
movq 320(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 60(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
leaq 24(%rsp), %rax
movq %rax, 216(%rsp)
leaq 16(%rsp), %rax
movq %rax, 224(%rsp)
leaq 8(%rsp), %rax
movq %rax, 232(%rsp)
movq %rsp, %rax
movq %rax, 240(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r14d
movl %esi, %ebx
movl %edx, %ebp
movl %ecx, %r12d
movl %r8d, %r13d
movl %r9d, %r15d
movl %esi, %edx
imull %ebp, %edx
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %cl
movzbl %cl, %ecx
leal 255(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $8, %eax
addl %ecx, %eax
movl %eax, 8(%rsp)
movl %r12d, 12(%rsp)
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl %r13d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 152(%rsp)
.cfi_def_cfa_offset 112
pushq 152(%rsp)
.cfi_def_cfa_offset 120
pushq 152(%rsp)
.cfi_def_cfa_offset 128
pushq 152(%rsp)
.cfi_def_cfa_offset 136
pushq 152(%rsp)
.cfi_def_cfa_offset 144
pushq 152(%rsp)
.cfi_def_cfa_offset 152
pushq 152(%rsp)
.cfi_def_cfa_offset 160
movl %r15d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
movl %ebx, %esi
movl %r14d, %edi
call _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
addq $64, %rsp
.cfi_def_cfa_offset 96
jmp .L23
.cfi_endproc
.LFE2058:
.size _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_"
.align 8
.LC1:
.string "_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward kernel: accumulates one relative-position-encoded attention
// partial product per hidden channel.
// One thread handles one (query, local-neighbor) pair = `index`; grid.y
// selects the head, grid.z selects the hidden channel. Each thread adds
// query_features[q,h,c] * lookup_table[quantized_relpos,h,c] into
// output[q,local,h]. The output element does not depend on hdim_idx, so
// all `hdim` z-blocks hit the same address — hence the atomicAdd below.
// NOTE(review): `b` and `query_batch_cnt` are never read in this kernel —
// presumably kept for signature uniformity; confirm with callers.
__global__ void rpe_q_forward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
// Flattened (query, neighbor) index over total_query_num * local_size.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
// Bounds guard: the x-grid is rounded up to a multiple of the block size.
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Clamp floor(relpos) into [0, l-1] so it is a valid lookup_table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
output += index * nhead + head_idx;
// Atomic: every hdim_idx z-block contributes to this same output element.
atomicAdd(
output,
query_features[0] * lookup_table[0]);
}
// Host launcher for rpe_q_forward.
// Grid: x covers all (query, neighbor) pairs in 256-thread blocks
// (ceil-div via DIVUP), y spans heads, z spans hidden channels.
// NOTE(review): no error check (cudaGetLastError / sync) after the launch;
// kernel failures will only surface at a later synchronizing call.
void rpe_q_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_forward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt,
relpos, lookup_table, query_features,
output);
}
// Backward kernel: mirrors rpe_q_forward's indexing to scatter gradients.
// For each (query, neighbor, head, channel) thread it reads the upstream
// gradient grad_out[q,local,h] and accumulates:
//   grad_query_features += grad_out * lookup_table   (product rule, lhs)
//   grad_lookup_table   += grad_out * query_features (product rule, rhs)
// Both destinations are shared across many threads (grad_query_features
// across local_size neighbors, grad_lookup_table across all pairs that
// quantize to the same row), hence both updates are atomic.
// NOTE(review): `b` and `query_batch_cnt` are unused here as well.
__global__ void rpe_q_backward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float * grad_lookup_table, float * grad_query_features) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
// Flattened (query, neighbor) index; guarded against the rounded-up grid.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Same clamp as the forward pass so fwd/bwd index the same table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
grad_out += index * nhead + head_idx;
atomicAdd(
grad_query_features,
grad_out[0] * lookup_table[0]);
atomicAdd(
grad_lookup_table,
grad_out[0] * query_features[0]);
}
// Host launcher for rpe_q_backward; identical launch geometry to
// rpe_q_launcher (x: pairs / 256, y: heads, z: channels).
// NOTE(review): grad buffers are assumed pre-zeroed by the caller since the
// kernel only accumulates; no post-launch error check here either.
void rpe_q_grad_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float* grad_lookup_table, float* grad_query_features){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_backward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt, relpos, lookup_table, query_features,
grad_out, grad_lookup_table, grad_query_features);
}
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward kernel (HIP port — source-identical to the CUDA version; the
// atomicAdd/min/max/floor builtins map 1:1 under hipcc).
// One thread handles one (query, local-neighbor) pair = `index`; grid.y
// selects the head, grid.z selects the hidden channel. Each thread adds
// query_features[q,h,c] * lookup_table[quantized_relpos,h,c] into
// output[q,local,h]; the output element does not depend on hdim_idx, so
// all `hdim` z-blocks hit the same address — hence the atomicAdd below.
// NOTE(review): `b` and `query_batch_cnt` are never read in this kernel.
__global__ void rpe_q_forward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
// Flattened (query, neighbor) index over total_query_num * local_size.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
// Bounds guard: the x-grid is rounded up to a multiple of the block size.
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Clamp floor(relpos) into [0, l-1] so it is a valid lookup_table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
output += index * nhead + head_idx;
// Atomic: every hdim_idx z-block contributes to this same output element.
atomicAdd(
output,
query_features[0] * lookup_table[0]);
}
// Host launcher for rpe_q_forward (HIP: the triple-chevron launch syntax
// is supported by hipcc unchanged).
// Grid: x covers all (query, neighbor) pairs in 256-thread blocks
// (ceil-div via DIVUP), y spans heads, z spans hidden channels.
// NOTE(review): no error check (hipGetLastError / sync) after the launch.
void rpe_q_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *output){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params output: [total_query_num, local_size, nhead]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_forward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt,
relpos, lookup_table, query_features,
output);
}
// Backward kernel (HIP port): mirrors rpe_q_forward's indexing to scatter
// gradients. For each (query, neighbor, head, channel) thread it reads the
// upstream gradient grad_out[q,local,h] and accumulates:
//   grad_query_features += grad_out * lookup_table   (product rule, lhs)
//   grad_lookup_table   += grad_out * query_features (product rule, rhs)
// Both destinations are shared across many threads, hence both updates
// are atomic.
// NOTE(review): `b` and `query_batch_cnt` are unused here as well.
__global__ void rpe_q_backward(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float * grad_lookup_table, float * grad_query_features) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
// Flattened (query, neighbor) index; guarded against the rounded-up grid.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// 1. Obtain query features.
int query_idx = index / local_size;
query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain quantize relative position.
relpos += index;
// Same clamp as the forward pass so fwd/bwd index the same table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain output position.
grad_out += index * nhead + head_idx;
atomicAdd(
grad_query_features,
grad_out[0] * lookup_table[0]);
atomicAdd(
grad_lookup_table,
grad_out[0] * query_features[0]);
}
// Host launcher for rpe_q_backward (HIP); identical launch geometry to
// rpe_q_launcher (x: pairs / 256, y: heads, z: channels).
// NOTE(review): grad buffers are assumed pre-zeroed by the caller since the
// kernel only accumulates; no post-launch error check here either.
void rpe_q_grad_launcher(
int b, int total_query_num, int local_size, int nhead, int hdim, int l,
const int *query_batch_cnt,
const float *relpos, const float* lookup_table, const float* query_features,
float *grad_out, float* grad_lookup_table, float* grad_query_features){
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_q_backward<<<blocks, threads>>>(
b, total_query_num, local_size, nhead, hdim, l,
query_batch_cnt, relpos, lookup_table, query_features,
grad_out, grad_lookup_table, grad_query_features);
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward relative-position-encoding (RPE) on queries.
// Thread mapping: blockIdx.x*blockDim.x+threadIdx.x -> flattened
// (query, local-pos) index; blockIdx.y -> head; blockIdx.z -> channel.
// Each thread atomically adds query_features[q,h,c] * lookup_table[pos,h,c]
// into output[index,h]; the sum over channels c is realized by the atomicAdd
// across blockIdx.z.  b and query_batch_cnt are not used inside this kernel.
__global__ void rpe_q_forward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail / over-provisioned dimensions.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features (query owning this local slot).
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position: floor(relpos) clamped to [0, l-1].
    //    NOTE(review): floor() promotes the float to double; floorf would avoid that.
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position and accumulate this channel's contribution.
    output += index * nhead + head_idx;
    atomicAdd(
        output,
        query_features[0] * lookup_table[0]);
}
// Host-side launcher for rpe_q_forward.
// Grid layout: (ceil(total_query_num*local_size / THREADS_PER_BLOCK), nhead, hdim).
// NOTE(review): output is accumulated via atomicAdd in the kernel, so the
// caller is expected to provide a zero-initialized buffer — confirm at call
// sites.  No launch-error check is performed here.
void rpe_q_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output){
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_q_forward<<<blocks, threads>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt,
        relpos, lookup_table, query_features,
        output);
}
// Backward pass of rpe_q_forward.
// Recomputes the same index arithmetic as the forward kernel, then scatters
// grad_out[index,h] into grad_query_features (weighted by the lookup-table
// entry) and grad_lookup_table (weighted by the query feature).  Both writes
// use float atomicAdd because many threads can target the same slot (several
// relpos values quantize to one table row).  b and query_batch_cnt are unused.
__global__ void rpe_q_backward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float * grad_lookup_table, float * grad_query_features) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail / over-provisioned dimensions.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features (and matching gradient slot).
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position: floor(relpos) clamped to [0, l-1],
    //    mirroring the forward kernel so gradients land on the row used there.
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position; chain rule of output = qf * lut:
    //    d/d(qf) = grad_out * lut, d/d(lut) = grad_out * qf.
    grad_out += index * nhead + head_idx;
    atomicAdd(
        grad_query_features,
        grad_out[0] * lookup_table[0]);
    atomicAdd(
        grad_lookup_table,
        grad_out[0] * query_features[0]);
}
// Host-side launcher for rpe_q_backward.
// Grid layout: (ceil(total_query_num*local_size / THREADS_PER_BLOCK), nhead, hdim),
// identical to the forward launch so each forward contribution gets a
// matching backward thread.
// NOTE(review): gradient buffers are accumulated with atomicAdd — callers are
// expected to zero-initialize them; no launch-error check is performed here.
void rpe_q_grad_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float* grad_lookup_table, float* grad_query_features){
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_q_backward<<<blocks, threads>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt, relpos, lookup_table, query_features,
        grad_out, grad_lookup_table, grad_query_features);
}
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 8
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@function
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x4c
s_load_b128 s[16:19], s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s14, s18
v_mad_u64_u32 v[1:2], null, s13, s3, v[0:1]
s_mul_i32 s3, s17, s16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, vcc_lo
s_cmp_lt_i32 s15, s19
s_cselect_b32 s4, -1, 0
s_and_b32 s3, s4, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB0_3
s_load_b256 s[4:11], s[0:1], 0x20
v_ashrrev_i32_e32 v2, 31, v1
s_ashr_i32 s3, s17, 31
v_ashrrev_i32_e32 v4, 31, v1
s_load_b32 s0, s[0:1], 0x14
s_mov_b32 s2, s15
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v1, v4
v_xor_b32_e32 v5, v5, v4
v_xor_b32_e32 v4, s3, v4
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_add_i32 s4, s17, s3
s_add_i32 s0, s0, -1
s_xor_b32 s4, s4, s3
global_load_b32 v0, v[2:3], off
v_cvt_f32_u32_e32 v2, s4
s_sub_i32 s5, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s5, v2
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v2, s4
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v2, v2, v5 :: v_dual_cndmask_b32 v3, v3, v6
v_mad_u64_u32 v[6:7], null, v1, s18, s[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v5, 1, v2
v_cmp_le_u32_e32 vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v7, 31, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v2, v2, v4
v_sub_nc_u32_e32 v4, v2, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v4, s18, s[14:15]
s_waitcnt vmcnt(0)
v_floor_f32_e32 v0, v0
v_cvt_i32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_maxmin_i32 v0, v0, 0, s0
s_mov_b32 s0, 0
v_mad_u64_u32 v[3:4], null, v0, s18, s[14:15]
v_mad_u64_u32 v[4:5], null, v2, s19, s[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, v3, s19, s[2:3]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[2:3], 2, v[4:5]
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s8, v2
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s10, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s11, v5, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v4, v[6:7], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(1)
v_mul_f32_e32 v4, v2, v4
.LBB0_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .Lfunc_end0-_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 8
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x5c
s_load_b128 s[4:7], s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s14, s6
v_mad_u64_u32 v[1:2], null, s13, s3, v[0:1]
s_mul_i32 s3, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, vcc_lo
s_cmp_lt_i32 s15, s7
s_cselect_b32 s4, -1, 0
s_and_b32 s3, s4, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB1_5
s_load_b128 s[8:11], s[0:1], 0x20
v_ashrrev_i32_e32 v2, 31, v1
s_ashr_i32 s3, s5, 31
v_ashrrev_i32_e32 v4, 31, v1
s_add_i32 s4, s5, s3
s_mov_b32 s2, s15
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_xor_b32 s4, s4, s3
v_add_nc_u32_e32 v5, v1, v4
s_sub_i32 s5, 0, s4
s_delay_alu instid0(VALU_DEP_1)
v_xor_b32_e32 v5, v5, v4
v_xor_b32_e32 v4, s3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_load_b64 s[8:9], s[0:1], 0x48
global_load_b32 v0, v[2:3], off
v_cvt_f32_u32_e32 v2, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s5, v2
s_load_b32 s5, s[0:1], 0x14
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s5, -1
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v2, s4
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v3, v3, v6 :: v_dual_cndmask_b32 v2, v2, v5
v_mad_u64_u32 v[6:7], null, v1, s6, s[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v2, v2, v4
v_sub_nc_u32_e32 v5, v2, v4
s_waitcnt vmcnt(0)
v_floor_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f32_e32 v0, v0
v_maxmin_i32 v0, v0, 0, s5
s_load_b64 s[4:5], s[0:1], 0x38
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[2:3], null, v0, s6, s[14:15]
v_mad_u64_u32 v[3:4], null, v5, s6, s[14:15]
v_mad_u64_u32 v[0:1], null, v2, s7, s[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[4:5], null, v3, s7, s[2:3]
v_lshlrev_b64 v[2:3], 2, v[6:7]
s_mov_b32 s2, 0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v5, 31, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[6:7], 2, v[0:1]
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
v_lshlrev_b64 v[8:9], 2, v[4:5]
v_add_co_u32 v10, vcc_lo, s10, v6
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v8
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v9, vcc_lo
global_load_b32 v8, v[2:3], off
global_load_b32 v10, v[10:11], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(1)
v_mul_f32_e32 v10, v8, v10
.LBB1_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v8, v9, v10
global_atomic_cmpswap_b32 v8, v[6:7], v[8:9], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v8, v9
v_mov_b32_e32 v9, v8
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB1_2
s_or_b32 exec_lo, exec_lo, s2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x30
s_load_b64 s[0:1], s[0:1], 0x40
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v4, v[4:5], off
global_load_b32 v3, v[0:1], off
s_mov_b32 s0, 0
s_waitcnt vmcnt(1)
v_mul_f32_e32 v4, v2, v4
.LBB1_4:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB1_4
.LBB1_5:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 336
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end1-_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .offset: 80
.size: 4
.value_kind: hidden_block_count_x
- .offset: 84
.size: 4
.value_kind: hidden_block_count_y
- .offset: 88
.size: 4
.value_kind: hidden_block_count_z
- .offset: 92
.size: 2
.value_kind: hidden_group_size_x
- .offset: 94
.size: 2
.value_kind: hidden_group_size_y
- .offset: 96
.size: 2
.value_kind: hidden_group_size_z
- .offset: 98
.size: 2
.value_kind: hidden_remainder_x
- .offset: 100
.size: 2
.value_kind: hidden_remainder_y
- .offset: 102
.size: 2
.value_kind: hidden_remainder_z
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 144
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 336
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward relative-position-encoding (RPE) on queries.
// Thread mapping: blockIdx.x*blockDim.x+threadIdx.x -> flattened
// (query, local-pos) index; blockIdx.y -> head; blockIdx.z -> channel.
// Each thread atomically adds query_features[q,h,c] * lookup_table[pos,h,c]
// into output[index,h]; the sum over channels c is realized by the atomicAdd
// across blockIdx.z.  b and query_batch_cnt are not used inside this kernel.
__global__ void rpe_q_forward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail / over-provisioned dimensions.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features (query owning this local slot).
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position: floor(relpos) clamped to [0, l-1].
    //    NOTE(review): floor() promotes the float to double; floorf would avoid that.
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position and accumulate this channel's contribution.
    output += index * nhead + head_idx;
    atomicAdd(
        output,
        query_features[0] * lookup_table[0]);
}
// Host-side launcher for rpe_q_forward.
// Grid layout: (ceil(total_query_num*local_size / THREADS_PER_BLOCK), nhead, hdim).
// NOTE(review): output is accumulated via atomicAdd in the kernel, so the
// caller is expected to provide a zero-initialized buffer — confirm at call
// sites.  No launch-error check is performed here.
void rpe_q_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output){
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_q_forward<<<blocks, threads>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt,
        relpos, lookup_table, query_features,
        output);
}
// Backward pass of rpe_q_forward.
// Recomputes the same index arithmetic as the forward kernel, then scatters
// grad_out[index,h] into grad_query_features (weighted by the lookup-table
// entry) and grad_lookup_table (weighted by the query feature).  Both writes
// use float atomicAdd because many threads can target the same slot (several
// relpos values quantize to one table row).  b and query_batch_cnt are unused.
__global__ void rpe_q_backward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float * grad_lookup_table, float * grad_query_features) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail / over-provisioned dimensions.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features (and matching gradient slot).
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position: floor(relpos) clamped to [0, l-1],
    //    mirroring the forward kernel so gradients land on the row used there.
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position; chain rule of output = qf * lut:
    //    d/d(qf) = grad_out * lut, d/d(lut) = grad_out * qf.
    grad_out += index * nhead + head_idx;
    atomicAdd(
        grad_query_features,
        grad_out[0] * lookup_table[0]);
    atomicAdd(
        grad_lookup_table,
        grad_out[0] * query_features[0]);
}
// Host-side launcher for rpe_q_backward.
// Grid layout: (ceil(total_query_num*local_size / THREADS_PER_BLOCK), nhead, hdim),
// identical to the forward launch so each forward contribution gets a
// matching backward thread.
// NOTE(review): gradient buffers are accumulated with atomicAdd — callers are
// expected to zero-initialize them; no launch-error check is performed here.
void rpe_q_grad_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float* grad_lookup_table, float* grad_query_features){
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 threads(THREADS_PER_BLOCK);
    rpe_q_backward<<<blocks, threads>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt, relpos, lookup_table, query_features,
        grad_out, grad_lookup_table, grad_query_features);
}
.file "rpe_q_kernel.hip"
.globl _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf # -- Begin function _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 4, 0x90
.type _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@function
_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf: # @_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .Lfunc_end0-_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.cfi_endproc
# -- End function
.globl _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf # -- Begin function _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.p2align 4, 0x90
.type _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf,@function
_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf: # @_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %ebp
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edx, %edx
testl $-2147483393, %eax # imm = 0x800000FF
setg %dl
addl %ecx, %edx
movq %r14, %rdi
shlq $32, %rdi
orq %rdx, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %r8d, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 288(%rsp), %rax
movq 280(%rsp), %rcx
movq 272(%rsp), %rdx
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
movl %r13d, 20(%rsp)
movl %r12d, 16(%rsp)
movl %r15d, 12(%rsp)
movl %r14d, 8(%rsp)
movl %ebp, 4(%rsp)
movl %ebx, (%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 96(%rsp), %rax
movq %rax, 168(%rsp)
leaq 88(%rsp), %rax
movq %rax, 176(%rsp)
leaq 80(%rsp), %rax
movq %rax, 184(%rsp)
leaq 72(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, .Lfunc_end1-_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.cfi_endproc
# -- End function
.globl _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_ # -- Begin function _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 4, 0x90
.type _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_: # @_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 192(%rsp), %rax
movq %rax, 128(%rsp)
leaq 200(%rsp), %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end2:
.size _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end2-_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_endproc
# -- End function
.globl _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_ # -- Begin function _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 4, 0x90
.type _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_: # @_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %ebp
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edx, %edx
testl $-2147483393, %eax # imm = 0x800000FF
setg %dl
addl %ecx, %edx
movq %r14, %rdi
shlq $32, %rdi
orq %rdx, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %r8d, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 336(%rsp), %rax
movq 328(%rsp), %rcx
movq 320(%rsp), %rdx
movq 312(%rsp), %rsi
movq 304(%rsp), %rdi
movq 296(%rsp), %r8
movq 288(%rsp), %r9
movl %r13d, 20(%rsp)
movl %r12d, 16(%rsp)
movl %r15d, 12(%rsp)
movl %r14d, 8(%rsp)
movl %ebp, 4(%rsp)
movl %ebx, (%rsp)
movq %r9, 120(%rsp)
movq %r8, 112(%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
movq %rsp, %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 88(%rsp), %rax
movq %rax, 208(%rsp)
leaq 80(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rax
movq %rax, 224(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end3-_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@object # @_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.section .rodata,"a",@progbits
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 3, 0x0
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
.quad _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, 8
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@object # @_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 3, 0x0
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
.quad _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf"
.size .L__unnamed_1, 38
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_"
.size .L__unnamed_2, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.addrsig_sym _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.addrsig_sym _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC UR5, c[0x0][0x164] ; /* 0x0000590000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R6, SR_CTAID.Y ; /* 0x0000000000067919 */
/* 0x000e680000002600 */
/*0070*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea20000002700 */
/*0080*/ IMAD R7, R7, c[0x0][0x0], R0 ; /* 0x0000000007077a24 */
/* 0x001fca00078e0200 */
/*0090*/ ISETP.GE.AND P0, PT, R7, UR4, PT ; /* 0x0000000407007c0c */
/* 0x000fc8000bf06270 */
/*00a0*/ ISETP.GE.OR P0, PT, R6, c[0x0][0x16c], P0 ; /* 0x00005b0006007a0c */
/* 0x002fc80000706670 */
/*00b0*/ ISETP.GE.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x004fda0000706670 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*00f0*/ IMAD.WIDE R8, R7, R0, c[0x0][0x180] ; /* 0x0000600007087625 */
/* 0x000fcc00078e0200 */
/*0100*/ LDG.E R9, [R8.64] ; /* 0x0000000608097981 */
/* 0x000ea2000c1e1900 */
/*0110*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */
/* 0x000fe20000000800 */
/*0120*/ IMAD R11, R7, c[0x0][0x16c], R6 ; /* 0x00005b00070b7a24 */
/* 0x000fe200078e0206 */
/*0130*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0140*/ IABS R14, c[0x0][0x168] ; /* 0x00005a00000e7a13 */
/* 0x000fe20000000000 */
/*0150*/ F2I.FLOOR.NTZ R2, R9 ; /* 0x0000000900027305 */
/* 0x004e240000207100 */
/*0160*/ IMNMX R2, RZ, R2, !PT ; /* 0x00000002ff027217 */
/* 0x001fc80007800200 */
/*0170*/ IMNMX R3, R2, UR4, PT ; /* 0x0000000402037c17 */
/* 0x000fca000b800200 */
/*0180*/ IMAD R3, R3, c[0x0][0x16c], R6 ; /* 0x00005b0003037a24 */
/* 0x000fc800078e0206 */
/*0190*/ IMAD R5, R3, c[0x0][0x170], R4 ; /* 0x00005c0003057a24 */
/* 0x000fe400078e0204 */
/*01a0*/ IMAD.WIDE R2, R11, R0, c[0x0][0x198] ; /* 0x000066000b027625 */
/* 0x000fc800078e0200 */
/*01b0*/ IMAD.WIDE R10, R5, R0, c[0x0][0x188] ; /* 0x00006200050a7625 */
/* 0x000fe200078e0200 */
/*01c0*/ LDG.E R8, [R2.64] ; /* 0x0000000602087981 */
/* 0x000eaa000c1e1900 */
/*01d0*/ LDG.E R11, [R10.64] ; /* 0x000000060a0b7981 */
/* 0x0000a2000c1e1900 */
/*01e0*/ I2F.RP R9, R14 ; /* 0x0000000e00097306 */
/* 0x000e620000209400 */
/*01f0*/ IABS R10, R7 ; /* 0x00000007000a7213 */
/* 0x001fe40000000000 */
/*0200*/ LOP3.LUT R7, R7, c[0x0][0x168], RZ, 0x3c, !PT ; /* 0x00005a0007077a12 */
/* 0x000fc800078e3cff */
/*0210*/ ISETP.GE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe20003f26270 */
/*0220*/ MUFU.RCP R9, R9 ; /* 0x0000000900097308 */
/* 0x002e240000001000 */
/*0230*/ IADD3 R12, R9, 0xffffffe, RZ ; /* 0x0ffffffe090c7810 */
/* 0x001fcc0007ffe0ff */
/*0240*/ F2I.FTZ.U32.TRUNC.NTZ R13, R12 ; /* 0x0000000c000d7305 */
/* 0x000064000021f000 */
/*0250*/ HFMA2.MMA R12, -RZ, RZ, 0, 0 ; /* 0x00000000ff0c7435 */
/* 0x001fe200000001ff */
/*0260*/ IMAD.MOV R15, RZ, RZ, -R13 ; /* 0x000000ffff0f7224 */
/* 0x002fc800078e0a0d */
/*0270*/ IMAD R15, R15, R14, RZ ; /* 0x0000000e0f0f7224 */
/* 0x000fca00078e02ff */
/*0280*/ IMAD.HI.U32 R13, R13, R15, R12 ; /* 0x0000000f0d0d7227 */
/* 0x000fcc00078e000c */
/*0290*/ IMAD.HI.U32 R13, R13, R10, RZ ; /* 0x0000000a0d0d7227 */
/* 0x000fc800078e00ff */
/*02a0*/ IMAD.MOV R9, RZ, RZ, -R13 ; /* 0x000000ffff097224 */
/* 0x000fc800078e0a0d */
/*02b0*/ IMAD R9, R14, R9, R10 ; /* 0x000000090e097224 */
/* 0x000fca00078e020a */
/*02c0*/ ISETP.GT.U32.AND P2, PT, R14, R9, PT ; /* 0x000000090e00720c */
/* 0x000fda0003f44070 */
/*02d0*/ @!P2 IADD3 R9, R9, -R14.reuse, RZ ; /* 0x8000000e0909a210 */
/* 0x080fe40007ffe0ff */
/*02e0*/ @!P2 IADD3 R13, R13, 0x1, RZ ; /* 0x000000010d0da810 */
/* 0x000fe40007ffe0ff */
/*02f0*/ ISETP.GE.U32.AND P0, PT, R9, R14, PT ; /* 0x0000000e0900720c */
/* 0x000fe40003f06070 */
/*0300*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fd60003f45270 */
/*0310*/ @P0 IADD3 R13, R13, 0x1, RZ ; /* 0x000000010d0d0810 */
/* 0x000fc80007ffe0ff */
/*0320*/ @!P1 IADD3 R13, -R13, RZ, RZ ; /* 0x000000ff0d0d9210 */
/* 0x000fe40007ffe1ff */
/*0330*/ @!P2 LOP3.LUT R13, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff0daa12 */
/* 0x000fca00078e33ff */
/*0340*/ IMAD R13, R13, c[0x0][0x16c], R6 ; /* 0x00005b000d0d7a24 */
/* 0x000fc800078e0206 */
/*0350*/ IMAD R13, R13, c[0x0][0x170], R4 ; /* 0x00005c000d0d7a24 */
/* 0x000fc800078e0204 */
/*0360*/ IMAD.WIDE R6, R13, R0, c[0x0][0x190] ; /* 0x000064000d067625 */
/* 0x000fc800078e0200 */
/*0370*/ FMUL R11, R11, R8 ; /* 0x000000080b0b7220 */
/* 0x004fe40000400000 */
/*0380*/ IMAD.WIDE R8, R13, R0, c[0x0][0x1a8] ; /* 0x00006a000d087625 */
/* 0x000fca00078e0200 */
/*0390*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R8.64], R11 ; /* 0x0000000b0800798e */
/* 0x000fe8000c10e786 */
/*03a0*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*03b0*/ LDG.E R3, [R2.64] ; /* 0x0000000602037981 */
/* 0x000ea2000c1e1900 */
/*03c0*/ IMAD.WIDE R4, R5, R0, c[0x0][0x1a0] ; /* 0x0000680005047625 */
/* 0x000fc800078e0200 */
/*03d0*/ FMUL R13, R6, R3 ; /* 0x00000003060d7220 */
/* 0x004fca0000400000 */
/*03e0*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R4.64], R13 ; /* 0x0000000d0400798e */
/* 0x000fe2000c10e786 */
/*03f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0400*/ BRA 0x400; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0480*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0490*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*04f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e220000002500 */
/*0020*/ ULDC UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000800 */
/*0030*/ ULDC UR5, c[0x0][0x164] ; /* 0x0000590000057ab9 */
/* 0x000fe20000000800 */
/*0040*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0050*/ UIMAD UR4, UR4, UR5, URZ ; /* 0x00000005040472a4 */
/* 0x000fc6000f8e023f */
/*0060*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0070*/ S2R R4, SR_CTAID.Z ; /* 0x0000000000047919 */
/* 0x000ea20000002700 */
/*0080*/ IMAD R3, R3, c[0x0][0x0], R0 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0200 */
/*0090*/ ISETP.GE.AND P0, PT, R3, UR4, PT ; /* 0x0000000403007c0c */
/* 0x000fc8000bf06270 */
/*00a0*/ ISETP.GE.OR P0, PT, R2, c[0x0][0x16c], P0 ; /* 0x00005b0002007a0c */
/* 0x002fc80000706670 */
/*00b0*/ ISETP.GE.OR P0, PT, R4, c[0x0][0x170], P0 ; /* 0x00005c0004007a0c */
/* 0x004fda0000706670 */
/*00c0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00d0*/ HFMA2.MMA R0, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff007435 */
/* 0x000fe200000001ff */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fd20000000a00 */
/*00f0*/ IMAD.WIDE R6, R3, R0, c[0x0][0x180] ; /* 0x0000600003067625 */
/* 0x000fcc00078e0200 */
/*0100*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x0000a2000c1e1900 */
/*0110*/ IABS R10, c[0x0][0x168] ; /* 0x00005a00000a7a13 */
/* 0x000fe20000000000 */
/*0120*/ ULDC UR4, c[0x0][0x174] ; /* 0x00005d0000047ab9 */
/* 0x000fe40000000800 */
/*0130*/ UIADD3 UR4, UR4, -0x1, URZ ; /* 0xffffffff04047890 */
/* 0x000fe2000fffe03f */
/*0140*/ I2F.RP R5, R10 ; /* 0x0000000a00057306 */
/* 0x000e620000209400 */
/*0150*/ IABS R7, R3 ; /* 0x0000000300077213 */
/* 0x001fce0000000000 */
/*0160*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x002e240000001000 */
/*0170*/ IADD3 R8, R5, 0xffffffe, RZ ; /* 0x0ffffffe05087810 */
/* 0x001fcc0007ffe0ff */
/*0180*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0190*/ HFMA2.MMA R8, -RZ, RZ, 0, 0 ; /* 0x00000000ff087435 */
/* 0x001fe200000001ff */
/*01a0*/ IMAD.MOV R11, RZ, RZ, -R9 ; /* 0x000000ffff0b7224 */
/* 0x002fc800078e0a09 */
/*01b0*/ IMAD R11, R11, R10, RZ ; /* 0x0000000a0b0b7224 */
/* 0x000fca00078e02ff */
/*01c0*/ IMAD.HI.U32 R9, R9, R11, R8 ; /* 0x0000000b09097227 */
/* 0x000fcc00078e0008 */
/*01d0*/ IMAD.HI.U32 R9, R9, R7, RZ ; /* 0x0000000709097227 */
/* 0x000fc800078e00ff */
/*01e0*/ IMAD.MOV R5, RZ, RZ, -R9 ; /* 0x000000ffff057224 */
/* 0x000fc800078e0a09 */
/*01f0*/ IMAD R5, R10, R5, R7 ; /* 0x000000050a057224 */
/* 0x000fca00078e0207 */
/*0200*/ ISETP.GT.U32.AND P2, PT, R10, R5, PT ; /* 0x000000050a00720c */
/* 0x000fda0003f44070 */
/*0210*/ @!P2 IADD3 R5, R5, -R10.reuse, RZ ; /* 0x8000000a0505a210 */
/* 0x080fe40007ffe0ff */
/*0220*/ @!P2 IADD3 R9, R9, 0x1, RZ ; /* 0x000000010909a810 */
/* 0x000fe40007ffe0ff */
/*0230*/ ISETP.GE.U32.AND P0, PT, R5, R10, PT ; /* 0x0000000a0500720c */
/* 0x000fe40003f06070 */
/*0240*/ LOP3.LUT R5, R3, c[0x0][0x168], RZ, 0x3c, !PT ; /* 0x00005a0003057a12 */
/* 0x000fe400078e3cff */
/*0250*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe40003f45270 */
/*0260*/ ISETP.GE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fce0003f26270 */
/*0270*/ @P0 IADD3 R9, R9, 0x1, RZ ; /* 0x0000000109090810 */
/* 0x000fcc0007ffe0ff */
/*0280*/ @!P1 IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff099224 */
/* 0x000fe200078e0a09 */
/*0290*/ @!P2 LOP3.LUT R9, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff09aa12 */
/* 0x000fca00078e33ff */
/*02a0*/ IMAD R9, R9, c[0x0][0x16c], R2 ; /* 0x00005b0009097a24 */
/* 0x000fe200078e0202 */
/*02b0*/ F2I.FLOOR.NTZ R6, R6 ; /* 0x0000000600067305 */
/* 0x004e240000207100 */
/*02c0*/ IMNMX R5, RZ, R6, !PT ; /* 0x00000006ff057217 */
/* 0x001fc80007800200 */
/*02d0*/ IMNMX R5, R5, UR4, PT ; /* 0x0000000405057c17 */
/* 0x000fca000b800200 */
/*02e0*/ IMAD R5, R5, c[0x0][0x16c], R2 ; /* 0x00005b0005057a24 */
/* 0x000fc800078e0202 */
/*02f0*/ IMAD R7, R5, c[0x0][0x170], R4.reuse ; /* 0x00005c0005077a24 */
/* 0x100fe400078e0204 */
/*0300*/ IMAD R5, R9, c[0x0][0x170], R4 ; /* 0x00005c0009057a24 */
/* 0x000fe400078e0204 */
/*0310*/ IMAD.WIDE R6, R7, R0, c[0x0][0x188] ; /* 0x0000620007067625 */
/* 0x000fc800078e0200 */
/*0320*/ IMAD.WIDE R4, R5, R0, c[0x0][0x190] ; /* 0x0000640005047625 */
/* 0x000fe400078e0200 */
/*0330*/ LDG.E R6, [R6.64] ; /* 0x0000000606067981 */
/* 0x000ea8000c1e1900 */
/*0340*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea2000c1e1900 */
/*0350*/ IMAD R3, R3, c[0x0][0x16c], R2 ; /* 0x00005b0003037a24 */
/* 0x000fc800078e0202 */
/*0360*/ IMAD.WIDE R2, R3, R0, c[0x0][0x198] ; /* 0x0000660003027625 */
/* 0x000fc800078e0200 */
/*0370*/ FMUL R9, R6, R5 ; /* 0x0000000506097220 */
/* 0x004fca0000400000 */
/*0380*/ RED.E.ADD.F32.FTZ.RN.STRONG.GPU [R2.64], R9 ; /* 0x000000090200798e */
/* 0x000fe2000c10e786 */
/*0390*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03a0*/ BRA 0x3a0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 8
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@function
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x4c
s_load_b128 s[16:19], s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s14, s18
v_mad_u64_u32 v[1:2], null, s13, s3, v[0:1]
s_mul_i32 s3, s17, s16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, vcc_lo
s_cmp_lt_i32 s15, s19
s_cselect_b32 s4, -1, 0
s_and_b32 s3, s4, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB0_3
s_load_b256 s[4:11], s[0:1], 0x20
v_ashrrev_i32_e32 v2, 31, v1
s_ashr_i32 s3, s17, 31
v_ashrrev_i32_e32 v4, 31, v1
s_load_b32 s0, s[0:1], 0x14
s_mov_b32 s2, s15
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v1, v4
v_xor_b32_e32 v5, v5, v4
v_xor_b32_e32 v4, s3, v4
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_add_i32 s4, s17, s3
s_add_i32 s0, s0, -1
s_xor_b32 s4, s4, s3
global_load_b32 v0, v[2:3], off
v_cvt_f32_u32_e32 v2, s4
s_sub_i32 s5, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s5, v2
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v2, s4
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v2, v2, v5 :: v_dual_cndmask_b32 v3, v3, v6
v_mad_u64_u32 v[6:7], null, v1, s18, s[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v5, 1, v2
v_cmp_le_u32_e32 vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v7, 31, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v2, v2, v4
v_sub_nc_u32_e32 v4, v2, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v4, s18, s[14:15]
s_waitcnt vmcnt(0)
v_floor_f32_e32 v0, v0
v_cvt_i32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_maxmin_i32 v0, v0, 0, s0
s_mov_b32 s0, 0
v_mad_u64_u32 v[3:4], null, v0, s18, s[14:15]
v_mad_u64_u32 v[4:5], null, v2, s19, s[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[0:1], null, v3, s19, s[2:3]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[2:3], 2, v[4:5]
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v2, vcc_lo, s8, v2
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v6, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s10, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s11, v5, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v4, v[6:7], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(1)
v_mul_f32_e32 v4, v2, v4
.LBB0_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 320
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .Lfunc_end0-_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 8
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x5c
s_load_b128 s[4:7], s[0:1], 0x4
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_cmp_lt_i32 s14, s6
v_mad_u64_u32 v[1:2], null, s13, s3, v[0:1]
s_mul_i32 s3, s5, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_cselect_b32 s3, -1, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_and_b32 s3, s3, vcc_lo
s_cmp_lt_i32 s15, s7
s_cselect_b32 s4, -1, 0
s_and_b32 s3, s4, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s4, s3
s_cbranch_execz .LBB1_5
s_load_b128 s[8:11], s[0:1], 0x20
v_ashrrev_i32_e32 v2, 31, v1
s_ashr_i32 s3, s5, 31
v_ashrrev_i32_e32 v4, 31, v1
s_add_i32 s4, s5, s3
s_mov_b32 s2, s15
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_xor_b32 s4, s4, s3
v_add_nc_u32_e32 v5, v1, v4
s_sub_i32 s5, 0, s4
s_delay_alu instid0(VALU_DEP_1)
v_xor_b32_e32 v5, v5, v4
v_xor_b32_e32 v4, s3, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
s_load_b64 s[8:9], s[0:1], 0x48
global_load_b32 v0, v[2:3], off
v_cvt_f32_u32_e32 v2, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s5, v2
s_load_b32 s5, s[0:1], 0x14
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
s_waitcnt lgkmcnt(0)
s_add_i32 s5, s5, -1
v_mul_hi_u32 v2, v5, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v2, s4
v_sub_nc_u32_e32 v3, v5, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v6, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_dual_cndmask_b32 v3, v3, v6 :: v_dual_cndmask_b32 v2, v2, v5
v_mad_u64_u32 v[6:7], null, v1, s6, s[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s4, v3
v_add_nc_u32_e32 v5, 1, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v7, 31, v6
v_cndmask_b32_e32 v2, v2, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v2, v2, v4
v_sub_nc_u32_e32 v5, v2, v4
s_waitcnt vmcnt(0)
v_floor_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_i32_f32_e32 v0, v0
v_maxmin_i32 v0, v0, 0, s5
s_load_b64 s[4:5], s[0:1], 0x38
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[2:3], null, v0, s6, s[14:15]
v_mad_u64_u32 v[3:4], null, v5, s6, s[14:15]
v_mad_u64_u32 v[0:1], null, v2, s7, s[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_mad_u64_u32 v[4:5], null, v3, s7, s[2:3]
v_lshlrev_b64 v[2:3], 2, v[6:7]
s_mov_b32 s2, 0
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v5, 31, v4
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[6:7], 2, v[0:1]
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
v_lshlrev_b64 v[8:9], 2, v[4:5]
v_add_co_u32 v10, vcc_lo, s10, v6
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v7, vcc_lo
v_add_co_u32 v6, vcc_lo, s8, v8
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v9, vcc_lo
global_load_b32 v8, v[2:3], off
global_load_b32 v10, v[10:11], off
global_load_b32 v9, v[6:7], off
s_waitcnt vmcnt(1)
v_mul_f32_e32 v10, v8, v10
.LBB1_2:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v8, v9, v10
global_atomic_cmpswap_b32 v8, v[6:7], v[8:9], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v8, v9
v_mov_b32_e32 v9, v8
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB1_2
s_or_b32 exec_lo, exec_lo, s2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x30
s_load_b64 s[0:1], s[0:1], 0x40
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s2, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v4, v[4:5], off
global_load_b32 v3, v[0:1], off
s_mov_b32 s0, 0
s_waitcnt vmcnt(1)
v_mul_f32_e32 v4, v2, v4
.LBB1_4:
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v2, v3, v4
global_atomic_cmpswap_b32 v2, v[0:1], v[2:3], off glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_mov_b32_e32 v3, v2
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s0
s_cbranch_execnz .LBB1_4
.LBB1_5:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 336
.amdhsa_user_sgpr_count 13
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 1
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end1-_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .offset: 64
.size: 4
.value_kind: hidden_block_count_x
- .offset: 68
.size: 4
.value_kind: hidden_block_count_y
- .offset: 72
.size: 4
.value_kind: hidden_block_count_z
- .offset: 76
.size: 2
.value_kind: hidden_group_size_x
- .offset: 78
.size: 2
.value_kind: hidden_group_size_y
- .offset: 80
.size: 2
.value_kind: hidden_group_size_z
- .offset: 82
.size: 2
.value_kind: hidden_remainder_x
- .offset: 84
.size: 2
.value_kind: hidden_remainder_y
- .offset: 86
.size: 2
.value_kind: hidden_remainder_z
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 128
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 320
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 48
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 56
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 64
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 72
.size: 8
.value_kind: global_buffer
- .offset: 80
.size: 4
.value_kind: hidden_block_count_x
- .offset: 84
.size: 4
.value_kind: hidden_block_count_y
- .offset: 88
.size: 4
.value_kind: hidden_block_count_z
- .offset: 92
.size: 2
.value_kind: hidden_group_size_x
- .offset: 94
.size: 2
.value_kind: hidden_group_size_y
- .offset: 96
.size: 2
.value_kind: hidden_group_size_z
- .offset: 98
.size: 2
.value_kind: hidden_remainder_x
- .offset: 100
.size: 2
.value_kind: hidden_remainder_y
- .offset: 102
.size: 2
.value_kind: hidden_remainder_z
- .offset: 120
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 128
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 136
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 144
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 336
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004a3a9_00000000-6_rpe_q_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
.type _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf, @function
_Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf:
.LFB2083:
.cfi_startproc
endbr64
subq $232, %rsp
.cfi_def_cfa_offset 240
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movl %edx, 52(%rsp)
movl %ecx, 48(%rsp)
movl %r8d, 44(%rsp)
movl %r9d, 40(%rsp)
movq 240(%rsp), %rax
movq %rax, 32(%rsp)
movq 248(%rsp), %rax
movq %rax, 24(%rsp)
movq 256(%rsp), %rax
movq %rax, 16(%rsp)
movq 264(%rsp), %rax
movq %rax, 8(%rsp)
movq 272(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 216(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 48(%rsp), %rax
movq %rax, 152(%rsp)
leaq 44(%rsp), %rax
movq %rax, 160(%rsp)
leaq 40(%rsp), %rax
movq %rax, 168(%rsp)
leaq 32(%rsp), %rax
movq %rax, 176(%rsp)
leaq 24(%rsp), %rax
movq %rax, 184(%rsp)
leaq 16(%rsp), %rax
movq %rax, 192(%rsp)
leaq 8(%rsp), %rax
movq %rax, 200(%rsp)
movq %rsp, %rax
movq %rax, 208(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 216(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $232, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 248
pushq 72(%rsp)
.cfi_def_cfa_offset 256
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 240
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf, .-_Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, @function
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
.LFB2084:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 56(%rsp)
.cfi_def_cfa_offset 32
pushq 56(%rsp)
.cfi_def_cfa_offset 40
pushq 56(%rsp)
.cfi_def_cfa_offset 48
pushq 56(%rsp)
.cfi_def_cfa_offset 56
pushq 56(%rsp)
.cfi_def_cfa_offset 64
call _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .-_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.globl _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.type _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, @function
_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r14d
movl %esi, %ebx
movl %edx, %ebp
movl %ecx, %r12d
movl %r8d, %r13d
movl %r9d, %r15d
movl %esi, %edx
imull %ebp, %edx
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %cl
movzbl %cl, %ecx
leal 255(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $8, %eax
addl %ecx, %eax
movl %eax, 8(%rsp)
movl %r12d, 12(%rsp)
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl %r13d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L14
.L11:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L14:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 136(%rsp)
.cfi_def_cfa_offset 112
pushq 136(%rsp)
.cfi_def_cfa_offset 120
pushq 136(%rsp)
.cfi_def_cfa_offset 128
pushq 136(%rsp)
.cfi_def_cfa_offset 136
pushq 136(%rsp)
.cfi_def_cfa_offset 144
movl %r15d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
movl %ebx, %esi
movl %r14d, %edi
call _Z51__device_stub__Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_PfiiiiiiPKiPKfS2_S2_Pf
addq $48, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.cfi_endproc
.LFE2057:
.size _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, .-_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.globl _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2085:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movl %edx, 68(%rsp)
movl %ecx, 64(%rsp)
movl %r8d, 60(%rsp)
movl %r9d, 56(%rsp)
movq 272(%rsp), %rax
movq %rax, 48(%rsp)
movq 280(%rsp), %rax
movq %rax, 40(%rsp)
movq 288(%rsp), %rax
movq %rax, 32(%rsp)
movq 296(%rsp), %rax
movq %rax, 24(%rsp)
movq 304(%rsp), %rax
movq %rax, 16(%rsp)
movq 312(%rsp), %rax
movq %rax, 8(%rsp)
movq 320(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 68(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rax
movq %rax, 168(%rsp)
leaq 60(%rsp), %rax
movq %rax, 176(%rsp)
leaq 56(%rsp), %rax
movq %rax, 184(%rsp)
leaq 48(%rsp), %rax
movq %rax, 192(%rsp)
leaq 40(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rax
movq %rax, 208(%rsp)
leaq 24(%rsp), %rax
movq %rax, 216(%rsp)
leaq 16(%rsp), %rax
movq %rax, 224(%rsp)
leaq 8(%rsp), %rax
movq %rax, 232(%rsp)
movq %rsp, %rax
movq %rax, 240(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L19
.L15:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L20
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L15
.L20:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2086:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.type _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, @function
_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edi, %r14d
movl %esi, %ebx
movl %edx, %ebp
movl %ecx, %r12d
movl %r8d, %r13d
movl %r9d, %r15d
movl %esi, %edx
imull %ebp, %edx
movl %edx, %ecx
sarl $31, %ecx
shrl $24, %ecx
leal (%rdx,%rcx), %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %cl
movzbl %cl, %ecx
leal 255(%rdx), %eax
testl %edx, %edx
cmovns %edx, %eax
sarl $8, %eax
addl %ecx, %eax
movl %eax, 8(%rsp)
movl %r12d, 12(%rsp)
movl $256, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl %r13d, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L23:
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
subq $8, %rsp
.cfi_def_cfa_offset 104
pushq 152(%rsp)
.cfi_def_cfa_offset 112
pushq 152(%rsp)
.cfi_def_cfa_offset 120
pushq 152(%rsp)
.cfi_def_cfa_offset 128
pushq 152(%rsp)
.cfi_def_cfa_offset 136
pushq 152(%rsp)
.cfi_def_cfa_offset 144
pushq 152(%rsp)
.cfi_def_cfa_offset 152
pushq 152(%rsp)
.cfi_def_cfa_offset 160
movl %r15d, %r9d
movl %r13d, %r8d
movl %r12d, %ecx
movl %ebp, %edx
movl %ebx, %esi
movl %r14d, %edi
call _Z58__device_stub__Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_iiiiiiPKiPKfS2_S2_PfS3_S3_
addq $64, %rsp
.cfi_def_cfa_offset 96
jmp .L23
.cfi_endproc
.LFE2058:
.size _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, .-_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_"
.align 8
.LC1:
.string "_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "rpe_q_kernel.hip"
.globl _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf # -- Begin function _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 4, 0x90
.type _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@function
_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf: # @_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.cfi_startproc
# %bb.0:
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 176(%rsp), %rax
movq %rax, 128(%rsp)
leaq 184(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
leaq 200(%rsp), %rax
movq %rax, 152(%rsp)
leaq 208(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $184, %rsp
.cfi_adjust_cfa_offset -184
retq
.Lfunc_end0:
.size _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, .Lfunc_end0-_Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.cfi_endproc
# -- End function
.globl _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf # -- Begin function _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.p2align 4, 0x90
.type _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf,@function
_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf: # @_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %ebp
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edx, %edx
testl $-2147483393, %eax # imm = 0x800000FF
setg %dl
addl %ecx, %edx
movq %r14, %rdi
shlq $32, %rdi
orq %rdx, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %r8d, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 288(%rsp), %rax
movq 280(%rsp), %rcx
movq 272(%rsp), %rdx
movq 264(%rsp), %rsi
movq 256(%rsp), %rdi
movl %r13d, 20(%rsp)
movl %r12d, 16(%rsp)
movl %r15d, 12(%rsp)
movl %r14d, 8(%rsp)
movl %ebp, 4(%rsp)
movl %ebx, (%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 4(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 96(%rsp), %rax
movq %rax, 168(%rsp)
leaq 88(%rsp), %rax
movq %rax, 176(%rsp)
leaq 80(%rsp), %rax
movq %rax, 184(%rsp)
leaq 72(%rsp), %rax
movq %rax, 192(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf, .Lfunc_end1-_Z14rpe_q_launcheriiiiiiPKiPKfS2_S2_Pf
.cfi_endproc
# -- End function
.globl _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_ # -- Begin function _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 4, 0x90
.type _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_: # @_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_startproc
# %bb.0:
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 192(%rsp), %rax
movq %rax, 128(%rsp)
leaq 200(%rsp), %rax
movq %rax, 136(%rsp)
leaq 208(%rsp), %rax
movq %rax, 144(%rsp)
leaq 216(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $200, %rsp
.cfi_adjust_cfa_offset -200
retq
.Lfunc_end2:
.size _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end2-_Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_endproc
# -- End function
.globl _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_ # -- Begin function _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 4, 0x90
.type _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_,@function
_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_: # @_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $232, %rsp
.cfi_def_cfa_offset 288
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %ebx
movl %r8d, %ebp
movl %ecx, %r14d
movl %edx, %r15d
movl %esi, %r12d
movl %edi, %r13d
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edx, %edx
testl $-2147483393, %eax # imm = 0x800000FF
setg %dl
addl %ecx, %edx
movq %r14, %rdi
shlq $32, %rdi
orq %rdx, %rdi
movabsq $4294967552, %rdx # imm = 0x100000100
movl %r8d, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_2
# %bb.1:
movq 336(%rsp), %rax
movq 328(%rsp), %rcx
movq 320(%rsp), %rdx
movq 312(%rsp), %rsi
movq 304(%rsp), %rdi
movq 296(%rsp), %r8
movq 288(%rsp), %r9
movl %r13d, 20(%rsp)
movl %r12d, 16(%rsp)
movl %r15d, 12(%rsp)
movl %r14d, 8(%rsp)
movl %ebp, 4(%rsp)
movl %ebx, (%rsp)
movq %r9, 120(%rsp)
movq %r8, 112(%rsp)
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rax, 72(%rsp)
leaq 20(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
movq %rsp, %rax
movq %rax, 168(%rsp)
leaq 120(%rsp), %rax
movq %rax, 176(%rsp)
leaq 112(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rax
movq %rax, 192(%rsp)
leaq 96(%rsp), %rax
movq %rax, 200(%rsp)
leaq 88(%rsp), %rax
movq %rax, 208(%rsp)
leaq 80(%rsp), %rax
movq %rax, 216(%rsp)
leaq 72(%rsp), %rax
movq %rax, 224(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_2:
addq $232, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_, .Lfunc_end3-_Z19rpe_q_grad_launcheriiiiiiPKiPKfS2_S2_PfS3_S3_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf,@object # @_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.section .rodata,"a",@progbits
.globl _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.p2align 3, 0x0
_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf:
.quad _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.size _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf, 8
.type _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_,@object # @_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.globl _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.p2align 3, 0x0
_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_:
.quad _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.size _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf"
.size .L__unnamed_1, 38
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_"
.size .L__unnamed_2, 45
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.addrsig_sym _Z29__device_stub__rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13rpe_q_forwardiiiiiiPKiPKfS2_S2_Pf
.addrsig_sym _Z14rpe_q_backwardiiiiiiPKiPKfS2_S2_PfS3_S3_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<curand.h>
#include<curand_kernel.h>
#include<string.h>
#include<new>
#define FALSE 0
#define TRUE 1
#define STR_EQ 0
#define max(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
#define abs(a) \
({__typeof__ (a) _a = (a); \
_a >= 0 ? _a : -_a; })
/* =================== BASIC FUNCTIONS =====================================================================*/
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__device__ void curandInit(curandState_t* state_ptr, int tid){
curand_init((unsigned long long)clock(), tid, 0, state_ptr);
}
__device__ float cudaFloatRand(float min, float max, curandState_t* state_ptr){
return min + curand_uniform(state_ptr) * (max - min);
}
__device__ int cudaIntRand(int min, int max, curandState_t* state_ptr){
return int(cudaFloatRand(float(min), float(max + 1.0), state_ptr));
}
__host__ float floatRand(float min, float max){
float scale = rand() / (float) RAND_MAX;
return min + scale * (max - min);
}
__host__ char roll(float probability){
if(floatRand(0.0, 1.0) < probability)
return TRUE;
return FALSE;
}
__device__ char cudaRoll(float probability, curandState_t* curand_state_ptr){
if(cudaFloatRand(0.0, 1.0, curand_state_ptr) < probability)
return TRUE;
return FALSE;
}
/* =================== STRUCTS AND METHODS =====================================================================*/
// All tunable parameters of one simulation run; defaults are set in main()
// and every field can be overridden from the command line.
typedef struct SimulationOptions{
    int N;                    // population size
    float DIM;                // the world is a DIM x DIM square
    int simulation_time;      // number of simulation steps
    float infection_r;        // contagion radius
    float infection_p;        // per-contact infection probability
    float velocity;           // distance a person travels per step
    int immune_time;          // steps infected until becoming IMMUNE
    int sympthoms_time;       // steps infected until becoming SICK
    int blocks;               // CUDA grid size
    int threads_per_block;    // CUDA block size
    char* output;             // output file name; "none" disables output
    float lawful_p;           // probability a person obeys quarantine rules
    int quarantine_sick_time; // step after which SICK people are quarantined (0 = policy off)
    int quarantine_all_time;  // step after which everyone is quarantined (0 = policy off)
    int gathering_points_n;   // number of shared gathering points
    float gathering_point_p;  // per-step probability of visiting a gathering point
    int buffor_size;          // steps buffered on the GPU between host copies
} SimulationOptions;
// Disease progression states.
typedef enum{HEALTHY, CARRIER, SICK, IMMUNE} Health;
// Travel phase of a quarantined person relative to a gathering point.
typedef enum{GOING_TO, GOING_BACK, NO_DESTINATION} GatheringPointTravel;
// 2D position inside the simulation square.
typedef struct Point{
    float x;
    float y;
} Point;
// Host-side uniform sample of a location inside the DIM x DIM square.
__host__ Point randPoint(float DIM){
    Point p;
    p.x = floatRand(0.0, DIM);
    p.y = floatRand(0.0, DIM);
    return p;
}
// Device-side uniform sample of a location inside the DIM x DIM square.
__device__ Point cudaRandPoint(float DIM, curandState_t* state_ptr){
    Point point;
    point.x = cudaFloatRand(0.0, DIM, state_ptr);
    point.y = cudaFloatRand(0.0, DIM, state_ptr);
    return point;
}
// Euclidean distance between two points.
// The original took abs() of each delta before squaring — squaring already
// discards the sign, so the abs (a statement-expression macro) is dropped.
// sqrtf keeps the whole computation in single precision on both host and device.
__host__ __device__ float distance(Point p1, Point p2){
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    return sqrtf(dx * dx + dy * dy);
}
// Full state of one simulated person.
typedef struct Person{
    Point location;              // current position
    Point home;                  // starting position; quarantined people return here
    Health health;
    GatheringPointTravel travel; // gathering-point travel phase
    char quarantined;            // SICK people are totally quarantined, the rest is partially quarantined
    int time_sick;               // steps spent as CARRIER or SICK
    Point destination;           // current movement target
    char lawful;                 // TRUE if the person obeys quarantine policies
} Person;
// Per-step snapshot of a person, buffered on the GPU and copied to the host.
typedef struct PersonInfo{
    Point location;
    Health health;
} PersonInfo;
/* =================== DEVICE CODE =====================================================================*/
// Apply the active quarantine policy to one person for simulation step `time`.
// The total-lockdown policy takes precedence over the sick-only policy.
__device__ void updateQuarantine(SimulationOptions settings, Person* person_ptr, int time){
    // Unlawful citizens ignore every quarantine policy.
    if(!(person_ptr->lawful))
        return;
    // Total lockdown: once active, everyone lawful is quarantined.
    if(settings.quarantine_all_time && settings.quarantine_all_time < time){
        person_ptr->quarantined = TRUE;
        return;
    }
    // Sick-only policy: quarantine the symptomatic, release everybody else.
    if(settings.quarantine_sick_time && settings.quarantine_sick_time < time){
        if(person_ptr->health != SICK){
            person_ptr->quarantined = FALSE;
            return;
        }
        person_ptr->quarantined = TRUE;
        person_ptr->travel = NO_DESTINATION;
    }
}
// Move one person by settings.velocity towards its current destination,
// choosing a new destination once the current one is (nearly) reached.
__device__ void migrate(
    SimulationOptions settings,
    Person* person_ptr,
    curandState_t* state_ptr,
    Point* gathering_points
){
    float angle, dy, dx;
    // A destination counts as reached once it is closer than one step.
    float destination_r = settings.velocity;
    if(person_ptr->quarantined){
        // Quarantined and SICK: total lockdown, no movement at all.
        if(person_ptr->health == SICK)
            return;
        // Reached the gathering point -> turn around and head home.
        if(person_ptr->travel == GOING_TO && distance(person_ptr->location, person_ptr->destination) < destination_r){
            person_ptr->destination = person_ptr->home;
            person_ptr->travel = GOING_BACK;
        }
        // Arrived back home -> stay put.
        if(person_ptr->travel == GOING_BACK && distance(person_ptr->location, person_ptr->destination) < destination_r){
            person_ptr->travel = NO_DESTINATION;
        }
        // Idle at home: occasionally decide to visit a random gathering point.
        if(person_ptr->travel == NO_DESTINATION){
            if(!settings.gathering_points_n)
                return;
            if(!cudaRoll(settings.gathering_point_p, state_ptr))
                return;
            person_ptr->destination = gathering_points[cudaIntRand(0, settings.gathering_points_n - 1, state_ptr)];
            person_ptr->travel = GOING_TO;
        }
    }
    // Unquarantined: wander to a fresh random point whenever one is reached.
    else if(distance(person_ptr->location, person_ptr->destination) < destination_r){
        person_ptr->destination = cudaRandPoint(settings.DIM, state_ptr);
    }
    // Step `velocity` units along the straight line towards the destination,
    // clamping the new position to the [0, DIM] world bounds.
    dy = person_ptr->destination.y - person_ptr->location.y;
    dx = person_ptr->destination.x - person_ptr->location.x;
    angle = atan2(dy, dx);
    person_ptr->location.x = min(max(person_ptr->location.x + cos(angle) * settings.velocity, 0.0), settings.DIM);
    person_ptr->location.y = min(max(person_ptr->location.y + sin(angle) * settings.velocity, 0.0), settings.DIM);
}
// Advance one person's disease by a single step:
// infected people accumulate sick time and progress CARRIER -> SICK -> IMMUNE
// once time_sick crosses sympthoms_time and immune_time respectively.
__device__ void developDisease(SimulationOptions settings, Person* person_ptr){
    char infected = (person_ptr->health == CARRIER) || (person_ptr->health == SICK);
    if(infected)
        person_ptr->time_sick += 1;
    int t = person_ptr->time_sick;
    if(t > settings.immune_time)
        person_ptr->health = IMMUNE;
    else if(t > settings.sympthoms_time)
        person_ptr->health = SICK;
}
// If person `me_idx` is contagious, try to infect every susceptible person
// within infection_r. Writes other threads' Person.health concurrently, so
// races are possible — they only ever flip HEALTHY -> CARRIER, which the
// author deems acceptable:
// there may be races, but it doesn't matter (I think?)
__device__ void infect(
    SimulationOptions settings,
    Person* population,
    int me_idx,
    curandState_t* curand_state_ptr
){
    Person* me_ptr = &population[me_idx];
    Person* person_ptr;
    int i;
    // Only CARRIER/SICK spread the disease; a fully quarantined SICK person does not.
    if((me_ptr->health == CARRIER || me_ptr->health == SICK) && !(me_ptr->quarantined && me_ptr->health == SICK)){
        // O(N) scan over the whole population per contagious person.
        for(i = 0; i < settings.N; i++){
            person_ptr = &population[i];
            if(i == me_idx) continue;
            // People locked down at home cannot be reached.
            if(person_ptr->quarantined && person_ptr->travel == NO_DESTINATION) continue;
            // Already infected people are skipped (IMMUNE can be re-infected here —
            // NOTE(review): likely intentional "waning immunity"? confirm).
            if(person_ptr->health == CARRIER || person_ptr->health == SICK) continue;
            if(distance(me_ptr->location, person_ptr->location) > settings.infection_r) continue;
            if(cudaRoll(settings.infection_p, curand_state_ptr))
                person_ptr->health = CARRIER;
        }
    }
}
// One full simulation step for the whole population.
// Each phase walks the population with a grid-stride loop, so any
// <<<blocks, threads_per_block>>> configuration covers all N people.
// NOTE(review): there is no grid-wide barrier between the phases, so
// different blocks may interleave e.g. migration and infection; the author
// tolerates races (see infect()) — confirm this is acceptable.
// NOTE(review): the state read from curand_states[tid] is immediately
// re-seeded by curandInit() and never written back, so the curand_states
// buffer is effectively unused scratch — verify that is intended.
__global__ void simulate(
    SimulationOptions settings,
    Person* population,
    curandState_t* curand_states,
    int time,
    Point* gathering_points,
    int buffor_index,
    PersonInfo* population_info
){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int i;
    Person* person_ptr;
    curandState_t my_curand_state = curand_states[tid];
    curandInit(&my_curand_state, tid);
    // develop disease
    i = tid;
    while(i < settings.N){
        person_ptr = &population[i];
        developDisease(settings, person_ptr);
        i += gridDim.x * blockDim.x;
    }
    // update population quarantine_all_time
    i = tid;
    while(i < settings.N){
        person_ptr = &population[i];
        updateQuarantine(settings, person_ptr, time);
        i += gridDim.x * blockDim.x;
    }
    // migration of population
    i = tid;
    while(i < settings.N){
        person_ptr = &population[i];
        migrate(settings, person_ptr, &my_curand_state, gathering_points);
        i += gridDim.x * blockDim.x;
    }
    // spread of disease
    i = tid;
    while(i < settings.N){
        infect(settings, population, i, &my_curand_state);
        i += gridDim.x * blockDim.x;
    }
    // save to buffor: snapshot slot buffor_index of the on-device history buffer
    i = tid;
    while(i < settings.N){
        population_info[settings.N * buffor_index + i].location = population[i].location;
        population_info[settings.N * buffor_index + i].health = population[i].health;
        i += gridDim.x * blockDim.x;
    }
}
/* =================== HOST =====================================================================*/
/*
 * Entry point: parses command-line options, builds the initial population on
 * the host, runs the simulation kernel in buffered batches on the GPU, and
 * streams per-step population snapshots to the output file.
 * Returns 0 on success, 1 on invalid arguments or I/O failure.
 *
 * Fixes vs. the original:
 *  - the long DIM option was misspelled "-=DIM" and could never match;
 *  - fopen() failure is now detected instead of crashing on fprintf().
 */
int main(int argc, char** argv){
    SimulationOptions settings;
    int i, j, buffors_simulated;
    FILE* file;
    char save_output;
    Person* population;
    Person* dev_population;
    curandState_t* curand_states;
    Point* gathering_points;
    Point* dev_gathering_points;
    PersonInfo* population_info;
    PersonInfo* dev_population_info;
    // Defaults; every value below can be overridden on the command line.
    settings.N = 10000;
    settings.DIM = 100;
    settings.simulation_time = 500;
    settings.velocity = 1.0;
    settings.infection_p = 0.33;
    settings.infection_r = 3.0;
    settings.immune_time = 100;
    settings.sympthoms_time = 10;
    settings.blocks = 128;
    settings.threads_per_block = 128;
    settings.output = "output.sim";
    settings.quarantine_all_time = 0;
    settings.quarantine_sick_time = 0;
    settings.lawful_p = 1.0;
    settings.gathering_points_n = 0;
    settings.gathering_point_p = 0.05;
    settings.buffor_size = 1;
    //read commandline args
    i = 1;
    while(i < argc - 1){
        if(strcmp(argv[i], "--N") == STR_EQ || strcmp(argv[i], "-N") == STR_EQ){
            settings.N = atoi(argv[++i]);
            if(settings.N < 1) return 1;
        }
        // BUG FIX: long option used to be the typo "-=DIM".
        else if(strcmp(argv[i], "--DIM") == STR_EQ || strcmp(argv[i], "-DIM") == STR_EQ){
            settings.DIM = atof(argv[++i]);
            if(settings.DIM <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--simulation_n") == STR_EQ || strcmp(argv[i], "-simn") == STR_EQ){
            settings.simulation_time = atoi(argv[++i]);
            if(settings.simulation_time < 1) return 1;
        }
        else if(strcmp(argv[i], "--velocity") == STR_EQ || strcmp(argv[i], "-v") == STR_EQ){
            settings.velocity = atof(argv[++i]);
            if(settings.velocity < 0) return 1;
        }
        else if(strcmp(argv[i], "--infection_p") == STR_EQ || strcmp(argv[i], "-infp") == STR_EQ){
            settings.infection_p = atof(argv[++i]);
            if(settings.infection_p <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--infection_r") == STR_EQ || strcmp(argv[i], "-infr") == STR_EQ){
            settings.infection_r = atof(argv[++i]);
            if(settings.infection_r <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--immune_time") == STR_EQ || strcmp(argv[i], "-immt") == STR_EQ){
            settings.immune_time = atoi(argv[++i]);
            if(settings.immune_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--sympthoms_time") == STR_EQ || strcmp(argv[i], "-symt") == STR_EQ){
            settings.sympthoms_time = atoi(argv[++i]);
            if(settings.sympthoms_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--blocks") == STR_EQ || strcmp(argv[i], "-b") == STR_EQ){
            settings.blocks = atoi(argv[++i]);
            if(settings.blocks < 1) return 1;
        }
        else if(strcmp(argv[i], "--threads_per_block") == STR_EQ || strcmp(argv[i], "-tpb") == STR_EQ){
            settings.threads_per_block = atoi(argv[++i]);
            if(settings.threads_per_block < 1) return 1;
        }
        else if(strcmp(argv[i], "--output") == STR_EQ || strcmp(argv[i], "-o") == STR_EQ){
            settings.output = argv[++i];
            if(!settings.output) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_all_time") == STR_EQ || strcmp(argv[i], "-qat") == STR_EQ){
            settings.quarantine_all_time = atoi(argv[++i]);
            if(settings.quarantine_all_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_sick_time") == STR_EQ || strcmp(argv[i], "-qst") == STR_EQ){
            settings.quarantine_sick_time = atoi(argv[++i]);
            if(settings.quarantine_sick_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--lawful_p") == STR_EQ || strcmp(argv[i], "-lawp") == STR_EQ){
            settings.lawful_p = atof(argv[++i]);
            if(settings.lawful_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_points_n") == STR_EQ || strcmp(argv[i], "-gn") == STR_EQ){
            settings.gathering_points_n = atoi(argv[++i]);
            if(settings.gathering_points_n < 0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_point_p") == STR_EQ || strcmp(argv[i], "-gp") == STR_EQ){
            settings.gathering_point_p = atof(argv[++i]);
            if(settings.gathering_point_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--buffor_size") == STR_EQ || strcmp(argv[i], "-buff") == STR_EQ){
            settings.buffor_size = atoi(argv[++i]);
            if(settings.buffor_size < 1) return 1;
        }
        i++;
    }
    // "-o none" disables all file output.
    if(strcmp(settings.output, "none") == STR_EQ)
        save_output = FALSE;
    else
        save_output = TRUE;
    // Host-side buffers: N * buffor_size snapshots plus the live population.
    try{
        population_info = new PersonInfo[settings.N * settings.buffor_size];
        population = new Person[settings.N];
    }
    catch(const std::bad_alloc& e){
        printf("Insufficent memory on host\n");
        return 1;
    }
    srand((unsigned int)time(NULL));
    // Initialise everyone healthy, at a random home, heading somewhere random.
    for(i = 0; i < settings.N; i++){
        population[i].location.x = floatRand(0.0, settings.DIM);
        population[i].location.y = floatRand(0.0, settings.DIM);
        population[i].home = population[i].location;
        population[i].destination.x = floatRand(0.0, settings.DIM);
        population[i].destination.y = floatRand(0.0, settings.DIM);
        population[i].health = HEALTHY;
        population[i].quarantined = FALSE;
        population[i].time_sick = 0;
        population[i].travel = NO_DESTINATION;
        if(roll(settings.lawful_p))
            population[i].lawful = TRUE;
        else
            population[i].lawful = FALSE;
    }
    gathering_points = new Point[settings.gathering_points_n];
    for(i = 0; i < settings.gathering_points_n; i++){
        gathering_points[i].x = floatRand(0.0, settings.DIM);
        gathering_points[i].y = floatRand(0.0, settings.DIM);
    }
    //patient zero
    population[0].health = CARRIER;
    // Device allocations and initial uploads.
    HANDLE_ERROR( cudaMalloc((void**)&dev_population, sizeof(Person) * settings.N) );
    HANDLE_ERROR( cudaMalloc((void**)&curand_states, sizeof(curandState_t) * settings.blocks * settings.threads_per_block) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_gathering_points, sizeof(Point) * settings.gathering_points_n) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size) );
    HANDLE_ERROR( cudaMemcpy(dev_population, population, sizeof(Person) * settings.N, cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemcpy(dev_gathering_points, gathering_points, sizeof(Point) * settings.gathering_points_n, cudaMemcpyHostToDevice) );
    if(save_output){
        file = fopen(settings.output, "w");
        // BUG FIX: fopen() can fail (bad path, permissions); the original
        // dereferenced the NULL FILE* in fprintf().
        if(!file){
            printf("Could not open output file\n");
            return 1;
        }
        fprintf(file, "%d %f %d %d\n", settings.N, settings.DIM, settings.simulation_time, settings.gathering_points_n);
        for(i = 0; i < settings.gathering_points_n; i++)
            fprintf(file, "%f %f\n", gathering_points[i].x, gathering_points[i].y);
    }
    // Run the kernel buffor_size steps at a time, then copy the whole
    // snapshot buffer back at once to amortise the device->host transfer.
    i = 0;
    while(i < settings.simulation_time){
        for(j = 0; j < settings.buffor_size; j++){
            printf("==========SIM%d==========\n", i);
            simulate<<<settings.blocks, settings.threads_per_block>>>(
                settings, dev_population, curand_states, i, dev_gathering_points, j, dev_population_info
            );
            cudaDeviceSynchronize();
            buffors_simulated = j + 1;
            i++;
            if(i >= settings.simulation_time)
                break;
        }
        printf("Coping buffor from GPU to host...\n");
        HANDLE_ERROR( cudaMemcpy(
            population_info, dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size, cudaMemcpyDeviceToHost
        ) );
        // Only the slots actually simulated this round are written out.
        if(save_output){
            for(j = 0; j < settings.N * buffors_simulated; j++){
                fprintf(file, "%f %f %d\n", population_info[j].location.x, population_info[j].location.y, population_info[j].health);
            }
        }
    }
    if(save_output)
        fclose(file);
    cudaFree(curand_states);
    cudaFree(dev_population);
    cudaFree(dev_gathering_points);
    cudaFree(dev_population_info);
    delete[] population;
    delete[] gathering_points;
    delete[] population_info;
    return 0;
}
.text
#APP
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s in %s at line %d\n"
#NO_APP
.text
.type _ZL11HandleError9cudaErrorPKci, @function
_ZL11HandleError9cudaErrorPKci:
.LFB2285:
.cfi_startproc
testl %edi, %edi
jne .L6
ret
.L6:
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rsi, %rbx
movl %edx, %ebp
call cudaGetErrorString@PLT
movq %rax, %rdx
movl %ebp, %r8d
movq %rbx, %rcx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2285:
.size _ZL11HandleError9cudaErrorPKci, .-_ZL11HandleError9cudaErrorPKci
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2302:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2302:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10curandInitP17curandStateXORWOWi
.type _Z10curandInitP17curandStateXORWOWi, @function
_Z10curandInitP17curandStateXORWOWi:
.LFB2286:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2286:
.size _Z10curandInitP17curandStateXORWOWi, .-_Z10curandInitP17curandStateXORWOWi
.globl _Z13cudaFloatRandffP17curandStateXORWOW
.type _Z13cudaFloatRandffP17curandStateXORWOW, @function
_Z13cudaFloatRandffP17curandStateXORWOW:
.LFB2287:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2287:
.size _Z13cudaFloatRandffP17curandStateXORWOW, .-_Z13cudaFloatRandffP17curandStateXORWOW
.globl _Z11cudaIntRandiiP17curandStateXORWOW
.type _Z11cudaIntRandiiP17curandStateXORWOW, @function
_Z11cudaIntRandiiP17curandStateXORWOW:
.LFB2288:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2288:
.size _Z11cudaIntRandiiP17curandStateXORWOW, .-_Z11cudaIntRandiiP17curandStateXORWOW
.globl _Z9floatRandff
.type _Z9floatRandff, @function
_Z9floatRandff:
.LFB2289:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movss %xmm0, 8(%rsp)
movss %xmm1, 12(%rsp)
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC1(%rip), %xmm0
movss 12(%rsp), %xmm1
movss 8(%rsp), %xmm2
subss %xmm2, %xmm1
mulss %xmm1, %xmm0
addss %xmm2, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2289:
.size _Z9floatRandff, .-_Z9floatRandff
.globl _Z4rollf
.type _Z4rollf, @function
_Z4rollf:
.LFB2290:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movss %xmm0, 12(%rsp)
movss .LC2(%rip), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss 12(%rsp), %xmm2
comiss %xmm0, %xmm2
seta %al
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2290:
.size _Z4rollf, .-_Z4rollf
.globl _Z8cudaRollfP17curandStateXORWOW
.type _Z8cudaRollfP17curandStateXORWOW, @function
_Z8cudaRollfP17curandStateXORWOW:
.LFB2291:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2291:
.size _Z8cudaRollfP17curandStateXORWOW, .-_Z8cudaRollfP17curandStateXORWOW
.globl _Z9randPointf
.type _Z9randPointf, @function
_Z9randPointf:
.LFB2292:
.cfi_startproc
endbr64
subq $24, %rsp
.cfi_def_cfa_offset 32
movss %xmm0, 12(%rsp)
movaps %xmm0, %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, 8(%rsp)
movss 12(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss 8(%rsp), %xmm1
movd %xmm0, %edx
salq $32, %rdx
movd %xmm1, %eax
orq %rdx, %rax
movq %rax, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2292:
.size _Z9randPointf, .-_Z9randPointf
.globl _Z13cudaRandPointfP17curandStateXORWOW
.type _Z13cudaRandPointfP17curandStateXORWOW, @function
_Z13cudaRandPointfP17curandStateXORWOW:
.LFB2293:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2293:
.size _Z13cudaRandPointfP17curandStateXORWOW, .-_Z13cudaRandPointfP17curandStateXORWOW
.globl _Z8distance5PointS_
.type _Z8distance5PointS_, @function
_Z8distance5PointS_:
.LFB2294:
.cfi_startproc
endbr64
movq %xmm0, %rax
movq %xmm1, %rdx
subss %xmm1, %xmm0
movq %rax, %xmm3
shufps $85, %xmm3, %xmm3
movdqa %xmm3, %xmm1
movq %rdx, %xmm4
shufps $85, %xmm4, %xmm4
subss %xmm4, %xmm1
mulss %xmm0, %xmm0
mulss %xmm1, %xmm1
addss %xmm1, %xmm0
sqrtss %xmm0, %xmm0
ret
.cfi_endproc
.LFE2294:
.size _Z8distance5PointS_, .-_Z8distance5PointS_
.globl _Z16updateQuarantine17SimulationOptionsP6Personi
.type _Z16updateQuarantine17SimulationOptionsP6Personi, @function
_Z16updateQuarantine17SimulationOptionsP6Personi:
.LFB2295:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2295:
.size _Z16updateQuarantine17SimulationOptionsP6Personi, .-_Z16updateQuarantine17SimulationOptionsP6Personi
.globl _Z7migrate17SimulationOptionsP6PersonP17curandStateXORWOWP5Point
.type _Z7migrate17SimulationOptionsP6PersonP17curandStateXORWOWP5Point, @function
_Z7migrate17SimulationOptionsP6PersonP17curandStateXORWOWP5Point:
.LFB2296:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2296:
.size _Z7migrate17SimulationOptionsP6PersonP17curandStateXORWOWP5Point, .-_Z7migrate17SimulationOptionsP6PersonP17curandStateXORWOWP5Point
.globl _Z14developDisease17SimulationOptionsP6Person
.type _Z14developDisease17SimulationOptionsP6Person, @function
_Z14developDisease17SimulationOptionsP6Person:
.LFB2297:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2297:
.size _Z14developDisease17SimulationOptionsP6Person, .-_Z14developDisease17SimulationOptionsP6Person
.globl _Z6infect17SimulationOptionsP6PersoniP17curandStateXORWOW
.type _Z6infect17SimulationOptionsP6PersoniP17curandStateXORWOW, @function
_Z6infect17SimulationOptionsP6PersoniP17curandStateXORWOW:
.LFB2298:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2298:
.size _Z6infect17SimulationOptionsP6PersoniP17curandStateXORWOW, .-_Z6infect17SimulationOptionsP6PersoniP17curandStateXORWOW
.globl _Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
.type _Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo, @function
_Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo:
.LFB2324:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rsi, 40(%rsp)
movq %rdx, 32(%rsp)
movl %ecx, 28(%rsp)
movq %r8, 16(%rsp)
movl %r9d, 24(%rsp)
movq 192(%rsp), %rax
movq %rax, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
movq %rdi, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 28(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 24(%rsp), %rax
movq %rax, 152(%rsp)
leaq 8(%rsp), %rax
movq %rax, 160(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L38
.L34:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L39
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L38:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L34
.L39:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2324:
.size _Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo, .-_Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
.globl _Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
.type _Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo, @function
_Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo:
.LFB2325:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
pushq %r9
.cfi_def_cfa_offset 32
movl %r8d, %r9d
movq %rcx, %r8
movl %edx, %ecx
movq %rsi, %rdx
movq %rdi, %rsi
leaq 32(%rsp), %rdi
call _Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2325:
.size _Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo, .-_Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
.section .rodata.str1.1
.LC5:
.string "output.sim"
.LC9:
.string "--N"
.LC10:
.string "-N"
.LC11:
.string "-=DIM"
.LC12:
.string "-DIM"
.LC13:
.string "--simulation_n"
.LC14:
.string "-simn"
.LC15:
.string "--velocity"
.LC16:
.string "-v"
.LC17:
.string "--infection_p"
.LC18:
.string "-infp"
.LC19:
.string "--infection_r"
.LC20:
.string "-infr"
.LC21:
.string "--immune_time"
.LC22:
.string "-immt"
.LC23:
.string "--sympthoms_time"
.LC24:
.string "-symt"
.LC25:
.string "--blocks"
.LC26:
.string "-b"
.LC27:
.string "--threads_per_block"
.LC28:
.string "-tpb"
.LC29:
.string "--output"
.LC30:
.string "-o"
.LC31:
.string "--quarantine_all_time"
.LC32:
.string "-qat"
.LC33:
.string "--quarantine_sick_time"
.LC34:
.string "-qst"
.LC35:
.string "--lawful_p"
.LC36:
.string "-lawp"
.LC37:
.string "--gathering_points_n"
.LC38:
.string "-gn"
.LC39:
.string "--gathering_point_p"
.LC40:
.string "-gp"
.LC41:
.string "--buffor_size"
.LC42:
.string "-buff"
.LC43:
.string "none"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC44:
.string "/home/ubuntu/Datasets/stackv2/train-structured/Qbuscs/Epidemic-simulation-with-CUDA-and-python/master/pandemia.cu"
.section .rodata.str1.1
.LC45:
.string "w"
.LC46:
.string "%d %f %d %d\n"
.LC47:
.string "%f %f\n"
.LC48:
.string "==========SIM%d==========\n"
.section .rodata.str1.8
.align 8
.LC49:
.string "Coping buffor from GPU to host...\n"
.section .rodata.str1.1
.LC50:
.string "%f %f %d\n"
.LC51:
.string "Insufficent memory on host\n"
.text
.globl main
.type main, @function
main:
.LFB2299:
.cfi_startproc
.cfi_personality 0x9b,DW.ref.__gxx_personality_v0
.cfi_lsda 0x1b,.LLSDA2299
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $280, %rsp
.cfi_def_cfa_offset 336
movq %fs:40, %rax
movq %rax, 264(%rsp)
xorl %eax, %eax
cmpl $2, %edi
jle .L114
movq %rsi, %r13
leal -1(%rdi), %edx
movl $1, %r15d
movl $0, 28(%rsp)
movl $0, 68(%rsp)
movss .LC4(%rip), %xmm4
movss %xmm4, 48(%rsp)
movl $0, 52(%rsp)
movss .LC2(%rip), %xmm3
movss %xmm3, 24(%rsp)
leaq .LC5(%rip), %rax
movq %rax, 32(%rsp)
movl $128, 12(%rsp)
movl $10, 64(%rsp)
movss .LC6(%rip), %xmm5
movss %xmm5, 60(%rsp)
movl $500, %r12d
movss .LC7(%rip), %xmm6
movss %xmm6, 16(%rsp)
movl $10000, 20(%rsp)
movl $1, %ebx
movl $128, 80(%rsp)
movl $100, 56(%rsp)
movss %xmm3, 40(%rsp)
movss .LC8(%rip), %xmm7
movss %xmm7, 44(%rsp)
movl %r15d, 72(%rsp)
movl %edx, %r15d
jmp .L79
.L44:
addl $1, %ebx
movslq %ebx, %rax
movq 0(%r13,%rax,8), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 20(%rsp)
testl %eax, %eax
jle .L145
.L47:
addl $1, %ebx
cmpl %r15d, %ebx
jge .L146
.L79:
movslq %ebx, %rax
leaq 0(,%rax,8), %r14
movq 0(%r13,%rax,8), %rbp
leaq .LC9(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L44
leaq .LC10(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L44
leaq .LC11(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L48
leaq .LC12(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L49
.L48:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm2, %xmm2
cvtsd2ss %xmm0, %xmm2
movss %xmm2, 16(%rsp)
pxor %xmm0, %xmm0
comiss %xmm2, %xmm0
jb .L47
movl $1, %eax
jmp .L42
.L49:
leaq .LC13(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L50
leaq .LC14(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L51
.L50:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r12d
testl %eax, %eax
jg .L47
movl $1, %eax
jmp .L42
.L51:
leaq .LC15(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L52
leaq .LC16(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L53
.L52:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm4, %xmm4
cvtsd2ss %xmm0, %xmm4
movss %xmm4, 40(%rsp)
pxor %xmm0, %xmm0
comiss %xmm4, %xmm0
jbe .L47
movl $1, %eax
jmp .L42
.L53:
leaq .LC17(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L54
leaq .LC18(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L55
.L54:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm5, %xmm5
cvtsd2ss %xmm0, %xmm5
movss %xmm5, 44(%rsp)
pxor %xmm0, %xmm0
comiss %xmm5, %xmm0
jb .L47
movl $1, %eax
jmp .L42
.L55:
leaq .LC19(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L56
leaq .LC20(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L57
.L56:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm6, %xmm6
cvtsd2ss %xmm0, %xmm6
movss %xmm6, 60(%rsp)
pxor %xmm0, %xmm0
comiss %xmm6, %xmm0
jb .L47
movl $1, %eax
jmp .L42
.L57:
leaq .LC21(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L58
leaq .LC22(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L59
.L58:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 56(%rsp)
testl %eax, %eax
jns .L47
movl $1, %eax
jmp .L42
.L59:
leaq .LC23(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L60
leaq .LC24(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L61
.L60:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 64(%rsp)
testl %eax, %eax
jns .L47
movl $1, %eax
jmp .L42
.L61:
leaq .LC25(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L62
leaq .LC26(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L63
.L62:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 12(%rsp)
testl %eax, %eax
jg .L47
movl $1, %eax
jmp .L42
.L63:
leaq .LC27(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L64
leaq .LC28(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L65
.L64:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 80(%rsp)
testl %eax, %eax
jg .L47
movl $1, %eax
jmp .L42
.L65:
leaq .LC29(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L66
leaq .LC30(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L67
.L66:
addl $1, %ebx
movq 8(%r13,%r14), %rax
movq %rax, 32(%rsp)
testq %rax, %rax
jne .L47
movl $1, %eax
jmp .L42
.L67:
leaq .LC31(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L68
leaq .LC32(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L69
.L68:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 52(%rsp)
testl %eax, %eax
jns .L47
movl $1, %eax
jmp .L42
.L69:
leaq .LC33(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L70
leaq .LC34(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L71
.L70:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 68(%rsp)
testl %eax, %eax
jns .L47
movl $1, %eax
jmp .L42
.L71:
leaq .LC35(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L72
leaq .LC36(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L73
.L72:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm7, %xmm7
cvtsd2ss %xmm0, %xmm7
movss %xmm7, 24(%rsp)
pxor %xmm0, %xmm0
comiss %xmm7, %xmm0
jbe .L47
movl $1, %eax
jmp .L42
.L73:
leaq .LC37(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L74
leaq .LC38(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L75
.L74:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 28(%rsp)
testl %eax, %eax
jns .L47
movl $1, %eax
jmp .L42
.L75:
leaq .LC39(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L76
leaq .LC40(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L77
.L76:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $0, %esi
call strtod@PLT
pxor %xmm6, %xmm6
cvtsd2ss %xmm0, %xmm6
movss %xmm6, 48(%rsp)
pxor %xmm0, %xmm0
comiss %xmm6, %xmm0
jbe .L47
movl $1, %eax
jmp .L42
.L77:
leaq .LC41(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L78
leaq .LC42(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L47
.L78:
addl $1, %ebx
movq 8(%r13,%r14), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, 72(%rsp)
testl %eax, %eax
jg .L47
movl $1, %eax
jmp .L42
.L146:
movl 72(%rsp), %r15d
leaq .LC43(%rip), %rsi
movq 32(%rsp), %rdi
call strcmp@PLT
testl %eax, %eax
setne 87(%rsp)
movl 20(%rsp), %eax
imull %r15d, %eax
cltq
movabsq $768614336404564650, %rdx
cmpq %rax, %rdx
jb .L147
.L43:
imulq $12, %rax, %rdi
.LEHB0:
call _Znam@PLT
jmp .L148
.L114:
movl $1, %r15d
movl $0, 28(%rsp)
movl $0, 68(%rsp)
movss .LC4(%rip), %xmm6
movss %xmm6, 48(%rsp)
movl $0, 52(%rsp)
movss .LC2(%rip), %xmm7
movss %xmm7, 24(%rsp)
leaq .LC5(%rip), %rax
movq %rax, 32(%rsp)
movl $128, 12(%rsp)
movl $10, 64(%rsp)
movss .LC6(%rip), %xmm6
movss %xmm6, 60(%rsp)
movl $500, %r12d
movss .LC7(%rip), %xmm6
movss %xmm6, 16(%rsp)
movl $10000, 20(%rsp)
movl $128, 80(%rsp)
movl $100, 56(%rsp)
movss %xmm7, 40(%rsp)
movss .LC8(%rip), %xmm7
movss %xmm7, 44(%rsp)
movb $1, 87(%rsp)
movl $10000, %eax
jmp .L43
.L148:
movq %rax, 72(%rsp)
movslq 20(%rsp), %rax
movq %rax, %rcx
movq %rax, 88(%rsp)
movabsq $209622091746699450, %rax
cmpq %rcx, %rax
jb .L149
imulq $44, 88(%rsp), %r13
movq %r13, %rdi
call _Znam@PLT
jmp .L150
.L147:
movq 264(%rsp), %rax
subq %fs:40, %rax
je .L82
call __stack_chk_fail@PLT
.L82:
call __cxa_throw_bad_array_new_length@PLT
.LEHE0:
.L133:
endbr64
movq %rax, %rdi
cmpq $1, %rdx
je .L107
movq 264(%rsp), %rax
subq %fs:40, %rax
je .L108
call __stack_chk_fail@PLT
.L150:
movq %rax, 96(%rsp)
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
cmpl $0, 20(%rsp)
jle .L86
movq 96(%rsp), %rax
movq %rax, %rbx
leaq (%rax,%r13), %rbp
.L87:
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, (%rbx)
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, 4(%rbx)
movq (%rbx), %rax
movq %rax, 8(%rbx)
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, 32(%rbx)
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, 36(%rbx)
movl $0, 16(%rbx)
movb $0, 24(%rbx)
movl $0, 28(%rbx)
movl $2, 20(%rbx)
movss 24(%rsp), %xmm0
call _Z4rollf
testb %al, %al
setne 40(%rbx)
addq $44, %rbx
cmpq %rbp, %rbx
jne .L87
.L86:
movslq 28(%rsp), %rax
movabsq $1152921504606846975, %rdx
cmpq %rax, %rdx
jb .L88
leaq 0(,%rax,8), %rbp
movq %rbp, %rdi
.LEHB1:
call _Znam@PLT
movq %rax, 112(%rsp)
movq %rax, %rbx
leaq 0(%rbp,%rax), %r14
cmpl $0, 28(%rsp)
jle .L90
.L92:
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, (%rbx)
movss 16(%rsp), %xmm1
pxor %xmm0, %xmm0
call _Z9floatRandff
movss %xmm0, 4(%rbx)
addq $8, %rbx
cmpq %r14, %rbx
jne .L92
.L90:
movq 96(%rsp), %r14
movl $1, 16(%r14)
leaq 136(%rsp), %rdi
movq %r13, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $430, %edx
leaq .LC44(%rip), %rbx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movslq 12(%rsp), %rsi
movslq 80(%rsp), %rax
imulq %rax, %rsi
imulq $48, %rsi, %rsi
leaq 144(%rsp), %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $431, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
leaq 152(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $432, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movslq %r15d, %rax
movq 88(%rsp), %rcx
imulq %rcx, %rax
imulq $12, %rax, %rax
movq %rax, 88(%rsp)
leaq 160(%rsp), %rdi
movq %rax, %rsi
call cudaMalloc@PLT
movl %eax, %edi
movl $433, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movl $1, %ecx
movq %r13, %rdx
movq %r14, %rsi
movq 136(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $435, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
movl $1, %ecx
movq %rbp, %rdx
movq 112(%rsp), %rsi
movq 152(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $436, %edx
movq %rbx, %rsi
call _ZL11HandleError9cudaErrorPKci
.LEHE1:
cmpb $0, 87(%rsp)
jne .L151
testl %r12d, %r12d
jle .L105
.L112:
movl $0, %ebp
leaq .LC48(%rip), %r14
movl 104(%rsp), %r13d
jmp .L97
.L149:
movq 264(%rsp), %rax
subq %fs:40, %rax
je .L85
call __stack_chk_fail@PLT
.L85:
.LEHB2:
call __cxa_throw_bad_array_new_length@PLT
.LEHE2:
.L88:
movq 264(%rsp), %rax
subq %fs:40, %rax
je .L91
call __stack_chk_fail@PLT
.L91:
.LEHB3:
call __cxa_throw_bad_array_new_length@PLT
.L151:
leaq .LC45(%rip), %rsi
movq 32(%rsp), %rdi
call fopen@PLT
movq %rax, %r14
movq %rax, 120(%rsp)
pxor %xmm0, %xmm0
cvtss2sd 16(%rsp), %xmm0
movl 28(%rsp), %ebx
movl %ebx, %r9d
movl %r12d, %r8d
movl 20(%rsp), %ecx
leaq .LC46(%rip), %rdx
movl $2, %esi
movq %rax, %rdi
movl $1, %eax
call __fprintf_chk@PLT
testl %ebx, %ebx
jle .L94
movq 112(%rsp), %rax
movq %rax, %rbx
addq %rax, %rbp
leaq .LC47(%rip), %r13
.L95:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 4(%rbx), %xmm1
movq %r13, %rdx
movl $2, %esi
movq %r14, %rdi
movl $2, %eax
call __fprintf_chk@PLT
addq $8, %rbx
cmpq %rbx, %rbp
jne .L95
testl %r12d, %r12d
jg .L112
jmp .L96
.L98:
call cudaDeviceSynchronize@PLT
addl $1, %ebx
addl $1, %ebp
cmpl %r12d, %ebp
jge .L99
movl %ebx, %eax
.L104:
cmpl %r15d, %ebx
jge .L152
movl %ebp, %edx
movq %r14, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl %r13d, 180(%rsp)
movl $1, 184(%rsp)
movl 12(%rsp), %eax
movl %eax, 168(%rsp)
movl $1, 172(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 180(%rsp), %rdx
movl $1, %ecx
movq 168(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L98
movl 20(%rsp), %eax
movl %eax, 192(%rsp)
movss 16(%rsp), %xmm2
movss %xmm2, 196(%rsp)
movl %r12d, 200(%rsp)
movss 60(%rsp), %xmm3
movss %xmm3, 204(%rsp)
movss 44(%rsp), %xmm4
movss %xmm4, 208(%rsp)
movss 40(%rsp), %xmm5
movss %xmm5, 212(%rsp)
movl 56(%rsp), %eax
movl %eax, 216(%rsp)
movl 64(%rsp), %eax
movl %eax, 220(%rsp)
movl 12(%rsp), %eax
movl %eax, 224(%rsp)
movl %r13d, 228(%rsp)
movq 32(%rsp), %rax
movq %rax, 232(%rsp)
movss 24(%rsp), %xmm6
movss %xmm6, 240(%rsp)
movl 68(%rsp), %eax
movl %eax, 244(%rsp)
movl 52(%rsp), %eax
movl %eax, 248(%rsp)
movl 28(%rsp), %eax
movl %eax, 252(%rsp)
movss 48(%rsp), %xmm7
movss %xmm7, 256(%rsp)
movl %r15d, 260(%rsp)
leaq 192(%rsp), %rdi
subq $8, %rsp
.cfi_def_cfa_offset 344
pushq 168(%rsp)
.cfi_def_cfa_offset 352
movl %ebx, %r9d
movq 168(%rsp), %r8
movl %ebp, %ecx
movq 160(%rsp), %rdx
movq 152(%rsp), %rsi
call _Z94__device_stub__Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfoR17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo
addq $16, %rsp
.cfi_def_cfa_offset 336
jmp .L98
.L152:
movl %eax, %r13d
leaq .LC49(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movq 88(%rsp), %rdx
movq 160(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $469, %edx
leaq .LC44(%rip), %rsi
call _ZL11HandleError9cudaErrorPKci
cmpb $0, 87(%rsp)
jne .L153
.L101:
cmpl %r12d, %ebp
jge .L103
.L97:
movl $0, %ebx
movl %r13d, %eax
movl 80(%rsp), %r13d
jmp .L104
.L153:
movl %r13d, %ebx
jmp .L111
.L132:
movl %ebx, %r13d
jmp .L101
.L103:
cmpb $0, 87(%rsp)
je .L105
.L96:
movq 120(%rsp), %rdi
call fclose@PLT
.L105:
movq 144(%rsp), %rdi
call cudaFree@PLT
movq 136(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rdi
call cudaFree@PLT
movq 160(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call _ZdaPv@PLT
movq 112(%rsp), %rdi
call _ZdaPv@PLT
movq 72(%rsp), %rdi
call _ZdaPv@PLT
movl $0, %eax
.L42:
movq 264(%rsp), %rdx
subq %fs:40, %rdx
jne .L154
addq $280, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L145:
.cfi_restore_state
movl $1, %eax
jmp .L42
.L108:
call _Unwind_Resume@PLT
.LEHE3:
.L107:
call __cxa_begin_catch@PLT
leaq .LC51(%rip), %rsi
movl $2, %edi
movl $0, %eax
.LEHB4:
call __printf_chk@PLT
.LEHE4:
call __cxa_end_catch@PLT
movl $1, %eax
jmp .L42
.L134:
endbr64
movq %rax, %rbx
call __cxa_end_catch@PLT
movq 264(%rsp), %rax
subq %fs:40, %rax
je .L110
call __stack_chk_fail@PLT
.L110:
movq %rbx, %rdi
.LEHB5:
call _Unwind_Resume@PLT
.L99:
leaq .LC49(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movq 88(%rsp), %rdx
movq 160(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $469, %edx
leaq .LC44(%rip), %rsi
call _ZL11HandleError9cudaErrorPKci
cmpb $0, 87(%rsp)
je .L105
.L111:
movl 20(%rsp), %eax
imull %ebx, %eax
testl %eax, %eax
jle .L132
movq 72(%rsp), %rcx
movq %rcx, %r13
cltq
leaq (%rax,%rax,2), %rax
leaq (%rcx,%rax,4), %rax
movl %ebp, 104(%rsp)
movl %ebx, 108(%rsp)
movq %rax, %rbp
movq 120(%rsp), %rbx
.L102:
movl 8(%r13), %ecx
pxor %xmm0, %xmm0
cvtss2sd 0(%r13), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 4(%r13), %xmm1
leaq .LC50(%rip), %rdx
movl $2, %esi
movq %rbx, %rdi
movl $2, %eax
call __fprintf_chk@PLT
.LEHE5:
addq $12, %r13
cmpq %r13, %rbp
jne .L102
movl 104(%rsp), %ebp
movl 108(%rsp), %ebx
movl %ebx, %r13d
jmp .L101
.L94:
testl %r12d, %r12d
jg .L112
jmp .L96
.L154:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2299:
.globl __gxx_personality_v0
.section .gcc_except_table,"a",@progbits
.align 4
.LLSDA2299:
.byte 0xff
.byte 0x9b
.uleb128 .LLSDATT2299-.LLSDATTD2299
.LLSDATTD2299:
.byte 0x1
.uleb128 .LLSDACSE2299-.LLSDACSB2299
.LLSDACSB2299:
.uleb128 .LEHB0-.LFB2299
.uleb128 .LEHE0-.LEHB0
.uleb128 .L133-.LFB2299
.uleb128 0x1
.uleb128 .LEHB1-.LFB2299
.uleb128 .LEHE1-.LEHB1
.uleb128 0
.uleb128 0
.uleb128 .LEHB2-.LFB2299
.uleb128 .LEHE2-.LEHB2
.uleb128 .L133-.LFB2299
.uleb128 0x1
.uleb128 .LEHB3-.LFB2299
.uleb128 .LEHE3-.LEHB3
.uleb128 0
.uleb128 0
.uleb128 .LEHB4-.LFB2299
.uleb128 .LEHE4-.LEHB4
.uleb128 .L134-.LFB2299
.uleb128 0
.uleb128 .LEHB5-.LFB2299
.uleb128 .LEHE5-.LEHB5
.uleb128 0
.uleb128 0
.LLSDACSE2299:
.byte 0x1
.byte 0
.align 4
.long DW.ref._ZTISt9bad_alloc-.
.LLSDATT2299:
.text
.size main, .-main
.section .rodata.str1.8
.align 8
.LC52:
.string "_Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo"
.section .rodata.str1.1
.LC53:
.string "precalc_xorwow_matrix"
.LC54:
.string "precalc_xorwow_offset_matrix"
.LC55:
.string "mrg32k3aM1"
.LC56:
.string "mrg32k3aM2"
.LC57:
.string "mrg32k3aM1SubSeq"
.LC58:
.string "mrg32k3aM2SubSeq"
.LC59:
.string "mrg32k3aM1Seq"
.LC60:
.string "mrg32k3aM2Seq"
.LC61:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2327:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC52(%rip), %rdx
movq %rdx, %rcx
leaq _Z8simulate17SimulationOptionsP6PersonP17curandStateXORWOWiP5PointiP10PersonInfo(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC53(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC54(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC55(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC56(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC57(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC58(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC59(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC60(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC61(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2327:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 805306368
.align 4
.LC2:
.long 1065353216
.align 4
.LC4:
.long 1028443341
.align 4
.LC6:
.long 1077936128
.align 4
.LC7:
.long 1120403456
.align 4
.LC8:
.long 1051260355
.hidden DW.ref._ZTISt9bad_alloc
.weak DW.ref._ZTISt9bad_alloc
.section .data.rel.local.DW.ref._ZTISt9bad_alloc,"awG",@progbits,DW.ref._ZTISt9bad_alloc,comdat
.align 8
.type DW.ref._ZTISt9bad_alloc, @object
.size DW.ref._ZTISt9bad_alloc, 8
DW.ref._ZTISt9bad_alloc:
.quad _ZTISt9bad_alloc
.hidden DW.ref.__gxx_personality_v0
.weak DW.ref.__gxx_personality_v0
.section .data.rel.local.DW.ref.__gxx_personality_v0,"awG",@progbits,DW.ref.__gxx_personality_v0,comdat
.align 8
.type DW.ref.__gxx_personality_v0, @object
.size DW.ref.__gxx_personality_v0, 8
DW.ref.__gxx_personality_v0:
.quad __gxx_personality_v0
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<curand.h>
#include<curand_kernel.h>
#include<string.h>
#include<new>
#define FALSE 0
#define TRUE 1
#define STR_EQ 0
#define max(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
#define abs(a) \
({__typeof__ (a) _a = (a); \
_a >= 0 ? _a : -_a; })
/* =================== BASIC FUNCTIONS =====================================================================*/
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__device__ void curandInit(curandState_t* state_ptr, int tid){
curand_init((unsigned long long)clock(), tid, 0, state_ptr);
}
__device__ float cudaFloatRand(float min, float max, curandState_t* state_ptr){
return min + curand_uniform(state_ptr) * (max - min);
}
__device__ int cudaIntRand(int min, int max, curandState_t* state_ptr){
return int(cudaFloatRand(float(min), float(max + 1.0), state_ptr));
}
__host__ float floatRand(float min, float max){
float scale = rand() / (float) RAND_MAX;
return min + scale * (max - min);
}
__host__ char roll(float probability){
if(floatRand(0.0, 1.0) < probability)
return TRUE;
return FALSE;
}
__device__ char cudaRoll(float probability, curandState_t* curand_state_ptr){
if(cudaFloatRand(0.0, 1.0, curand_state_ptr) < probability)
return TRUE;
return FALSE;
}
/* =================== STRUCTS AND METHODS =====================================================================*/
typedef struct SimulationOptions{
int N;
float DIM;
int simulation_time;
float infection_r;
float infection_p;
float velocity;
int immune_time;
int sympthoms_time;
int blocks;
int threads_per_block;
char* output;
float lawful_p;
int quarantine_sick_time;
int quarantine_all_time;
int gathering_points_n;
float gathering_point_p;
int buffor_size;
} SimulationOptions;
typedef enum{HEALTHY, CARRIER, SICK, IMMUNE} Health;
typedef enum{GOING_TO, GOING_BACK, NO_DESTINATION} GatheringPointTravel;
typedef struct Point{
float x;
float y;
} Point;
__host__ Point randPoint(float DIM){
Point point;
point.x = floatRand(0.0, DIM);
point.y = floatRand(0.0, DIM);
return point;
}
__device__ Point cudaRandPoint(float DIM, curandState_t* state_ptr){
Point point;
point.x = cudaFloatRand(0.0, DIM, state_ptr);
point.y = cudaFloatRand(0.0, DIM, state_ptr);
return point;
}
__host__ __device__ float distance(Point p1, Point p2){
float dx = abs(p1.x - p2.x);
float dy = abs(p1.y - p2.y);
return sqrt(dx * dx + dy * dy);
}
typedef struct Person{
Point location;
Point home;
Health health;
GatheringPointTravel travel;
char quarantined; // SICK people are totaly quarantined, the rest is partialy quarantined
int time_sick;
Point destination;
char lawful;
} Person;
typedef struct PersonInfo{
Point location;
Health health;
} PersonInfo;
/* =================== DEVICE CODE =====================================================================*/
__device__ void updateQuarantine(SimulationOptions settings, Person* person_ptr, int time){
if(!(person_ptr->lawful))
return;
if(settings.quarantine_all_time && settings.quarantine_all_time < time)
person_ptr->quarantined = TRUE;
else if(settings.quarantine_sick_time && settings.quarantine_sick_time < time){
if(person_ptr->health == SICK){
person_ptr->quarantined = TRUE;
person_ptr->travel = NO_DESTINATION;
}
else
person_ptr->quarantined = FALSE;
}
}
__device__ void migrate(
SimulationOptions settings,
Person* person_ptr,
curandState_t* state_ptr,
Point* gathering_points
){
float angle, dy, dx;
float destination_r = settings.velocity;
if(person_ptr->quarantined){
if(person_ptr->health == SICK)
return;
if(person_ptr->travel == GOING_TO && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = person_ptr->home;
person_ptr->travel = GOING_BACK;
}
if(person_ptr->travel == GOING_BACK && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->travel = NO_DESTINATION;
}
if(person_ptr->travel == NO_DESTINATION){
if(!settings.gathering_points_n)
return;
if(!cudaRoll(settings.gathering_point_p, state_ptr))
return;
person_ptr->destination = gathering_points[cudaIntRand(0, settings.gathering_points_n - 1, state_ptr)];
person_ptr->travel = GOING_TO;
}
}
else if(distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = cudaRandPoint(settings.DIM, state_ptr);
}
dy = person_ptr->destination.y - person_ptr->location.y;
dx = person_ptr->destination.x - person_ptr->location.x;
angle = atan2(dy, dx);
person_ptr->location.x = min(max(person_ptr->location.x + cos(angle) * settings.velocity, 0.0), settings.DIM);
person_ptr->location.y = min(max(person_ptr->location.y + sin(angle) * settings.velocity, 0.0), settings.DIM);
}
__device__ void developDisease(SimulationOptions settings, Person* person_ptr){
if(person_ptr->health == CARRIER || person_ptr->health == SICK)
person_ptr->time_sick += 1;
if(person_ptr->time_sick > settings.immune_time)
person_ptr->health = IMMUNE;
else if(person_ptr->time_sick > settings.sympthoms_time)
person_ptr->health = SICK;
}
// there may be races, but it doesn't matter (I think?)
__device__ void infect(
SimulationOptions settings,
Person* population,
int me_idx,
curandState_t* curand_state_ptr
){
Person* me_ptr = &population[me_idx];
Person* person_ptr;
int i;
if((me_ptr->health == CARRIER || me_ptr->health == SICK) && !(me_ptr->quarantined && me_ptr->health == SICK)){
for(i = 0; i < settings.N; i++){
person_ptr = &population[i];
if(i == me_idx) continue;
if(person_ptr->quarantined && person_ptr->travel == NO_DESTINATION) continue;
if(person_ptr->health == CARRIER || person_ptr->health == SICK) continue;
if(distance(me_ptr->location, person_ptr->location) > settings.infection_r) continue;
if(cudaRoll(settings.infection_p, curand_state_ptr))
person_ptr->health = CARRIER;
}
}
}
__global__ void simulate(
SimulationOptions settings,
Person* population,
curandState_t* curand_states,
int time,
Point* gathering_points,
int buffor_index,
PersonInfo* population_info
){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
Person* person_ptr;
curandState_t my_curand_state = curand_states[tid];
curandInit(&my_curand_state, tid);
// develop disease
i = tid;
while(i < settings.N){
person_ptr = &population[i];
developDisease(settings, person_ptr);
i += gridDim.x * blockDim.x;
}
// update population quarantine_all_time
i = tid;
while(i < settings.N){
person_ptr = &population[i];
updateQuarantine(settings, person_ptr, time);
i += gridDim.x * blockDim.x;
}
// migration of population
i = tid;
while(i < settings.N){
person_ptr = &population[i];
migrate(settings, person_ptr, &my_curand_state, gathering_points);
i += gridDim.x * blockDim.x;
}
// spread of disease
i = tid;
while(i < settings.N){
infect(settings, population, i, &my_curand_state);
i += gridDim.x * blockDim.x;
}
// save to buffor
i = tid;
while(i < settings.N){
population_info[settings.N * buffor_index + i].location = population[i].location;
population_info[settings.N * buffor_index + i].health = population[i].health;
i += gridDim.x * blockDim.x;
}
}
/* =================== HOST =====================================================================*/
int main(int argc, char** argv){
SimulationOptions settings;
int i, j, buffors_simulated;
FILE* file;
char save_output;
Person* population;
Person* dev_population;
curandState_t* curand_states;
Point* gathering_points;
Point* dev_gathering_points;
PersonInfo* population_info;
PersonInfo* dev_population_info;
settings.N = 10000;
settings.DIM = 100;
settings.simulation_time = 500;
settings.velocity = 1.0;
settings.infection_p = 0.33;
settings.infection_r = 3.0;
settings.immune_time = 100;
settings.sympthoms_time = 10;
settings.blocks = 128;
settings.threads_per_block = 128;
settings.output = "output.sim";
settings.quarantine_all_time = 0;
settings.quarantine_sick_time = 0;
settings.lawful_p = 1.0;
settings.gathering_points_n = 0;
settings.gathering_point_p = 0.05;
settings.buffor_size = 1;
//read commandline args
i = 1;
while(i < argc - 1){
if(strcmp(argv[i], "--N") == STR_EQ || strcmp(argv[i], "-N") == STR_EQ){
settings.N = atoi(argv[++i]);
if(settings.N < 1) return 1;
}
else if(strcmp(argv[i], "-=DIM") == STR_EQ || strcmp(argv[i], "-DIM") == STR_EQ){
settings.DIM = atof(argv[++i]);
if(settings.DIM <= 0.0) return 1;
}
else if(strcmp(argv[i], "--simulation_n") == STR_EQ || strcmp(argv[i], "-simn") == STR_EQ){
settings.simulation_time = atoi(argv[++i]);
if(settings.simulation_time < 1) return 1;
}
else if(strcmp(argv[i], "--velocity") == STR_EQ || strcmp(argv[i], "-v") == STR_EQ){
settings.velocity = atof(argv[++i]);
if(settings.velocity < 0) return 1;
}
else if(strcmp(argv[i], "--infection_p") == STR_EQ || strcmp(argv[i], "-infp") == STR_EQ){
settings.infection_p = atof(argv[++i]);
if(settings.infection_p <= 0.0) return 1;
}
else if(strcmp(argv[i], "--infection_r") == STR_EQ || strcmp(argv[i], "-infr") == STR_EQ){
settings.infection_r = atof(argv[++i]);
if(settings.infection_r <= 0.0) return 1;
}
else if(strcmp(argv[i], "--immune_time") == STR_EQ || strcmp(argv[i], "-immt") == STR_EQ){
settings.immune_time = atoi(argv[++i]);
if(settings.immune_time < 0) return 1;
}
else if(strcmp(argv[i], "--sympthoms_time") == STR_EQ || strcmp(argv[i], "-symt") == STR_EQ){
settings.sympthoms_time = atoi(argv[++i]);
if(settings.sympthoms_time < 0) return 1;
}
else if(strcmp(argv[i], "--blocks") == STR_EQ || strcmp(argv[i], "-b") == STR_EQ){
settings.blocks = atoi(argv[++i]);
if(settings.blocks < 1) return 1;
}
else if(strcmp(argv[i], "--threads_per_block") == STR_EQ || strcmp(argv[i], "-tpb") == STR_EQ){
settings.threads_per_block = atoi(argv[++i]);
if(settings.threads_per_block < 1) return 1;
}
else if(strcmp(argv[i], "--output") == STR_EQ || strcmp(argv[i], "-o") == STR_EQ){
settings.output = argv[++i];
if(!settings.output) return 1;
}
else if(strcmp(argv[i], "--quarantine_all_time") == STR_EQ || strcmp(argv[i], "-qat") == STR_EQ){
settings.quarantine_all_time = atoi(argv[++i]);
if(settings.quarantine_all_time < 0) return 1;
}
else if(strcmp(argv[i], "--quarantine_sick_time") == STR_EQ || strcmp(argv[i], "-qst") == STR_EQ){
settings.quarantine_sick_time = atoi(argv[++i]);
if(settings.quarantine_sick_time < 0) return 1;
}
else if(strcmp(argv[i], "--lawful_p") == STR_EQ || strcmp(argv[i], "-lawp") == STR_EQ){
settings.lawful_p = atof(argv[++i]);
if(settings.lawful_p < 0.0) return 1;
}
else if(strcmp(argv[i], "--gathering_points_n") == STR_EQ || strcmp(argv[i], "-gn") == STR_EQ){
settings.gathering_points_n = atoi(argv[++i]);
if(settings.gathering_points_n < 0) return 1;
}
else if(strcmp(argv[i], "--gathering_point_p") == STR_EQ || strcmp(argv[i], "-gp") == STR_EQ){
settings.gathering_point_p = atof(argv[++i]);
if(settings.gathering_point_p < 0.0) return 1;
}
else if(strcmp(argv[i], "--buffor_size") == STR_EQ || strcmp(argv[i], "-buff") == STR_EQ){
settings.buffor_size = atoi(argv[++i]);
if(settings.buffor_size < 1) return 1;
}
i++;
}
if(strcmp(settings.output, "none") == STR_EQ)
save_output = FALSE;
else
save_output = TRUE;
try{
population_info = new PersonInfo[settings.N * settings.buffor_size];
population = new Person[settings.N];
}
catch(const std::bad_alloc& e){
printf("Insufficent memory on host\n");
return 1;
}
srand((unsigned int)time(NULL));
for(i = 0; i < settings.N; i++){
population[i].location.x = floatRand(0.0, settings.DIM);
population[i].location.y = floatRand(0.0, settings.DIM);
population[i].home = population[i].location;
population[i].destination.x = floatRand(0.0, settings.DIM);
population[i].destination.y = floatRand(0.0, settings.DIM);
population[i].health = HEALTHY;
population[i].quarantined = FALSE;
population[i].time_sick = 0;
population[i].travel = NO_DESTINATION;
if(roll(settings.lawful_p))
population[i].lawful = TRUE;
else
population[i].lawful = FALSE;
}
gathering_points = new Point[settings.gathering_points_n];
for(i = 0; i < settings.gathering_points_n; i++){
gathering_points[i].x = floatRand(0.0, settings.DIM);
gathering_points[i].y = floatRand(0.0, settings.DIM);
}
//patient zero
population[0].health = CARRIER;
HANDLE_ERROR( cudaMalloc((void**)&dev_population, sizeof(Person) * settings.N) );
HANDLE_ERROR( cudaMalloc((void**)&curand_states, sizeof(curandState_t) * settings.blocks * settings.threads_per_block) );
HANDLE_ERROR( cudaMalloc((void**)&dev_gathering_points, sizeof(Point) * settings.gathering_points_n) );
HANDLE_ERROR( cudaMalloc((void**)&dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size) );
HANDLE_ERROR( cudaMemcpy(dev_population, population, sizeof(Person) * settings.N, cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(dev_gathering_points, gathering_points, sizeof(Point) * settings.gathering_points_n, cudaMemcpyHostToDevice) );
if(save_output){
file = fopen(settings.output, "w");
fprintf(file, "%d %f %d %d\n", settings.N, settings.DIM, settings.simulation_time, settings.gathering_points_n);
for(i = 0; i < settings.gathering_points_n; i++)
fprintf(file, "%f %f\n", gathering_points[i].x, gathering_points[i].y);
}
// for(i = 0; i < settings.simulation_time; i++){
// printf("==========SIM%d==========\n", i);
// simulate<<<settings.blocks, settings.threads_per_block>>>(settings, dev_population, curand_states, i, dev_gathering_points);
// cudaDeviceSynchronize();
// HANDLE_ERROR( cudaMemcpy(population, dev_population, sizeof(Person) * settings.N, cudaMemcpyDeviceToHost) );
// if(save_output){
// for(j = 0; j < settings.N; j++){
// fprintf(file, "%f %f %d\n", population[j].location.x, population[j].location.y, population[j].health);
// }
// }
// }
i = 0;
while(i < settings.simulation_time){
for(j = 0; j < settings.buffor_size; j++){
printf("==========SIM%d==========\n", i);
simulate<<<settings.blocks, settings.threads_per_block>>>(
settings, dev_population, curand_states, i, dev_gathering_points, j, dev_population_info
);
cudaDeviceSynchronize();
buffors_simulated = j + 1;
i++;
if(i >= settings.simulation_time)
break;
}
printf("Coping buffor from GPU to host...\n");
HANDLE_ERROR( cudaMemcpy(
population_info, dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size, cudaMemcpyDeviceToHost
) );
if(save_output){
for(j = 0; j < settings.N * buffors_simulated; j++){
fprintf(file, "%f %f %d\n", population_info[j].location.x, population_info[j].location.y, population_info[j].health);
}
}
}
if(save_output)
fclose(file);
cudaFree(curand_states);
cudaFree(dev_population);
cudaFree(dev_gathering_points);
cudaFree(dev_population_info);
delete[] population;
delete[] gathering_points;
delete[] population_info;
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<hiprand/hiprand.h>
#include<hiprand/hiprand_kernel.h>
#include<string.h>
#include<new>
#define FALSE 0
#define TRUE 1
#define STR_EQ 0
#define max(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define min(a, b) \
({__typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _b : _a; })
#define abs(a) \
({__typeof__ (a) _a = (a); \
_a >= 0 ? _a : -_a; })
/* =================== BASIC FUNCTIONS =====================================================================*/
static void HandleError( hipError_t err,
const char *file,
int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__device__ void curandInit(hiprandState_t* state_ptr, int tid){
hiprand_init((unsigned long long)clock(), tid, 0, state_ptr);
}
__device__ float cudaFloatRand(float min, float max, hiprandState_t* state_ptr){
return min + hiprand_uniform(state_ptr) * (max - min);
}
__device__ int cudaIntRand(int min, int max, hiprandState_t* state_ptr){
return int(cudaFloatRand(float(min), float(max + 1.0), state_ptr));
}
__host__ float floatRand(float min, float max){
float scale = rand() / (float) RAND_MAX;
return min + scale * (max - min);
}
__host__ char roll(float probability){
if(floatRand(0.0, 1.0) < probability)
return TRUE;
return FALSE;
}
__device__ char cudaRoll(float probability, hiprandState_t* curand_state_ptr){
if(cudaFloatRand(0.0, 1.0, curand_state_ptr) < probability)
return TRUE;
return FALSE;
}
/* =================== STRUCTS AND METHODS =====================================================================*/
// All tunable parameters of one simulation run; populated with defaults in
// main() and selectively overridden by command-line flags.
typedef struct SimulationOptions{
int N;                     // population size
float DIM;                 // world is the square [0, DIM] x [0, DIM]
int simulation_time;       // number of simulated time steps
float infection_r;         // infection radius (world units)
float infection_p;         // per-contact, per-step infection probability
float velocity;            // distance a person moves per step
int immune_time;           // steps infected after which a person turns IMMUNE
int sympthoms_time;        // steps infected after which a CARRIER turns SICK
int blocks;                // kernel launch: grid size
int threads_per_block;     // kernel launch: block size
char* output;              // output filename; the literal "none" disables output
float lawful_p;            // probability a person obeys quarantine orders
int quarantine_sick_time;  // step after which SICK people are quarantined (0 = off)
int quarantine_all_time;   // step after which everyone lawful is quarantined (0 = off)
int gathering_points_n;    // number of gathering points in the world
float gathering_point_p;   // per-step chance a quarantined idle person starts a trip
int buffor_size;           // steps buffered on the GPU between host copies
} SimulationOptions;
// Disease progression: HEALTHY -> CARRIER -> SICK -> IMMUNE (see developDisease).
typedef enum{HEALTHY, CARRIER, SICK, IMMUNE} Health;
// Phase of a quarantined person's round trip to a gathering point (see migrate).
typedef enum{GOING_TO, GOING_BACK, NO_DESTINATION} GatheringPointTravel;
// 2D position inside the [0, DIM] x [0, DIM] world.
typedef struct Point{
float x;
float y;
} Point;
// Uniform random point inside the DIM x DIM world (host side).
__host__ Point randPoint(float DIM){
    Point p;
    p.x = floatRand(0.0, DIM);
    p.y = floatRand(0.0, DIM);
    return p;
}
// Uniform random point inside the DIM x DIM world (device side).
__device__ Point cudaRandPoint(float DIM, hiprandState_t* state_ptr){
    Point p;
    p.x = cudaFloatRand(0.0, DIM, state_ptr);
    p.y = cudaFloatRand(0.0, DIM, state_ptr);
    return p;
}
// Euclidean distance between two points.
// Fixed: the previous version routed both deltas through the abs() macro
// (unnecessary — they are squared immediately) and called double-precision
// sqrt() on a float expression; sqrtf() keeps the math in single precision.
__host__ __device__ float distance(Point p1, Point p2){
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    return sqrtf(dx * dx + dy * dy);
}
// One simulated individual.
typedef struct Person{
Point location;    // current position
Point home;        // initial position; quarantined people return here
Health health;     // current disease state
GatheringPointTravel travel; // phase of the current gathering-point trip
char quarantined; // SICK people are totally quarantined (no movement); the rest only partially
int time_sick;     // steps since infection; advanced while CARRIER/SICK
Point destination; // current movement target
char lawful;       // whether this person obeys quarantine orders
} Person;
// Minimal per-person snapshot copied back to the host for each buffered step.
typedef struct PersonInfo{
Point location;
Health health;
} PersonInfo;
/* =================== DEVICE CODE =====================================================================*/
// Apply the quarantine policy in effect at time step `time` to one person.
// Unlawful people ignore quarantine entirely. Once the global quarantine has
// started it takes precedence over the sick-only policy; under the sick-only
// policy a SICK person is locked down (trip cancelled) and everyone else is
// released.
__device__ void updateQuarantine(SimulationOptions settings, Person* person_ptr, int time){
    if (!person_ptr->lawful)
        return;
    if (settings.quarantine_all_time && settings.quarantine_all_time < time) {
        person_ptr->quarantined = TRUE;
        return;
    }
    if (settings.quarantine_sick_time && settings.quarantine_sick_time < time) {
        if (person_ptr->health == SICK) {
            person_ptr->quarantined = TRUE;
            person_ptr->travel = NO_DESTINATION;
        } else {
            person_ptr->quarantined = FALSE;
        }
    }
}
// Move one person one step of length settings.velocity toward their current
// destination, choosing a new destination once the old one is within a single
// step's reach (destination_r):
//  - quarantined + SICK: does not move at all;
//  - quarantined (not SICK): only shuttles home <-> gathering point, starting
//    a new trip with probability gathering_point_p per idle step;
//  - not quarantined: wanders between uniformly random points in the world.
// The final position is clamped to the [0, DIM] square.
// NOTE(review): atan2/cos/sin below are double-precision overloads applied to
// float operands — atan2f/cosf/sinf would avoid the promotions; verify the
// precision requirements before changing.
__device__ void migrate(
SimulationOptions settings,
Person* person_ptr,
hiprandState_t* state_ptr,
Point* gathering_points
){
float angle, dy, dx;
// "arrived" means closer to the destination than one step of travel
float destination_r = settings.velocity;
if(person_ptr->quarantined){
if(person_ptr->health == SICK)
return;
// reached the gathering point -> turn around and head home
if(person_ptr->travel == GOING_TO && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = person_ptr->home;
person_ptr->travel = GOING_BACK;
}
// reached home again -> trip finished
if(person_ptr->travel == GOING_BACK && distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->travel = NO_DESTINATION;
}
// idle at home: maybe start a trip to a random gathering point
if(person_ptr->travel == NO_DESTINATION){
if(!settings.gathering_points_n)
return;
if(!cudaRoll(settings.gathering_point_p, state_ptr))
return;
person_ptr->destination = gathering_points[cudaIntRand(0, settings.gathering_points_n - 1, state_ptr)];
person_ptr->travel = GOING_TO;
}
}
else if(distance(person_ptr->location, person_ptr->destination) < destination_r){
person_ptr->destination = cudaRandPoint(settings.DIM, state_ptr);
}
// take one step toward the destination, clamped to the world bounds
dy = person_ptr->destination.y - person_ptr->location.y;
dx = person_ptr->destination.x - person_ptr->location.x;
angle = atan2(dy, dx);
person_ptr->location.x = min(max(person_ptr->location.x + cos(angle) * settings.velocity, 0.0), settings.DIM);
person_ptr->location.y = min(max(person_ptr->location.y + sin(angle) * settings.velocity, 0.0), settings.DIM);
}
// Advance one person's disease clock by one step: infected people (CARRIER or
// SICK) accumulate time_sick; past sympthoms_time they become SICK, and past
// immune_time they recover to IMMUNE.
__device__ void developDisease(SimulationOptions settings, Person* person_ptr){
    Health h = person_ptr->health;
    if (h == CARRIER || h == SICK)
        person_ptr->time_sick += 1;
    if (person_ptr->time_sick > settings.immune_time) {
        person_ptr->health = IMMUNE;
    } else if (person_ptr->time_sick > settings.sympthoms_time) {
        person_ptr->health = SICK;
    }
}
// there may be races, but it doesn't matter (I think?)
// Spread the disease from person `me_idx` to everyone within infection_r, if
// that person is contagious (CARRIER or SICK) and not fully isolated
// (quarantined && SICK). People quarantined at home (NO_DESTINATION) cannot
// be reached; already-infected contacts are skipped. Each eligible contact
// becomes a CARRIER with probability infection_p.
// NOTE(review): health is written without atomics while other threads read it
// in the same pass (see the race note above), so infection counts are
// approximate — confirm this is acceptable for the model.
__device__ void infect(
SimulationOptions settings,
Person* population,
int me_idx,
hiprandState_t* curand_state_ptr
){
Person* me_ptr = &population[me_idx];
Person* person_ptr;
int i;
if((me_ptr->health == CARRIER || me_ptr->health == SICK) && !(me_ptr->quarantined && me_ptr->health == SICK)){
for(i = 0; i < settings.N; i++){
person_ptr = &population[i];
if(i == me_idx) continue;
if(person_ptr->quarantined && person_ptr->travel == NO_DESTINATION) continue;
if(person_ptr->health == CARRIER || person_ptr->health == SICK) continue;
if(distance(me_ptr->location, person_ptr->location) > settings.infection_r) continue;
if(cudaRoll(settings.infection_p, curand_state_ptr))
person_ptr->health = CARRIER;
}
}
}
// One grid-wide simulation step. Each of the five phases below is a
// grid-stride loop over the whole population, so any thread may touch any
// person. The step's snapshot is written to slot `buffor_index` of
// population_info so the host can copy several steps back in one hipMemcpy.
// NOTE(review): there is no barrier between phases (no __syncthreads(), and
// no grid-wide sync is possible from a plain launch), so e.g. infect() can
// read locations other threads have not migrated yet this step — presumably
// tolerated as approximate, like the race noted on infect(); confirm.
__global__ void simulate(
SimulationOptions settings,
Person* population,
hiprandState_t* curand_states,
int time,
Point* gathering_points,
int buffor_index,
PersonInfo* population_info
){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i;
Person* person_ptr;
// NOTE(review): the state loaded from curand_states is immediately
// re-seeded from clock() and never written back, so the global array is
// effectively unused scratch — confirm whether persisting RNG state across
// launches was intended.
hiprandState_t my_curand_state = curand_states[tid];
curandInit(&my_curand_state, tid);
// develop disease
i = tid;
while(i < settings.N){
person_ptr = &population[i];
developDisease(settings, person_ptr);
i += gridDim.x * blockDim.x;
}
// update population quarantine_all_time
i = tid;
while(i < settings.N){
person_ptr = &population[i];
updateQuarantine(settings, person_ptr, time);
i += gridDim.x * blockDim.x;
}
// migration of population
i = tid;
while(i < settings.N){
person_ptr = &population[i];
migrate(settings, person_ptr, &my_curand_state, gathering_points);
i += gridDim.x * blockDim.x;
}
// spread of disease
i = tid;
while(i < settings.N){
infect(settings, population, i, &my_curand_state);
i += gridDim.x * blockDim.x;
}
// save to buffor
i = tid;
while(i < settings.N){
population_info[settings.N * buffor_index + i].location = population[i].location;
population_info[settings.N * buffor_index + i].health = population[i].health;
i += gridDim.x * blockDim.x;
}
}
/* =================== HOST =====================================================================*/
/*
 * Entry point: parse command-line overrides, build the initial population,
 * run the simulation on the GPU in batches of buffor_size steps, and stream
 * per-step per-person snapshots to the output file.
 *
 * Fixes vs. the previous revision:
 *  - the long form of the DIM flag was misspelled "-=DIM"; it is now "--DIM"
 *    (the short "-DIM" form is unchanged),
 *  - fopen() failure is detected instead of dereferencing a NULL FILE*,
 *  - buffors_simulated is initialised so it can never be read undefined,
 *  - the string-literal default for settings.output is cast explicitly
 *    (ISO C++ forbids literal -> char* conversion),
 *  - a stale commented-out copy of the simulation loop was removed.
 * Returns 0 on success, 1 on bad arguments / allocation / file failure.
 */
int main(int argc, char** argv){
    SimulationOptions settings;
    int i, j, buffors_simulated = 0;
    FILE* file = NULL;
    char save_output;
    Person* population;
    Person* dev_population;
    hiprandState_t* curand_states;
    Point* gathering_points;
    Point* dev_gathering_points;
    PersonInfo* population_info;
    PersonInfo* dev_population_info;
    /* defaults; any of these can be overridden by the flags parsed below */
    settings.N = 10000;
    settings.DIM = 100;
    settings.simulation_time = 500;
    settings.velocity = 1.0;
    settings.infection_p = 0.33;
    settings.infection_r = 3.0;
    settings.immune_time = 100;
    settings.sympthoms_time = 10;
    settings.blocks = 128;
    settings.threads_per_block = 128;
    settings.output = (char*)"output.sim";
    settings.quarantine_all_time = 0;
    settings.quarantine_sick_time = 0;
    settings.lawful_p = 1.0;
    settings.gathering_points_n = 0;
    settings.gathering_point_p = 0.05;
    settings.buffor_size = 1;
    /* read commandline args: "--long value" / "-short value" pairs; each
       numeric value is range-checked and a bad value aborts the run */
    i = 1;
    while(i < argc - 1){
        if(strcmp(argv[i], "--N") == STR_EQ || strcmp(argv[i], "-N") == STR_EQ){
            settings.N = atoi(argv[++i]);
            if(settings.N < 1) return 1;
        }
        else if(strcmp(argv[i], "--DIM") == STR_EQ || strcmp(argv[i], "-DIM") == STR_EQ){ /* fixed: long form was "-=DIM" */
            settings.DIM = atof(argv[++i]);
            if(settings.DIM <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--simulation_n") == STR_EQ || strcmp(argv[i], "-simn") == STR_EQ){
            settings.simulation_time = atoi(argv[++i]);
            if(settings.simulation_time < 1) return 1;
        }
        else if(strcmp(argv[i], "--velocity") == STR_EQ || strcmp(argv[i], "-v") == STR_EQ){
            settings.velocity = atof(argv[++i]);
            if(settings.velocity < 0) return 1;
        }
        else if(strcmp(argv[i], "--infection_p") == STR_EQ || strcmp(argv[i], "-infp") == STR_EQ){
            settings.infection_p = atof(argv[++i]);
            if(settings.infection_p <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--infection_r") == STR_EQ || strcmp(argv[i], "-infr") == STR_EQ){
            settings.infection_r = atof(argv[++i]);
            if(settings.infection_r <= 0.0) return 1;
        }
        else if(strcmp(argv[i], "--immune_time") == STR_EQ || strcmp(argv[i], "-immt") == STR_EQ){
            settings.immune_time = atoi(argv[++i]);
            if(settings.immune_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--sympthoms_time") == STR_EQ || strcmp(argv[i], "-symt") == STR_EQ){
            settings.sympthoms_time = atoi(argv[++i]);
            if(settings.sympthoms_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--blocks") == STR_EQ || strcmp(argv[i], "-b") == STR_EQ){
            settings.blocks = atoi(argv[++i]);
            if(settings.blocks < 1) return 1;
        }
        else if(strcmp(argv[i], "--threads_per_block") == STR_EQ || strcmp(argv[i], "-tpb") == STR_EQ){
            settings.threads_per_block = atoi(argv[++i]);
            if(settings.threads_per_block < 1) return 1;
        }
        else if(strcmp(argv[i], "--output") == STR_EQ || strcmp(argv[i], "-o") == STR_EQ){
            settings.output = argv[++i];
            if(!settings.output) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_all_time") == STR_EQ || strcmp(argv[i], "-qat") == STR_EQ){
            settings.quarantine_all_time = atoi(argv[++i]);
            if(settings.quarantine_all_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--quarantine_sick_time") == STR_EQ || strcmp(argv[i], "-qst") == STR_EQ){
            settings.quarantine_sick_time = atoi(argv[++i]);
            if(settings.quarantine_sick_time < 0) return 1;
        }
        else if(strcmp(argv[i], "--lawful_p") == STR_EQ || strcmp(argv[i], "-lawp") == STR_EQ){
            settings.lawful_p = atof(argv[++i]);
            if(settings.lawful_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_points_n") == STR_EQ || strcmp(argv[i], "-gn") == STR_EQ){
            settings.gathering_points_n = atoi(argv[++i]);
            if(settings.gathering_points_n < 0) return 1;
        }
        else if(strcmp(argv[i], "--gathering_point_p") == STR_EQ || strcmp(argv[i], "-gp") == STR_EQ){
            settings.gathering_point_p = atof(argv[++i]);
            if(settings.gathering_point_p < 0.0) return 1;
        }
        else if(strcmp(argv[i], "--buffor_size") == STR_EQ || strcmp(argv[i], "-buff") == STR_EQ){
            settings.buffor_size = atoi(argv[++i]);
            if(settings.buffor_size < 1) return 1;
        }
        i++;
    }
    /* the literal output name "none" disables file output entirely */
    if(strcmp(settings.output, "none") == STR_EQ)
        save_output = FALSE;
    else
        save_output = TRUE;
    try{
        population_info = new PersonInfo[settings.N * settings.buffor_size];
        population = new Person[settings.N];
    }
    catch(const std::bad_alloc& e){
        printf("Insufficent memory on host\n");
        return 1;
    }
    srand((unsigned int)time(NULL));
    /* everyone starts healthy at a random home with a random first destination */
    for(i = 0; i < settings.N; i++){
        population[i].location.x = floatRand(0.0, settings.DIM);
        population[i].location.y = floatRand(0.0, settings.DIM);
        population[i].home = population[i].location;
        population[i].destination.x = floatRand(0.0, settings.DIM);
        population[i].destination.y = floatRand(0.0, settings.DIM);
        population[i].health = HEALTHY;
        population[i].quarantined = FALSE;
        population[i].time_sick = 0;
        population[i].travel = NO_DESTINATION;
        population[i].lawful = roll(settings.lawful_p) ? TRUE : FALSE;
    }
    gathering_points = new Point[settings.gathering_points_n];
    for(i = 0; i < settings.gathering_points_n; i++){
        gathering_points[i].x = floatRand(0.0, settings.DIM);
        gathering_points[i].y = floatRand(0.0, settings.DIM);
    }
    /* patient zero */
    population[0].health = CARRIER;
    HANDLE_ERROR( hipMalloc((void**)&dev_population, sizeof(Person) * settings.N) );
    HANDLE_ERROR( hipMalloc((void**)&curand_states, sizeof(hiprandState_t) * settings.blocks * settings.threads_per_block) );
    HANDLE_ERROR( hipMalloc((void**)&dev_gathering_points, sizeof(Point) * settings.gathering_points_n) );
    HANDLE_ERROR( hipMalloc((void**)&dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size) );
    HANDLE_ERROR( hipMemcpy(dev_population, population, sizeof(Person) * settings.N, hipMemcpyHostToDevice) );
    HANDLE_ERROR( hipMemcpy(dev_gathering_points, gathering_points, sizeof(Point) * settings.gathering_points_n, hipMemcpyHostToDevice) );
    if(save_output){
        file = fopen(settings.output, "w");
        if(file == NULL){ /* fixed: the result was previously used unchecked */
            printf("Could not open output file %s\n", settings.output);
            return 1;
        }
        /* header line: N DIM steps gathering_points_n, then one line per point */
        fprintf(file, "%d %f %d %d\n", settings.N, settings.DIM, settings.simulation_time, settings.gathering_points_n);
        for(i = 0; i < settings.gathering_points_n; i++)
            fprintf(file, "%f %f\n", gathering_points[i].x, gathering_points[i].y);
    }
    /* run buffor_size steps on the GPU per host round-trip, then copy the
       buffered snapshots back and append them to the output file */
    i = 0;
    while(i < settings.simulation_time){
        for(j = 0; j < settings.buffor_size; j++){
            printf("==========SIM%d==========\n", i);
            simulate<<<settings.blocks, settings.threads_per_block>>>(
                settings, dev_population, curand_states, i, dev_gathering_points, j, dev_population_info
            );
            hipDeviceSynchronize();
            buffors_simulated = j + 1;
            i++;
            if(i >= settings.simulation_time)
                break;
        }
        printf("Coping buffor from GPU to host...\n");
        HANDLE_ERROR( hipMemcpy(
            population_info, dev_population_info, sizeof(PersonInfo) * settings.N * settings.buffor_size, hipMemcpyDeviceToHost
        ) );
        if(save_output){
            /* only the slots actually simulated this round are written out */
            for(j = 0; j < settings.N * buffors_simulated; j++){
                fprintf(file, "%f %f %d\n", population_info[j].location.x, population_info[j].location.y, population_info[j].health);
            }
        }
    }
    if(save_output)
        fclose(file);
    hipFree(curand_states);
    hipFree(dev_population);
    hipFree(dev_gathering_points);
    hipFree(dev_population_info);
    delete[] population;
    delete[] gathering_points;
    delete[] population_info;
    return 0;
}
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// QR-iteration eigensolver: one matrix per block.
// Block b diagonalises the n x n matrix (n = WidthOfMatrix[b]) stored at flat
// offset ind[b] inside z/z1/Q/NewQ/R/PrevM/NewM/eigenvector; its length-n
// scratch vectors live at offset vind[b] inside vector/vector1. Each pass
// builds Householder reflectors column by column to factor the current
// iterate, forms the next iterate from R and Q^T, accumulates the eigenvector
// transform, and stops once every diagonal entry's ratio to the previous
// iterate lies in [0.999999, 1.000001]. On exit `vector` holds the
// eigenvalues sorted ascending and `eigenvector` the matching eigenvectors
// (columns swapped alongside the eigenvalue sort by thread 0).
// NOTE(review): every __syncthreads() below sits inside the divergent
// `if(threadIdx.x<numofelements)` branch — threads with threadIdx.x >= n*n
// skip the barriers, which is undefined behavior whenever blockDim.x > n*n.
// Confirm launches always satisfy blockDim.x <= n*n for every block.
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
//extern __shared__ float z1[];
int n = WidthOfMatrix[blockIdx.x];
int index = ind[blockIdx.x];
int vectindex = vind[blockIdx.x];
int numofelements = n*n;
if(threadIdx.x==0){
converged[blockIdx.x] = 0;
}
if(threadIdx.x<numofelements){
int i;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
//set eigenvector to the identity matrix.
if(i/n==i%n)eigenvector[i+index]=1;
else eigenvector[i+index]=0;
}
// working copies: z1 = z (current iterate), Q accumulator, PrevM for the
// convergence comparison at the end of each pass
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i+index;
z1[iplusindex]=z[iplusindex];
Q[iplusindex]=z[iplusindex];
PrevM[iplusindex]=z[iplusindex];
}
do{
int k, j, PowOf2;
for(k=0;k<n-1;k++){
//Householder Code
//STEP 0: Get value of z[k*n+k] for use in step 4
float NormCheck = z[k*n+k+index];
//STEP 1: Find minor matrix of the input matrix z and sets it to z
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i%n==i/n&&i/n<k)z[i+index]=1;
else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
else z[i+index]=0;
}
__syncthreads();
//STEP 2: Find kTH column of z and set to vector
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
vector[i+vectindex] = z[i*n+k+index];
}
}
//STEP 3: Find the norm of the kTh column and set to NormOfKcol
float NormOfKcol;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
//Need a temporary for vector that we can change the values of since we need mcol later
vector1[iplusvectindex] = vector[iplusvectindex];
vector1[iplusvectindex] *= vector1[iplusvectindex];
}
}
PowOf2 = 1;
__syncthreads();
//add all x's together, 2 at a time. O((log(n)) function
// NOTE(review): each thread doubles its private PowOf2 once per j < n it
// owns, so if blockDim.x < n it doubles more than once per outer
// iteration — verify against the intended pow(2, i) progression.
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
// NOTE(review): double-precision sqrt() on a float value; sqrtf() would
// avoid the promotion.
NormOfKcol = sqrt(vector1[0+vectindex]);
//STEP 4: Make Norm Negative if NormCheck is > 0
if(NormCheck > 0) NormOfKcol = -NormOfKcol;
//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
__syncthreads();
//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
//used in ||tmp||
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
PowOf2 = 1;
}
}
__syncthreads();
//add all tmp's together, 2 at a time. O(n(log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
__syncthreads();
//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
// Vdiv = Vmadd / norm
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
}
}
__syncthreads();
//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
// Vmul = I - 2 * Vdiv * Vdiv^T
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
}
//if on the diagonal(row==column)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n){
R[i+index] += 1;
}
}
__syncthreads();
//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i+index]=0;
for(j=0;j<n;j++){
z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
}
}
//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
if(k!=0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewQ[i+index]=0;
for(j=0;j<n;j++)
{
NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
}
}
}
__syncthreads();
//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
if(k==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = R[i+index];
}
}
//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
else {
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = NewQ[i+index];
}
}
//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z1[i+index] = z[i+index];
}
__syncthreads();
}
//Once for loop is completed:
//STEP 13: Multiply matrices Q and m to find the matrix R
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index]=0;
}
for(i=0;i<n;i++)
{
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
}
}
__syncthreads();
//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// # -> #%n*n+#/n
// for n=4 0->0 1->4 2->8 3->12
// 4->1 5->5 6->9 7->13
// 8->2 9->6 10->10 11->14
// 12->3 13->7 14->11 15->15
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i%n*n+i/n+index] = Q[i+index];
}
__syncthreads();
//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
//NewM contains new eigenvectors
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++){
NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
}
}
__syncthreads();
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
eigenvector[i+index]=NewM[i+index];
}
__syncthreads();
//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++)
{
NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
}
}
//STEP 16: Check for Convergence of New Matrix (Newm)
if(threadIdx.x==0){
converged[blockIdx.x] = 1;
}
__syncthreads();
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
PrevM[i+index]/NewM[i+index]<0.999999)){
converged[blockIdx.x] = 0;
}
}
__syncthreads();
//STEP 17: Set up for next iteration if converged is 0
if(converged[blockIdx.x]==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i + index;
z[iplusindex] = NewM[iplusindex];
z1[iplusindex] = NewM[iplusindex];
Q[iplusindex] = NewM[iplusindex];
PrevM[iplusindex] = NewM[iplusindex];
}
}
__syncthreads();
}while(converged[blockIdx.x]==0);
//put eigenvalues into vector
if(threadIdx.x<n){
vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
}
__syncthreads();
if(threadIdx.x==0){
//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
//Simple Bubble Sort
int i1,i2,i3;
for(i1=vectindex;i1<n-1+vectindex;i1++){
for(i2=i1+1;i2<n+vectindex;i2++){
if(vector[i1]>vector[i2]){
float tmp = vector[i1];
vector[i1] = vector[i2];
vector[i2] = tmp;
for(i3 = 0;i3<n;i3++){
float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
}
}
}
}
}
}
} | .file "tmpxft_001bd7b5_00000000-6_block_QR.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.type _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, @function
_Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.LFB2051:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
movq 320(%rsp), %rax
movq %rax, 56(%rsp)
movq 328(%rsp), %rax
movq %rax, 48(%rsp)
movq 336(%rsp), %rax
movq %rax, 40(%rsp)
movq 344(%rsp), %rax
movq %rax, 32(%rsp)
movq 352(%rsp), %rax
movq %rax, 24(%rsp)
movq 360(%rsp), %rax
movq %rax, 16(%rsp)
movq 368(%rsp), %rax
movq %rax, 8(%rsp)
movq 376(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rax
movq %rax, 192(%rsp)
leaq 80(%rsp), %rax
movq %rax, 200(%rsp)
leaq 72(%rsp), %rax
movq %rax, 208(%rsp)
leaq 64(%rsp), %rax
movq %rax, 216(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
leaq 40(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rax
movq %rax, 248(%rsp)
leaq 24(%rsp), %rax
movq %rax, 256(%rsp)
leaq 16(%rsp), %rax
movq %rax, 264(%rsp)
leaq 8(%rsp), %rax
movq %rax, 272(%rsp)
movq %rsp, %rax
movq %rax, 280(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 120(%rsp)
.cfi_def_cfa_offset 328
pushq 120(%rsp)
.cfi_def_cfa_offset 336
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .-_Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.globl _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.type _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, @function
_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 72(%rsp)
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .-_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// QR-iteration eigensolver: one matrix per block.
// Block b diagonalises the n x n matrix (n = WidthOfMatrix[b]) stored at flat
// offset ind[b] inside z/z1/Q/NewQ/R/PrevM/NewM/eigenvector; its length-n
// scratch vectors live at offset vind[b] inside vector/vector1. Each pass
// builds Householder reflectors column by column to factor the current
// iterate, forms the next iterate from R and Q^T, accumulates the eigenvector
// transform, and stops once every diagonal entry's ratio to the previous
// iterate lies in [0.999999, 1.000001]. On exit `vector` holds the
// eigenvalues sorted ascending and `eigenvector` the matching eigenvectors
// (columns swapped alongside the eigenvalue sort by thread 0).
// NOTE(review): every __syncthreads() below sits inside the divergent
// `if(threadIdx.x<numofelements)` branch — threads with threadIdx.x >= n*n
// skip the barriers, which is undefined behavior whenever blockDim.x > n*n.
// Confirm launches always satisfy blockDim.x <= n*n for every block.
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
//extern __shared__ float z1[];
int n = WidthOfMatrix[blockIdx.x];
int index = ind[blockIdx.x];
int vectindex = vind[blockIdx.x];
int numofelements = n*n;
if(threadIdx.x==0){
converged[blockIdx.x] = 0;
}
if(threadIdx.x<numofelements){
int i;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
//set eigenvector to the identity matrix.
if(i/n==i%n)eigenvector[i+index]=1;
else eigenvector[i+index]=0;
}
// working copies: z1 = z (current iterate), Q accumulator, PrevM for the
// convergence comparison at the end of each pass
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i+index;
z1[iplusindex]=z[iplusindex];
Q[iplusindex]=z[iplusindex];
PrevM[iplusindex]=z[iplusindex];
}
do{
int k, j, PowOf2;
for(k=0;k<n-1;k++){
//Householder Code
//STEP 0: Get value of z[k*n+k] for use in step 4
float NormCheck = z[k*n+k+index];
//STEP 1: Find minor matrix of the input matrix z and sets it to z
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i%n==i/n&&i/n<k)z[i+index]=1;
else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
else z[i+index]=0;
}
__syncthreads();
//STEP 2: Find kTH column of z and set to vector
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
vector[i+vectindex] = z[i*n+k+index];
}
}
//STEP 3: Find the norm of the kTh column and set to NormOfKcol
float NormOfKcol;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
//Need a temporary for vector that we can change the values of since we need mcol later
vector1[iplusvectindex] = vector[iplusvectindex];
vector1[iplusvectindex] *= vector1[iplusvectindex];
}
}
PowOf2 = 1;
__syncthreads();
//add all x's together, 2 at a time. O((log(n)) function
// NOTE(review): each thread doubles its private PowOf2 once per j < n it
// owns, so if blockDim.x < n it doubles more than once per outer
// iteration — verify against the intended pow(2, i) progression.
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
// NOTE(review): double-precision sqrt() on a float value; sqrtf() would
// avoid the promotion.
NormOfKcol = sqrt(vector1[0+vectindex]);
//STEP 4: Make Norm Negative if NormCheck is > 0
if(NormCheck > 0) NormOfKcol = -NormOfKcol;
//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
__syncthreads();
//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
//used in ||tmp||
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
PowOf2 = 1;
}
}
__syncthreads();
//add all tmp's together, 2 at a time. O(n(log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
__syncthreads();
//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
// Vdiv = Vmadd / norm
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
}
}
__syncthreads();
//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
// Vmul = I - 2 * Vdiv * Vdiv^T
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
}
//if on the diagonal(row==column)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n){
R[i+index] += 1;
}
}
__syncthreads();
//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i+index]=0;
for(j=0;j<n;j++){
z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
}
}
//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
if(k!=0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewQ[i+index]=0;
for(j=0;j<n;j++)
{
NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
}
}
}
__syncthreads();
//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
if(k==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = R[i+index];
}
}
//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
else {
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = NewQ[i+index];
}
}
//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z1[i+index] = z[i+index];
}
__syncthreads();
}
//Once for loop is completed:
//STEP 13: Multiply matrices Q and m to find the matrix R
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index]=0;
}
for(i=0;i<n;i++)
{
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
}
}
__syncthreads();
//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// # -> #%n*n+#/n
// for n=4 0->0 1->4 2->8 3->12
// 4->1 5->5 6->9 7->13
// 8->2 9->6 10->10 11->14
// 12->3 13->7 14->11 15->15
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i%n*n+i/n+index] = Q[i+index];
}
__syncthreads();
//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
//NewM contains new eigenvectors
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++){
NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
}
}
__syncthreads();
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
eigenvector[i+index]=NewM[i+index];
}
__syncthreads();
//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++)
{
NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
}
}
//STEP 16: Check for Convergence of New Matrix (Newm)
if(threadIdx.x==0){
converged[blockIdx.x] = 1;
}
__syncthreads();
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
PrevM[i+index]/NewM[i+index]<0.999999)){
converged[blockIdx.x] = 0;
}
}
__syncthreads();
//STEP 17: Set up for next iteration if converged is 0
if(converged[blockIdx.x]==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i + index;
z[iplusindex] = NewM[iplusindex];
z1[iplusindex] = NewM[iplusindex];
Q[iplusindex] = NewM[iplusindex];
PrevM[iplusindex] = NewM[iplusindex];
}
}
__syncthreads();
}while(converged[blockIdx.x]==0);
//put eigenvalues into vector
if(threadIdx.x<n){
vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
}
__syncthreads();
if(threadIdx.x==0){
//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
//Simple Bubble Sort
int i1,i2,i3;
for(i1=vectindex;i1<n-1+vectindex;i1++){
for(i2=i1+1;i2<n+vectindex;i2++){
if(vector[i1]>vector[i2]){
float tmp = vector[i1];
vector[i1] = vector[i2];
vector[i2] = tmp;
for(i3 = 0;i3<n;i3++){
float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
}
}
}
}
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
//extern __shared__ float z1[];
int n = WidthOfMatrix[blockIdx.x];
int index = ind[blockIdx.x];
int vectindex = vind[blockIdx.x];
int numofelements = n*n;
if(threadIdx.x==0){
converged[blockIdx.x] = 0;
}
if(threadIdx.x<numofelements){
int i;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
//set eigenvector to the identity matrix.
if(i/n==i%n)eigenvector[i+index]=1;
else eigenvector[i+index]=0;
}
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i+index;
z1[iplusindex]=z[iplusindex];
Q[iplusindex]=z[iplusindex];
PrevM[iplusindex]=z[iplusindex];
}
do{
int k, j, PowOf2;
for(k=0;k<n-1;k++){
//Householder Code
//STEP 0: Get value of z[k*n+k] for use in step 4
float NormCheck = z[k*n+k+index];
//STEP 1: Find minor matrix of the input matrix z and sets it to z
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i%n==i/n&&i/n<k)z[i+index]=1;
else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
else z[i+index]=0;
}
__syncthreads();
//STEP 2: Find kTH column of z and set to vector
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
vector[i+vectindex] = z[i*n+k+index];
}
}
//STEP 3: Find the norm of the kTh column and set to NormOfKcol
float NormOfKcol;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
//Need a temporary for vector that we can change the values of since we need mcol later
vector1[iplusvectindex] = vector[iplusvectindex];
vector1[iplusvectindex] *= vector1[iplusvectindex];
}
}
PowOf2 = 1;
__syncthreads();
//add all x's together, 2 at a time. O((log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
NormOfKcol = sqrt(vector1[0+vectindex]);
//STEP 4: Make Norm Negative if NormCheck is > 0
if(NormCheck > 0) NormOfKcol = -NormOfKcol;
//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
__syncthreads();
//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
//used in ||tmp||
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
PowOf2 = 1;
}
}
__syncthreads();
//add all tmp's together, 2 at a time. O(n(log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
__syncthreads();
//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
// Vdiv = Vmadd / norm
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
}
}
__syncthreads();
//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
// Vmul = I - 2 * Vdiv * Vdiv^T
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
}
//if on the diagonal(row==column)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n){
R[i+index] += 1;
}
}
__syncthreads();
//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i+index]=0;
for(j=0;j<n;j++){
z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
}
}
//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
if(k!=0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewQ[i+index]=0;
for(j=0;j<n;j++)
{
NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
}
}
}
__syncthreads();
//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
if(k==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = R[i+index];
}
}
//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
else {
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = NewQ[i+index];
}
}
//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z1[i+index] = z[i+index];
}
__syncthreads();
}
//Once for loop is completed:
//STEP 13: Multiply matrices Q and m to find the matrix R
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index]=0;
}
for(i=0;i<n;i++)
{
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
}
}
__syncthreads();
//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// # -> #%n*n+#/n
// for n=4 0->0 1->4 2->8 3->12
// 4->1 5->5 6->9 7->13
// 8->2 9->6 10->10 11->14
// 12->3 13->7 14->11 15->15
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i%n*n+i/n+index] = Q[i+index];
}
__syncthreads();
//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
//NewM contains new eigenvectors
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++){
NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
}
}
__syncthreads();
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
eigenvector[i+index]=NewM[i+index];
}
__syncthreads();
//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++)
{
NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
}
}
//STEP 16: Check for Convergence of New Matrix (Newm)
if(threadIdx.x==0){
converged[blockIdx.x] = 1;
}
__syncthreads();
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
PrevM[i+index]/NewM[i+index]<0.999999)){
converged[blockIdx.x] = 0;
}
}
__syncthreads();
//STEP 17: Set up for next iteration if converged is 0
if(converged[blockIdx.x]==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i + index;
z[iplusindex] = NewM[iplusindex];
z1[iplusindex] = NewM[iplusindex];
Q[iplusindex] = NewM[iplusindex];
PrevM[iplusindex] = NewM[iplusindex];
}
}
__syncthreads();
}while(converged[blockIdx.x]==0);
//put eigenvalues into vector
if(threadIdx.x<n){
vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
}
__syncthreads();
if(threadIdx.x==0){
//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
//Simple Bubble Sort
int i1,i2,i3;
for(i1=vectindex;i1<n-1+vectindex;i1++){
for(i2=i1+1;i2<n+vectindex;i2++){
if(vector[i1]>vector[i2]){
float tmp = vector[i1];
vector[i1] = vector[i2];
vector[i2] = tmp;
for(i3 = 0;i3<n;i3++){
float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
}
}
}
}
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
//extern __shared__ float z1[];
int n = WidthOfMatrix[blockIdx.x];
int index = ind[blockIdx.x];
int vectindex = vind[blockIdx.x];
int numofelements = n*n;
if(threadIdx.x==0){
converged[blockIdx.x] = 0;
}
if(threadIdx.x<numofelements){
int i;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
//set eigenvector to the identity matrix.
if(i/n==i%n)eigenvector[i+index]=1;
else eigenvector[i+index]=0;
}
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i+index;
z1[iplusindex]=z[iplusindex];
Q[iplusindex]=z[iplusindex];
PrevM[iplusindex]=z[iplusindex];
}
do{
int k, j, PowOf2;
for(k=0;k<n-1;k++){
//Householder Code
//STEP 0: Get value of z[k*n+k] for use in step 4
float NormCheck = z[k*n+k+index];
//STEP 1: Find minor matrix of the input matrix z and sets it to z
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i%n==i/n&&i/n<k)z[i+index]=1;
else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
else z[i+index]=0;
}
__syncthreads();
//STEP 2: Find kTH column of z and set to vector
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
vector[i+vectindex] = z[i*n+k+index];
}
}
//STEP 3: Find the norm of the kTh column and set to NormOfKcol
float NormOfKcol;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
//Need a temporary for vector that we can change the values of since we need mcol later
vector1[iplusvectindex] = vector[iplusvectindex];
vector1[iplusvectindex] *= vector1[iplusvectindex];
}
}
PowOf2 = 1;
__syncthreads();
//add all x's together, 2 at a time. O((log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
NormOfKcol = sqrt(vector1[0+vectindex]);
//STEP 4: Make Norm Negative if NormCheck is > 0
if(NormCheck > 0) NormOfKcol = -NormOfKcol;
//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
__syncthreads();
//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
//used in ||tmp||
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
PowOf2 = 1;
}
}
__syncthreads();
//add all tmp's together, 2 at a time. O(n(log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
__syncthreads();
//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
// Vdiv = Vmadd / norm
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
}
}
__syncthreads();
//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
// Vmul = I - 2 * Vdiv * Vdiv^T
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
}
//if on the diagonal(row==column)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n){
R[i+index] += 1;
}
}
__syncthreads();
//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i+index]=0;
for(j=0;j<n;j++){
z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
}
}
//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
if(k!=0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewQ[i+index]=0;
for(j=0;j<n;j++)
{
NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
}
}
}
__syncthreads();
//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
if(k==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = R[i+index];
}
}
//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
else {
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = NewQ[i+index];
}
}
//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z1[i+index] = z[i+index];
}
__syncthreads();
}
//Once for loop is completed:
//STEP 13: Multiply matrices Q and m to find the matrix R
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index]=0;
}
for(i=0;i<n;i++)
{
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
}
}
__syncthreads();
//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// # -> #%n*n+#/n
// for n=4 0->0 1->4 2->8 3->12
// 4->1 5->5 6->9 7->13
// 8->2 9->6 10->10 11->14
// 12->3 13->7 14->11 15->15
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i%n*n+i/n+index] = Q[i+index];
}
__syncthreads();
//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
//NewM contains new eigenvectors
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++){
NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
}
}
__syncthreads();
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
eigenvector[i+index]=NewM[i+index];
}
__syncthreads();
//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++)
{
NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
}
}
//STEP 16: Check for Convergence of New Matrix (Newm)
if(threadIdx.x==0){
converged[blockIdx.x] = 1;
}
__syncthreads();
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
PrevM[i+index]/NewM[i+index]<0.999999)){
converged[blockIdx.x] = 0;
}
}
__syncthreads();
//STEP 17: Set up for next iteration if converged is 0
if(converged[blockIdx.x]==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i + index;
z[iplusindex] = NewM[iplusindex];
z1[iplusindex] = NewM[iplusindex];
Q[iplusindex] = NewM[iplusindex];
PrevM[iplusindex] = NewM[iplusindex];
}
}
__syncthreads();
}while(converged[blockIdx.x]==0);
//put eigenvalues into vector
if(threadIdx.x<n){
vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
}
__syncthreads();
if(threadIdx.x==0){
//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
//Simple Bubble Sort
int i1,i2,i3;
for(i1=vectindex;i1<n-1+vectindex;i1++){
for(i2=i1+1;i2<n+vectindex;i2++){
if(vector[i1]>vector[i2]){
float tmp = vector[i1];
vector[i1] = vector[i2];
vector[i2] = tmp;
for(i3 = 0;i3<n;i3++){
float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
}
}
}
}
}
}
} | .text
.file "block_QR.hip"
.globl _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_ # -- Begin function _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.p2align 4, 0x90
.type _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_,@function
_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_: # @_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 224(%rsp), %rax
movq %rax, 144(%rsp)
leaq 232(%rsp), %rax
movq %rax, 152(%rsp)
leaq 240(%rsp), %rax
movq %rax, 160(%rsp)
leaq 248(%rsp), %rax
movq %rax, 168(%rsp)
leaq 256(%rsp), %rax
movq %rax, 176(%rsp)
leaq 264(%rsp), %rax
movq %rax, 184(%rsp)
leaq 272(%rsp), %rax
movq %rax, 192(%rsp)
leaq 280(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .Lfunc_end0-_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_,@object # @_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.section .rodata,"a",@progbits
.globl _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.p2align 3, 0x0
_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.quad _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.size _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_"
.size .L__unnamed_1, 43
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001bd7b5_00000000-6_block_QR.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.type _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, @function
_Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.LFB2051:
.cfi_startproc
endbr64
subq $312, %rsp
.cfi_def_cfa_offset 320
movq %rdi, 104(%rsp)
movq %rsi, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
movq 320(%rsp), %rax
movq %rax, 56(%rsp)
movq 328(%rsp), %rax
movq %rax, 48(%rsp)
movq 336(%rsp), %rax
movq %rax, 40(%rsp)
movq 344(%rsp), %rax
movq %rax, 32(%rsp)
movq 352(%rsp), %rax
movq %rax, 24(%rsp)
movq 360(%rsp), %rax
movq %rax, 16(%rsp)
movq 368(%rsp), %rax
movq %rax, 8(%rsp)
movq 376(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 296(%rsp)
xorl %eax, %eax
leaq 104(%rsp), %rax
movq %rax, 176(%rsp)
leaq 96(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rax
movq %rax, 192(%rsp)
leaq 80(%rsp), %rax
movq %rax, 200(%rsp)
leaq 72(%rsp), %rax
movq %rax, 208(%rsp)
leaq 64(%rsp), %rax
movq %rax, 216(%rsp)
leaq 56(%rsp), %rax
movq %rax, 224(%rsp)
leaq 48(%rsp), %rax
movq %rax, 232(%rsp)
leaq 40(%rsp), %rax
movq %rax, 240(%rsp)
leaq 32(%rsp), %rax
movq %rax, 248(%rsp)
leaq 24(%rsp), %rax
movq %rax, 256(%rsp)
leaq 16(%rsp), %rax
movq %rax, 264(%rsp)
leaq 8(%rsp), %rax
movq %rax, 272(%rsp)
movq %rsp, %rax
movq %rax, 280(%rsp)
movl $1, 128(%rsp)
movl $1, 132(%rsp)
movl $1, 136(%rsp)
movl $1, 140(%rsp)
movl $1, 144(%rsp)
movl $1, 148(%rsp)
leaq 120(%rsp), %rcx
leaq 112(%rsp), %rdx
leaq 140(%rsp), %rsi
leaq 128(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 296(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $312, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 120(%rsp)
.cfi_def_cfa_offset 328
pushq 120(%rsp)
.cfi_def_cfa_offset 336
leaq 192(%rsp), %r9
movq 156(%rsp), %rcx
movl 164(%rsp), %r8d
movq 144(%rsp), %rsi
movl 152(%rsp), %edx
leaq _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 320
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .-_Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.globl _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.type _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, @function
_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 72(%rsp)
.cfi_def_cfa_offset 24
pushq 72(%rsp)
.cfi_def_cfa_offset 32
pushq 72(%rsp)
.cfi_def_cfa_offset 40
pushq 72(%rsp)
.cfi_def_cfa_offset 48
pushq 72(%rsp)
.cfi_def_cfa_offset 56
pushq 72(%rsp)
.cfi_def_cfa_offset 64
pushq 72(%rsp)
.cfi_def_cfa_offset 72
pushq 72(%rsp)
.cfi_def_cfa_offset 80
call _Z56__device_stub__Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_PfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
addq $72, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .-_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "block_QR.hip"
.globl _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_ # -- Begin function _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.p2align 4, 0x90
.type _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_,@function
_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_: # @_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.cfi_startproc
# %bb.0:
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 224(%rsp), %rax
movq %rax, 144(%rsp)
leaq 232(%rsp), %rax
movq %rax, 152(%rsp)
leaq 240(%rsp), %rax
movq %rax, 160(%rsp)
leaq 248(%rsp), %rax
movq %rax, 168(%rsp)
leaq 256(%rsp), %rax
movq %rax, 176(%rsp)
leaq 264(%rsp), %rax
movq %rax, 184(%rsp)
leaq 272(%rsp), %rax
movq %rax, 192(%rsp)
leaq 280(%rsp), %rax
movq %rax, 200(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $232, %rsp
.cfi_adjust_cfa_offset -232
retq
.Lfunc_end0:
.size _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, .Lfunc_end0-_Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_,@object # @_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.section .rodata,"a",@progbits
.globl _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.p2align 3, 0x0
_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_:
.quad _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.size _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_"
.size .L__unnamed_1, 43
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8block_QRPfS_S_S_S_S_S_S_S_PiS_PKiS2_S2_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} | code for sm_80
Function : copy
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea2000c1e1900 */
/*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R7, RZ ; /* 0x000000ff00077202 */
/* 0x000fd20000000f00 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fca00078e0205 */
/*00b0*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x004fe2000c101b04 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} | .file "tmpxft_0012a8bd_00000000-6_copy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z4copyP6float2PfP6float2Pf
.type _Z31__device_stub__Z4copyP6float2PfP6float2Pf, @function
_Z31__device_stub__Z4copyP6float2PfP6float2Pf:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq copy(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z4copyP6float2PfP6float2Pf, .-_Z31__device_stub__Z4copyP6float2PfP6float2Pf
.globl copy
.type copy, @function
copy:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z4copyP6float2PfP6float2Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size copy, .-copy
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "copy"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq copy(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} | #include <hip/hip_runtime.h>
extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected copy
.globl copy
.p2align 8
.type copy,@function
copy:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_lshlrev_b64 v[1:2], 3, v[1:2]
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v0, v[3:4], off
v_add_co_u32 v3, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v2, vcc_lo
v_mov_b32_e32 v1, 0
s_waitcnt vmcnt(0)
global_store_b64 v[3:4], v[0:1], off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel copy
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size copy, .Lfunc_end0-copy
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: copy
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: copy.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} | .text
.file "copy.hip"
.globl __device_stub__copy # -- Begin function __device_stub__copy
.p2align 4, 0x90
.type __device_stub__copy,@function
__device_stub__copy: # @__device_stub__copy
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $copy, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__copy, .Lfunc_end0-__device_stub__copy
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $copy, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type copy,@object # @copy
.section .rodata,"a",@progbits
.globl copy
.p2align 3, 0x0
copy:
.quad __device_stub__copy
.size copy, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "copy"
.size .L__unnamed_1, 5
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__copy
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym copy
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : copy
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fca00078e0205 */
/*0070*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea2000c1e1900 */
/*0080*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x000fe200000001ff */
/*0090*/ MOV R7, RZ ; /* 0x000000ff00077202 */
/* 0x000fd20000000f00 */
/*00a0*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fca00078e0205 */
/*00b0*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x004fe2000c101b04 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected copy
.globl copy
.p2align 8
.type copy,@function
copy:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_lshlrev_b64 v[1:2], 3, v[1:2]
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
global_load_b32 v0, v[3:4], off
v_add_co_u32 v3, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v4, vcc_lo, s1, v2, vcc_lo
v_mov_b32_e32 v1, 0
s_waitcnt vmcnt(0)
global_store_b64 v[3:4], v[0:1], off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel copy
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size copy, .Lfunc_end0-copy
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: copy
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: copy.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0012a8bd_00000000-6_copy.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z4copyP6float2PfP6float2Pf
.type _Z31__device_stub__Z4copyP6float2PfP6float2Pf, @function
_Z31__device_stub__Z4copyP6float2PfP6float2Pf:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq copy(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z31__device_stub__Z4copyP6float2PfP6float2Pf, .-_Z31__device_stub__Z4copyP6float2PfP6float2Pf
.globl copy
.type copy, @function
copy:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z4copyP6float2PfP6float2Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size copy, .-copy
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "copy"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq copy(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "copy.hip"
.globl __device_stub__copy # -- Begin function __device_stub__copy
.p2align 4, 0x90
.type __device_stub__copy,@function
__device_stub__copy: # @__device_stub__copy
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $copy, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size __device_stub__copy, .Lfunc_end0-__device_stub__copy
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $copy, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type copy,@object # @copy
.section .rodata,"a",@progbits
.globl copy
.p2align 3, 0x0
copy:
.quad __device_stub__copy
.size copy, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "copy"
.size .L__unnamed_1, 5
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __device_stub__copy
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym copy
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
cudaMalloc((void**)&rawd, sizeof(int)*n);
cudaMalloc((void**)&resd, m+1);
cudaMalloc((void**)&flag, sizeof(int));
cudaMemcpy(rawd, raw, sizeof(int)*n, cudaMemcpyHostToDevice);
cudaMemcpy(flag, &mflag, sizeof(int), cudaMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
cudaMemcpy(&mflag, flag, sizeof(int), cudaMemcpyDeviceToHost);
if (mflag){
cudaMemcpy(res, resd, m+1, cudaMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
cudaFree(rawd);
cudaFree(resd);
cudaFree(flag);
free(raw);
free(res);
} | .file "tmpxft_000a0d06_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z6getvaliyy
.type _Z6getvaliyy, @function
_Z6getvaliyy:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z6getvaliyy, .-_Z6getvaliyy
.globl _Z4testiPiyy
.type _Z4testiPiyy, @function
_Z4testiPiyy:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z4testiPiyy, .-_Z4testiPiyy
.globl _Z7fillresiPbyy
.type _Z7fillresiPbyy, @function
_Z7fillresiPbyy:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z7fillresiPbyy, .-_Z7fillresiPbyy
.globl _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
.type _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_, @function
_Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z2bfiiPiPbS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_, .-_Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
.globl _Z2bfiiPiPbS_
.type _Z2bfiiPiPbS_, @function
_Z2bfiiPiPbS_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z2bfiiPiPbS_, .-_Z2bfiiPiPbS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "true"
.LC1:
.string "false"
.LC2:
.string "%d%d"
.LC3:
.string "%d"
.LC4:
.string "%d := %s\n"
.LC5:
.string "No satisfy!\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $0, 20(%rsp)
leaq 16(%rsp), %rdx
leaq 12(%rsp), %rsi
leaq .LC2(%rip), %rdi
call __isoc23_scanf@PLT
movl 12(%rsp), %eax
leal (%rax,%rax,2), %ebx
movl %ebx, %r12d
movl %ebx, 12(%rsp)
movslq %ebx, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r14
movl 16(%rsp), %eax
leal 1(%rax), %edi
movslq %edi, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L18
movq %r14, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r15
.L19:
movq %rbp, %rsi
movq %r15, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
movl 12(%rsp), %r12d
addq $4, %rbp
cmpl %ebx, %r12d
jg .L19
.L18:
movslq %r12d, %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 1(%rax), %esi
movslq %esi, %rsi
leaq 40(%rsp), %rdi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movslq 12(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r14, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 20(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1048576, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L20:
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
cmpl $0, 20(%rsp)
je .L21
movl 16(%rsp), %eax
leal 1(%rax), %edx
movslq %edx, %rdx
movl $2, %ecx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
cmpl $0, 16(%rsp)
jle .L22
movl $1, %ebx
leaq .LC1(%rip), %r15
leaq .LC0(%rip), %r12
leaq .LC4(%rip), %rbp
.L24:
movl %ebx, %edx
cmpb $0, 0(%r13,%rbx)
movq %r12, %rcx
cmove %r15, %rcx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 16(%rsp)
jge .L24
jmp .L22
.L30:
movq 32(%rsp), %r8
movq 40(%rsp), %rcx
movq 24(%rsp), %rdx
movl 16(%rsp), %esi
movl 12(%rsp), %edi
call _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
jmp .L20
.L21:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L22:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z2bfiiPiPbS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z2bfiiPiPbS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
cudaMalloc((void**)&rawd, sizeof(int)*n);
cudaMalloc((void**)&resd, m+1);
cudaMalloc((void**)&flag, sizeof(int));
cudaMemcpy(rawd, raw, sizeof(int)*n, cudaMemcpyHostToDevice);
cudaMemcpy(flag, &mflag, sizeof(int), cudaMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
cudaMemcpy(&mflag, flag, sizeof(int), cudaMemcpyDeviceToHost);
if (mflag){
cudaMemcpy(res, resd, m+1, cudaMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
cudaFree(rawd);
cudaFree(resd);
cudaFree(flag);
free(raw);
free(res);
} | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
hipMalloc((void**)&rawd, sizeof(int)*n);
hipMalloc((void**)&resd, m+1);
hipMalloc((void**)&flag, sizeof(int));
hipMemcpy(rawd, raw, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(flag, &mflag, sizeof(int), hipMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
hipMemcpy(&mflag, flag, sizeof(int), hipMemcpyDeviceToHost);
if (mflag){
hipMemcpy(res, resd, m+1, hipMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
hipFree(rawd);
hipFree(resd);
hipFree(flag);
free(raw);
free(res);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
hipMalloc((void**)&rawd, sizeof(int)*n);
hipMalloc((void**)&resd, m+1);
hipMalloc((void**)&flag, sizeof(int));
hipMemcpy(rawd, raw, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(flag, &mflag, sizeof(int), hipMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
hipMemcpy(&mflag, flag, sizeof(int), hipMemcpyDeviceToHost);
if (mflag){
hipMemcpy(res, resd, m+1, hipMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
hipFree(rawd);
hipFree(resd);
hipFree(flag);
free(raw);
free(res);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z2bfiiPiPbS_
.globl _Z2bfiiPiPbS_
.p2align 8
.type _Z2bfiiPiPbS_,@function
_Z2bfiiPiPbS_:
s_load_b32 s12, s[0:1], 0x4
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 10
v_or_b32_e32 v1, s2, v0
v_mov_b32_e32 v2, s3
s_mov_b32 s2, exec_lo
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b64 v[3:4], s12, v[1:2]
v_cmpx_eq_u64_e32 0, v[3:4]
s_cbranch_execz .LBB0_23
s_cmp_lt_i32 s12, 31
s_mov_b64 s[4:5], 1
s_cbranch_scc1 .LBB0_3
s_sub_i32 s2, s12, 30
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[4:5], 1, s2
.LBB0_3:
s_clause 0x1
s_load_b32 s13, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x18
v_lshlrev_b64 v[1:2], 1, v[1:2]
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s13, 1
s_cselect_b32 s14, -1, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 vcc_lo, exec_lo, s14
s_cbranch_vccnz .LBB0_19
s_load_b64 s[6:7], s[0:1], 0x8
v_mov_b32_e32 v3, 0
v_mov_b32_e32 v4, 0
s_mov_b32 s16, 0
s_branch .LBB0_7
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s20
v_dual_mov_b32 v5, s8 :: v_dual_mov_b32 v6, s9
s_and_not1_b32 s8, s17, exec_lo
s_and_b32 s9, s11, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s17, s8, s9
s_or_not1_b32 s8, s19, exec_lo
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s10
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s8, exec_lo, s8
s_or_b32 s16, s8, s16
s_and_not1_b32 s8, s15, exec_lo
s_and_b32 s9, s17, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s15, s8, s9
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB0_18
.LBB0_7:
s_mov_b32 s18, -1
s_waitcnt lgkmcnt(0)
s_mov_b64 s[8:9], s[6:7]
s_mov_b32 s19, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_8:
s_mov_b32 s20, 0
s_mov_b64 s[10:11], 0
.p2align 6
.LBB0_9:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
s_add_u32 s22, s8, s10
s_addc_u32 s23, s9, s11
s_load_b32 s22, s[22:23], 0x0
s_waitcnt lgkmcnt(0)
s_abs_i32 s23, s22
s_sub_i32 s24, s23, 31
s_cmp_lt_u32 s23, 31
s_cselect_b32 vcc_lo, -1, 0
v_dual_cndmask_b32 v6, v4, v2 :: v_dual_cndmask_b32 v5, v3, v1
s_and_b32 s25, vcc_lo, exec_lo
s_cselect_b32 s23, s23, s24
s_cmp_lt_i32 s22, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshrrev_b64 v[5:6], s23, v[5:6]
s_cselect_b32 s22, -1, 0
v_and_b32_e32 v5, 1, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_cmp_eq_u32_e32 vcc_lo, 1, v5
s_xor_b32 s22, s22, vcc_lo
s_or_b32 s20, s20, s22
s_add_u32 s10, s10, 4
s_addc_u32 s11, s11, 0
s_and_not1_b32 s21, s21, exec_lo
s_and_b32 s22, s20, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s21, s21, s22
s_cmp_eq_u32 s10, 12
s_cbranch_scc0 .LBB0_9
s_and_b32 s18, s18, s21
s_add_i32 s19, s19, 3
s_add_u32 s8, s8, 12
s_addc_u32 s9, s9, 0
s_cmp_ge_i32 s19, s13
s_cbranch_scc0 .LBB0_8
s_set_inst_prefetch_distance 0x2
v_dual_mov_b32 v6, v4 :: v_dual_mov_b32 v5, v3
s_xor_b32 s9, s18, -1
s_mov_b32 s8, -1
s_or_b32 s17, s17, exec_lo
s_and_saveexec_b32 s10, s9
s_cbranch_execz .LBB0_6
v_xor_b32_e32 v5, v3, v0
s_mov_b32 s11, -1
s_mov_b32 s18, 0
s_mov_b32 s8, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_and_b32_e32 v5, 0xff, v5
v_cmpx_ne_u32_e32 0, v5
s_xor_b32 s8, exec_lo, s8
v_add_co_u32 v3, vcc_lo, v3, 1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_xor_b32 s11, exec_lo, -1
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[3:4]
s_and_b32 s18, vcc_lo, exec_lo
s_and_not1_saveexec_b32 s8, s8
s_cbranch_execz .LBB0_16
s_load_b32 s9, s[2:3], 0x0
v_add_co_u32 v3, vcc_lo, v3, 1
v_add_co_ci_u32_e32 v4, vcc_lo, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u64_e32 vcc_lo, s[4:5], v[3:4]
s_waitcnt lgkmcnt(0)
s_cmp_eq_u32 s9, 0
s_cselect_b32 s9, -1, 0
s_and_not1_b32 s18, s18, exec_lo
s_and_b32 s9, s9, vcc_lo
s_and_not1_b32 s11, s11, exec_lo
s_and_b32 s9, s9, exec_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 s18, s18, s9
.LBB0_16:
s_or_b32 exec_lo, exec_lo, s8
s_mov_b32 s19, -1
s_and_saveexec_b32 s20, s18
s_cbranch_execz .LBB0_5
s_mov_b64 s[8:9], 0
s_or_b32 s11, s11, exec_lo
s_or_not1_b32 s19, s14, exec_lo
s_branch .LBB0_5
.LBB0_18:
s_or_b32 exec_lo, exec_lo, s16
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s15
s_cbranch_execnz .LBB0_20
s_branch .LBB0_23
.LBB0_19:
v_mov_b32_e32 v5, 0
v_mov_b32_e32 v6, 0
s_mov_b32 s15, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s15
s_cbranch_execz .LBB0_23
.LBB0_20:
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 1
s_cmp_gt_i32 s12, 0
global_atomic_swap_b32 v3, v0, v3, s[2:3] glc
s_cselect_b32 s2, -1, 0
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, 0, v3
s_and_b32 s2, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_23
s_load_b64 s[0:1], s[0:1], 0x10
s_mov_b64 s[2:3], 1
.p2align 6
.LBB0_22:
s_delay_alu instid0(SALU_CYCLE_1)
s_sub_i32 s4, s2, 31
s_cmp_lt_u32 s2, 31
s_cselect_b32 vcc_lo, -1, 0
v_dual_cndmask_b32 v4, v6, v2 :: v_dual_cndmask_b32 v3, v5, v1
s_and_b32 s5, vcc_lo, exec_lo
s_cselect_b32 s4, s2, s4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_lshrrev_b64 v[3:4], s4, v[3:4]
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s0, s2
s_addc_u32 s5, s1, s3
s_add_u32 s6, s2, 1
s_addc_u32 s7, s3, 0
s_cmp_eq_u32 s2, s12
v_and_b32_e32 v3, 1, v3
s_mov_b64 s[2:3], s[6:7]
global_store_b8 v0, v3, s[4:5]
s_cbranch_scc0 .LBB0_22
.LBB0_23:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z2bfiiPiPbS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 26
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z2bfiiPiPbS_, .Lfunc_end0-_Z2bfiiPiPbS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z2bfiiPiPbS_
.private_segment_fixed_size: 0
.sgpr_count: 28
.sgpr_spill_count: 0
.symbol: _Z2bfiiPiPbS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
#include<stdbool.h>
typedef unsigned long long int ull;
__device__ bool getval(int v, ull id, ull ie){
if (v<0) v=-v;
if (v<=30) return (id & (1llu<<v)) ? true : false;
return (ie & (1llu<<(v-31))) ? true : false;
}
__device__ bool test(int n, int* raw, ull id, ull ie){
bool ret = true;
for (int i = 0; i < n; i+=3){
bool tmp = false;
for (int j = 0; j < 3; j++)
tmp |= (getval(raw[i+j], id, ie) ^ (raw[i+j] < 0));
ret &= tmp;
}
return ret;
}
__device__ void fillres(int m, bool* res, ull id, ull ie){
for (int i=1;i<=m;i++)
res[i] = getval(i, id, ie);
}
__global__ void bf(int n, int m, int* raw, bool* res, int* flag){
ull myid = blockIdx.x * 1024llu + threadIdx.x;
ull mxstate = (1llu<<m) - 1;
if (myid > mxstate) return;
ull end = 1;
if (m-30 > 0) end <<= m-30;
for (ull i = 0; i < end; i ++){
if (test(n, raw, myid<<1, i)){
if (!atomicExch(flag, 1))
fillres(m, res, myid<<1, i);
return;
}
if ((i & 0xff) == (myid & 0xff) && *flag)
return;
}
}
int main (){
int *rawd, *raw, *flag;
bool *resd, *res;
int n, m, mflag = 0;
scanf("%d%d", &n,&m);
n*=3;
raw = (int*)malloc(sizeof(int)*n);
res = (bool*)malloc(m+1);
for (int i=0;i<n;i++)
scanf("%d", raw+i);
hipMalloc((void**)&rawd, sizeof(int)*n);
hipMalloc((void**)&resd, m+1);
hipMalloc((void**)&flag, sizeof(int));
hipMemcpy(rawd, raw, sizeof(int)*n, hipMemcpyHostToDevice);
hipMemcpy(flag, &mflag, sizeof(int), hipMemcpyHostToDevice);
bf<<<1048576,1024>>>(n, m, rawd, resd, flag);
hipMemcpy(&mflag, flag, sizeof(int), hipMemcpyDeviceToHost);
if (mflag){
hipMemcpy(res, resd, m+1, hipMemcpyDeviceToHost);
for (int i = 1; i <= m; i++)
printf("%d := %s\n", i, (res[i]?"true":"false"));
}
else printf("No satisfy!\n");
hipFree(rawd);
hipFree(resd);
hipFree(flag);
free(raw);
free(res);
} | .text
.file "cuda.hip"
.globl _Z17__device_stub__bfiiPiPbS_ # -- Begin function _Z17__device_stub__bfiiPiPbS_
.p2align 4, 0x90
.type _Z17__device_stub__bfiiPiPbS_,@function
_Z17__device_stub__bfiiPiPbS_: # @_Z17__device_stub__bfiiPiPbS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z2bfiiPiPbS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z17__device_stub__bfiiPiPbS_, .Lfunc_end0-_Z17__device_stub__bfiiPiPbS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $168, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $0, 20(%rsp)
leaq 16(%rsp), %rsi
leaq 12(%rsp), %rdx
movl $.L.str, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 16(%rsp), %rax
leaq (%rax,%rax,2), %rdi
movl %edi, 16(%rsp)
shlq $2, %rdi
callq malloc
movq %rax, %rbx
movslq 12(%rsp), %rdi
incq %rdi
callq malloc
movq %rax, %r14
movl 16(%rsp), %eax
testl %eax, %eax
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %r15, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r12
movslq 16(%rsp), %rax
addq $4, %r15
cmpq %rax, %r12
jl .LBB1_2
.LBB1_3: # %._crit_edge
movslq %eax, %rsi
shlq $2, %rsi
leaq 40(%rsp), %rdi
callq hipMalloc
movslq 12(%rsp), %rsi
incq %rsi
leaq 32(%rsp), %rdi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movslq 16(%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
leaq 20(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 1047552(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movl 16(%rsp), %eax
movl 12(%rsp), %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
movl %eax, 52(%rsp)
movl %ecx, 48(%rsp)
movq %rdx, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 52(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 120(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z2bfiiPiPbS_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
movq 24(%rsp), %rsi
leaq 20(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
cmpl $0, 20(%rsp)
je .LBB1_11
# %bb.6:
movq 32(%rsp), %rsi
movslq 12(%rsp), %rdx
incq %rdx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
cmpl $0, 12(%rsp)
jle .LBB1_12
# %bb.7: # %.lr.ph27.preheader
movl $1, %r15d
jmp .LBB1_8
.p2align 4, 0x90
.LBB1_10: # %.lr.ph27
# in Loop: Header=BB1_8 Depth=1
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
leaq 1(%r15), %rax
movslq 12(%rsp), %rcx
cmpq %rcx, %r15
movq %rax, %r15
jge .LBB1_12
.LBB1_8: # %.lr.ph27
# =>This Inner Loop Header: Depth=1
cmpb $0, (%r14,%r15)
movl $.L.str.4, %edx
je .LBB1_10
# %bb.9: # %.lr.ph27
# in Loop: Header=BB1_8 Depth=1
movl $.L.str.3, %edx
jmp .LBB1_10
.LBB1_11:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_12: # %.loopexit
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2bfiiPiPbS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z2bfiiPiPbS_,@object # @_Z2bfiiPiPbS_
.section .rodata,"a",@progbits
.globl _Z2bfiiPiPbS_
.p2align 3, 0x0
_Z2bfiiPiPbS_:
.quad _Z17__device_stub__bfiiPiPbS_
.size _Z2bfiiPiPbS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d%d"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d := %s\n"
.size .L.str.2, 10
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "true"
.size .L.str.3, 5
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "false"
.size .L.str.4, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z2bfiiPiPbS_"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "No satisfy!"
.size .Lstr, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__bfiiPiPbS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z2bfiiPiPbS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000a0d06_00000000-6_cuda.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z6getvaliyy
.type _Z6getvaliyy, @function
_Z6getvaliyy:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z6getvaliyy, .-_Z6getvaliyy
.globl _Z4testiPiyy
.type _Z4testiPiyy, @function
_Z4testiPiyy:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z4testiPiyy, .-_Z4testiPiyy
.globl _Z7fillresiPbyy
.type _Z7fillresiPbyy, @function
_Z7fillresiPbyy:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z7fillresiPbyy, .-_Z7fillresiPbyy
.globl _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
.type _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_, @function
_Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L13
.L9:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L14
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L13:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z2bfiiPiPbS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L9
.L14:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_, .-_Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
.globl _Z2bfiiPiPbS_
.type _Z2bfiiPiPbS_, @function
_Z2bfiiPiPbS_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z2bfiiPiPbS_, .-_Z2bfiiPiPbS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "true"
.LC1:
.string "false"
.LC2:
.string "%d%d"
.LC3:
.string "%d"
.LC4:
.string "%d := %s\n"
.LC5:
.string "No satisfy!\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $0, 20(%rsp)
leaq 16(%rsp), %rdx
leaq 12(%rsp), %rsi
leaq .LC2(%rip), %rdi
call __isoc23_scanf@PLT
movl 12(%rsp), %eax
leal (%rax,%rax,2), %ebx
movl %ebx, %r12d
movl %ebx, 12(%rsp)
movslq %ebx, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r14
movl 16(%rsp), %eax
leal 1(%rax), %edi
movslq %edi, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L18
movq %r14, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r15
.L19:
movq %rbp, %rsi
movq %r15, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
movl 12(%rsp), %r12d
addq $4, %rbp
cmpl %ebx, %r12d
jg .L19
.L18:
movslq %r12d, %rsi
salq $2, %rsi
leaq 24(%rsp), %rdi
call cudaMalloc@PLT
movl 16(%rsp), %eax
leal 1(%rax), %esi
movslq %esi, %rsi
leaq 40(%rsp), %rdi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movslq 12(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r14, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq 20(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1048576, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L20:
leaq 20(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
cmpl $0, 20(%rsp)
je .L21
movl 16(%rsp), %eax
leal 1(%rax), %edx
movslq %edx, %rdx
movl $2, %ecx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
cmpl $0, 16(%rsp)
jle .L22
movl $1, %ebx
leaq .LC1(%rip), %r15
leaq .LC0(%rip), %r12
leaq .LC4(%rip), %rbp
.L24:
movl %ebx, %edx
cmpb $0, 0(%r13,%rbx)
movq %r12, %rcx
cmove %r15, %rcx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpl %ebx, 16(%rsp)
jge .L24
jmp .L22
.L30:
movq 32(%rsp), %r8
movq 40(%rsp), %rcx
movq 24(%rsp), %rdx
movl 16(%rsp), %esi
movl 12(%rsp), %edi
call _Z27__device_stub__Z2bfiiPiPbS_iiPiPbS_
jmp .L20
.L21:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L22:
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z2bfiiPiPbS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z2bfiiPiPbS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda.hip"
.globl _Z17__device_stub__bfiiPiPbS_ # -- Begin function _Z17__device_stub__bfiiPiPbS_
.p2align 4, 0x90
.type _Z17__device_stub__bfiiPiPbS_,@function
_Z17__device_stub__bfiiPiPbS_: # @_Z17__device_stub__bfiiPiPbS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z2bfiiPiPbS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z17__device_stub__bfiiPiPbS_, .Lfunc_end0-_Z17__device_stub__bfiiPiPbS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $168, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $0, 20(%rsp)
leaq 16(%rsp), %rsi
leaq 12(%rsp), %rdx
movl $.L.str, %edi
xorl %eax, %eax
callq __isoc23_scanf
movslq 16(%rsp), %rax
leaq (%rax,%rax,2), %rdi
movl %edi, 16(%rsp)
shlq $2, %rdi
callq malloc
movq %rax, %rbx
movslq 12(%rsp), %rdi
incq %rdi
callq malloc
movq %rax, %r14
movl 16(%rsp), %eax
testl %eax, %eax
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
movq %rbx, %r15
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.1, %edi
movq %r15, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %r12
movslq 16(%rsp), %rax
addq $4, %r15
cmpq %rax, %r12
jl .LBB1_2
.LBB1_3: # %._crit_edge
movslq %eax, %rsi
shlq $2, %rsi
leaq 40(%rsp), %rdi
callq hipMalloc
movslq 12(%rsp), %rsi
incq %rsi
leaq 32(%rsp), %rdi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
movq 40(%rsp), %rdi
movslq 16(%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
leaq 20(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294968320, %rdx # imm = 0x100000400
leaq 1047552(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movl 16(%rsp), %eax
movl 12(%rsp), %ecx
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
movl %eax, 52(%rsp)
movl %ecx, 48(%rsp)
movq %rdx, 120(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 52(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 120(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rax
movq %rax, 152(%rsp)
leaq 104(%rsp), %rax
movq %rax, 160(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z2bfiiPiPbS_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
movq 24(%rsp), %rsi
leaq 20(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
cmpl $0, 20(%rsp)
je .LBB1_11
# %bb.6:
movq 32(%rsp), %rsi
movslq 12(%rsp), %rdx
incq %rdx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
cmpl $0, 12(%rsp)
jle .LBB1_12
# %bb.7: # %.lr.ph27.preheader
movl $1, %r15d
jmp .LBB1_8
.p2align 4, 0x90
.LBB1_10: # %.lr.ph27
# in Loop: Header=BB1_8 Depth=1
movl $.L.str.2, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
leaq 1(%r15), %rax
movslq 12(%rsp), %rcx
cmpq %rcx, %r15
movq %rax, %r15
jge .LBB1_12
.LBB1_8: # %.lr.ph27
# =>This Inner Loop Header: Depth=1
cmpb $0, (%r14,%r15)
movl $.L.str.4, %edx
je .LBB1_10
# %bb.9: # %.lr.ph27
# in Loop: Header=BB1_8 Depth=1
movl $.L.str.3, %edx
jmp .LBB1_10
.LBB1_11:
movl $.Lstr, %edi
callq puts@PLT
.LBB1_12: # %.loopexit
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $168, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z2bfiiPiPbS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z2bfiiPiPbS_,@object # @_Z2bfiiPiPbS_
.section .rodata,"a",@progbits
.globl _Z2bfiiPiPbS_
.p2align 3, 0x0
_Z2bfiiPiPbS_:
.quad _Z17__device_stub__bfiiPiPbS_
.size _Z2bfiiPiPbS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%d%d"
.size .L.str, 5
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "%d := %s\n"
.size .L.str.2, 10
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "true"
.size .L.str.3, 5
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "false"
.size .L.str.4, 6
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z2bfiiPiPbS_"
.size .L__unnamed_1, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "No satisfy!"
.size .Lstr, 12
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z17__device_stub__bfiiPiPbS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z2bfiiPiPbS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} | code for sm_80
Function : _Z9AddVectoriPKfS0_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x160], PT ; /* 0x0000580006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x178] ; /* 0x00005e0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} | .file "tmpxft_000588fe_00000000-6_AddVector.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
.type _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf, @function
_Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9AddVectoriPKfS0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf, .-_Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
.globl _Z9AddVectoriPKfS0_Pf
.type _Z9AddVectoriPKfS0_Pf, @function
_Z9AddVectoriPKfS0_Pf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9AddVectoriPKfS0_Pf, .-_Z9AddVectoriPKfS0_Pf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9AddVectoriPKfS0_Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9AddVectoriPKfS0_Pf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} | #include <hip/hip_runtime.h>
#include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9AddVectoriPKfS0_Pf
.globl _Z9AddVectoriPKfS0_Pf
.p2align 8
.type _Z9AddVectoriPKfS0_Pf,@function
_Z9AddVectoriPKfS0_Pf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9AddVectoriPKfS0_Pf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9AddVectoriPKfS0_Pf, .Lfunc_end0-_Z9AddVectoriPKfS0_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9AddVectoriPKfS0_Pf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9AddVectoriPKfS0_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
/* kernel.cu */
__global__ void AddVector( int vecSize, const float* vecA, const float* vecB, float* vecC)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < vecSize)
vecC[i] = vecA[i] + vecB[i];
} | .text
.file "AddVector.hip"
.globl _Z24__device_stub__AddVectoriPKfS0_Pf # -- Begin function _Z24__device_stub__AddVectoriPKfS0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__AddVectoriPKfS0_Pf,@function
_Z24__device_stub__AddVectoriPKfS0_Pf: # @_Z24__device_stub__AddVectoriPKfS0_Pf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9AddVectoriPKfS0_Pf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__AddVectoriPKfS0_Pf, .Lfunc_end0-_Z24__device_stub__AddVectoriPKfS0_Pf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9AddVectoriPKfS0_Pf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9AddVectoriPKfS0_Pf,@object # @_Z9AddVectoriPKfS0_Pf
.section .rodata,"a",@progbits
.globl _Z9AddVectoriPKfS0_Pf
.p2align 3, 0x0
_Z9AddVectoriPKfS0_Pf:
.quad _Z24__device_stub__AddVectoriPKfS0_Pf
.size _Z9AddVectoriPKfS0_Pf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9AddVectoriPKfS0_Pf"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__AddVectoriPKfS0_Pf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9AddVectoriPKfS0_Pf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z9AddVectoriPKfS0_Pf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x160], PT ; /* 0x0000580006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x170] ; /* 0x00005c0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x178] ; /* 0x00005e0006067625 */
/* 0x000fc800078e0207 */
/*00d0*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z9AddVectoriPKfS0_Pf
.globl _Z9AddVectoriPKfS0_Pf
.p2align 8
.type _Z9AddVectoriPKfS0_Pf,@function
_Z9AddVectoriPKfS0_Pf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9AddVectoriPKfS0_Pf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z9AddVectoriPKfS0_Pf, .Lfunc_end0-_Z9AddVectoriPKfS0_Pf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9AddVectoriPKfS0_Pf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9AddVectoriPKfS0_Pf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000588fe_00000000-6_AddVector.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
.type _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf, @function
_Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9AddVectoriPKfS0_Pf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf, .-_Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
.globl _Z9AddVectoriPKfS0_Pf
.type _Z9AddVectoriPKfS0_Pf, @function
_Z9AddVectoriPKfS0_Pf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z9AddVectoriPKfS0_PfiPKfS0_Pf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z9AddVectoriPKfS0_Pf, .-_Z9AddVectoriPKfS0_Pf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z9AddVectoriPKfS0_Pf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z9AddVectoriPKfS0_Pf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "AddVector.hip"
.globl _Z24__device_stub__AddVectoriPKfS0_Pf # -- Begin function _Z24__device_stub__AddVectoriPKfS0_Pf
.p2align 4, 0x90
.type _Z24__device_stub__AddVectoriPKfS0_Pf,@function
_Z24__device_stub__AddVectoriPKfS0_Pf: # @_Z24__device_stub__AddVectoriPKfS0_Pf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
leaq 72(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9AddVectoriPKfS0_Pf, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z24__device_stub__AddVectoriPKfS0_Pf, .Lfunc_end0-_Z24__device_stub__AddVectoriPKfS0_Pf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9AddVectoriPKfS0_Pf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9AddVectoriPKfS0_Pf,@object # @_Z9AddVectoriPKfS0_Pf
.section .rodata,"a",@progbits
.globl _Z9AddVectoriPKfS0_Pf
.p2align 3, 0x0
_Z9AddVectoriPKfS0_Pf:
.quad _Z24__device_stub__AddVectoriPKfS0_Pf
.size _Z9AddVectoriPKfS0_Pf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z9AddVectoriPKfS0_Pf"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__AddVectoriPKfS0_Pf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9AddVectoriPKfS0_Pf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} | code for sm_80
Function : _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIADD3 UR4, UR5, UR4, URZ ; /* 0x0000000405047290 */
/* 0x000fe2000fffe03f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IADD3 R0, R0, c[0x0][0x160], RZ ; /* 0x0000580000007a10 */
/* 0x000fc80007ffe0ff */
/*0070*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x001fd400000001ff */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e0205 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe400078e0205 */
/*00d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea8000c1e1b00 */
/*00e0*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea2000c1e1b00 */
/*00f0*/ MOV R9, c[0x0][0x0] ; /* 0x0000000000097a02 */
/* 0x000fca0000000f00 */
/*0100*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0120*/ DFMA R6, R2, c[0x0][0x168], R6 ; /* 0x00005a0002067a2b */
/* 0x004e0e0000000006 */
/*0130*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x0011ea000c101b06 */
/*0140*/ @!P0 BRA 0xa0 ; /* 0xffffff5000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} | .file "tmpxft_000cabd2_00000000-6_ker_gkylCartFieldAccumulate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
.type _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd, @function
_Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movsd %xmm0, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z27ker_gkylCartFieldAccumulatejjdPKdPd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd, .-_Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd, @function
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, .-_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z27ker_gkylCartFieldAccumulatejjdPKdPd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z27ker_gkylCartFieldAccumulatejjdPKdPd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 8
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd,@function
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
s_clause 0x1
s_load_b32 s6, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x0
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s6, 0xffff
s_add_i32 s8, s5, s4
s_mul_i32 s15, s15, s9
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add3_u32 v0, s15, s4, v0
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x18
s_mov_b32 s3, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s9
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 3, v[0:1]
v_add_nc_u32_e32 v0, s2, v0
v_add_co_u32 v3, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v0
global_load_b64 v[3:4], v[3:4], off
global_load_b64 v[5:6], v[1:2], off
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0)
v_fma_f64 v[3:4], v[3:4], s[4:5], v[5:6]
global_store_b64 v[1:2], v[3:4], off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, .Lfunc_end0-_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27ker_gkylCartFieldAccumulatejjdPKdPd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
out[n] += fact*inp[n];
} | .text
.file "ker_gkylCartFieldAccumulate.hip"
.globl _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd # -- Begin function _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 4, 0x90
.type _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd,@function
_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd: # @_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movsd %xmm0, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z27ker_gkylCartFieldAccumulatejjdPKdPd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd, .Lfunc_end0-_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27ker_gkylCartFieldAccumulatejjdPKdPd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd,@object # @_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .rodata,"a",@progbits
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 3, 0x0
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
.quad _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z27ker_gkylCartFieldAccumulatejjdPKdPd"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x160] ; /* 0x0000580000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIADD3 UR4, UR5, UR4, URZ ; /* 0x0000000405047290 */
/* 0x000fe2000fffe03f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ IADD3 R0, R0, c[0x0][0x160], RZ ; /* 0x0000580000007a10 */
/* 0x000fc80007ffe0ff */
/*0070*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0080*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*00a0*/ HFMA2.MMA R5, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff057435 */
/* 0x001fd400000001ff */
/*00b0*/ IMAD.WIDE R2, R0, R5, c[0x0][0x170] ; /* 0x00005c0000027625 */
/* 0x000fc800078e0205 */
/*00c0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x000fe400078e0205 */
/*00d0*/ LDG.E.64 R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea8000c1e1b00 */
/*00e0*/ LDG.E.64 R6, [R4.64] ; /* 0x0000000604067981 */
/* 0x000ea2000c1e1b00 */
/*00f0*/ MOV R9, c[0x0][0x0] ; /* 0x0000000000097a02 */
/* 0x000fca0000000f00 */
/*0100*/ IMAD R0, R9, c[0x0][0xc], R0 ; /* 0x0000030009007a24 */
/* 0x000fca00078e0200 */
/*0110*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fe2000bf06070 */
/*0120*/ DFMA R6, R2, c[0x0][0x168], R6 ; /* 0x00005a0002067a2b */
/* 0x004e0e0000000006 */
/*0130*/ STG.E.64 [R4.64], R6 ; /* 0x0000000604007986 */
/* 0x0011ea000c101b06 */
/*0140*/ @!P0 BRA 0xa0 ; /* 0xffffff5000008947 */
/* 0x000fea000383ffff */
/*0150*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0160*/ BRA 0x160; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 8
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd,@function
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
s_clause 0x1
s_load_b32 s6, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x0
s_add_u32 s2, s0, 32
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s6, 0xffff
s_add_i32 s8, s5, s4
s_mul_i32 s15, s15, s9
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add3_u32 v0, s15, s4, v0
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s8, v0
s_cbranch_execz .LBB0_3
s_load_b32 s2, s[2:3], 0x0
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x8
s_load_b64 s[0:1], s[0:1], 0x18
s_mov_b32 s3, 0
s_waitcnt lgkmcnt(0)
s_mul_i32 s2, s2, s9
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[1:2], 3, v[0:1]
v_add_nc_u32_e32 v0, s2, v0
v_add_co_u32 v3, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
v_cmp_le_u32_e32 vcc_lo, s8, v0
global_load_b64 v[3:4], v[3:4], off
global_load_b64 v[5:6], v[1:2], off
s_or_b32 s3, vcc_lo, s3
s_waitcnt vmcnt(0)
v_fma_f64 v[3:4], v[3:4], s[4:5], v[5:6]
global_store_b64 v[1:2], v[3:4], off
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, .Lfunc_end0-_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 8
.value_kind: by_value
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z27ker_gkylCartFieldAccumulatejjdPKdPd.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000cabd2_00000000-6_ker_gkylCartFieldAccumulate.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
.type _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd, @function
_Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movsd %xmm0, 16(%rsp)
movq %rdx, 8(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z27ker_gkylCartFieldAccumulatejjdPKdPd(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd, .-_Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd, @function
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z53__device_stub__Z27ker_gkylCartFieldAccumulatejjdPKdPdjjdPKdPd
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, .-_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z27ker_gkylCartFieldAccumulatejjdPKdPd"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z27ker_gkylCartFieldAccumulatejjdPKdPd(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "ker_gkylCartFieldAccumulate.hip"
.globl _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd # -- Begin function _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 4, 0x90
.type _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd,@function
_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd: # @_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movsd %xmm0, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z27ker_gkylCartFieldAccumulatejjdPKdPd, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd, .Lfunc_end0-_Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z27ker_gkylCartFieldAccumulatejjdPKdPd, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z27ker_gkylCartFieldAccumulatejjdPKdPd,@object # @_Z27ker_gkylCartFieldAccumulatejjdPKdPd
.section .rodata,"a",@progbits
.globl _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.p2align 3, 0x0
_Z27ker_gkylCartFieldAccumulatejjdPKdPd:
.quad _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.size _Z27ker_gkylCartFieldAccumulatejjdPKdPd, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z27ker_gkylCartFieldAccumulatejjdPKdPd"
.size .L__unnamed_1, 40
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z42__device_stub__ker_gkylCartFieldAccumulatejjdPKdPd
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z27ker_gkylCartFieldAccumulatejjdPKdPd
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
/*Identificaciones necesarios*/
int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/
int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/
int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/
int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */
int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */
int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */
/*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
//inicializamos
if(position<size) mapad[position] = max;
} | code for sm_80
Function : _Z8gpu_initPiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0030*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000ea20000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0205 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x002fc800078e0207 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*00b0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */
/* 0x000fe20000000f00 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
/*Identificaciones necesarios*/
int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/
int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/
int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/
int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */
int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */
int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */
/*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
//inicializamos
if(position<size) mapad[position] = max;
} | .file "tmpxft_000cd000_00000000-6_gpu_init.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z8gpu_initPiiiPiii
.type _Z29__device_stub__Z8gpu_initPiiiPiii, @function
_Z29__device_stub__Z8gpu_initPiiiPiii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8gpu_initPiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z8gpu_initPiiiPiii, .-_Z29__device_stub__Z8gpu_initPiiiPiii
.globl _Z8gpu_initPiii
.type _Z8gpu_initPiii, @function
_Z8gpu_initPiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z8gpu_initPiiiPiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8gpu_initPiii, .-_Z8gpu_initPiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8gpu_initPiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8gpu_initPiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
/*Identificaciones necesarios*/
int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/
int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/
int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/
int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */
int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */
int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */
/*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
//inicializamos
if(position<size) mapad[position] = max;
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
/*Identificaciones necesarios*/
int IDX_Thread = threadIdx.x; /*Identificacion del hilo en la dimension*/
int IDY_Thread = threadIdx.y; /*Identificacion del hilo en la dimension y*/
int IDX_block = blockIdx.x; /*Identificacion del bloque en la dimension x*/
int IDY_block = blockIdx.y; /*Identificacion del bloque en la dimension y */
int shapeGrid_X = gridDim.x; /*Numeros del bloques en la dimension */
int threads_per_block = blockDim.x * blockDim.y; /* Numero de hilos por bloque (1 dimension) */
/*Formula para calcular la posicion*/ //Posicion del vector dependiendo del hilo y del bloque
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
//inicializamos
if(position<size) mapad[position] = max;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
	/* Required identifiers */
	int IDX_Thread = threadIdx.x;	/* Thread id in the x dimension */
	int IDY_Thread = threadIdx.y;	/* Thread id in the y dimension */
	int IDX_block =	blockIdx.x;	/* Block id in the x dimension */
	int IDY_block =	blockIdx.y;	/* Block id in the y dimension */
	int shapeGrid_X = gridDim.x;	/* Number of blocks in the x dimension */
	int threads_per_block =	blockDim.x * blockDim.y;	/* Threads per block (flattened to 1 dimension) */
	/* Flatten the 2-D (block, thread) coordinates into a linear array index */
	int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
	// Initialize; bounds guard protects the tail when the grid over-covers size
	if(position<size) mapad[position] = max;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8gpu_initPiii
.globl _Z8gpu_initPiii
.p2align 8
.type _Z8gpu_initPiii,@function
_Z8gpu_initPiii:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0xc
s_load_b32 s4, s[0:1], 0x1c
v_bfe_u32 v1, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_lshr_b32 s5, s4, 16
s_add_i32 s3, s3, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s3, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_and_b32 s3, s4, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v2, s3, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8gpu_initPiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8gpu_initPiii, .Lfunc_end0-_Z8gpu_initPiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8gpu_initPiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8gpu_initPiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void gpu_init(int *mapad, int max, int size){
	/* Required identifiers */
	int IDX_Thread = threadIdx.x;	/* Thread id in the x dimension */
	int IDY_Thread = threadIdx.y;	/* Thread id in the y dimension */
	int IDX_block =	blockIdx.x;	/* Block id in the x dimension */
	int IDY_block =	blockIdx.y;	/* Block id in the y dimension */
	int shapeGrid_X = gridDim.x;	/* Number of blocks in the x dimension */
	int threads_per_block =	blockDim.x * blockDim.y;	/* Threads per block (flattened to 1 dimension) */
	/* Flatten the 2-D (block, thread) coordinates into a linear array index */
	int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
	// Initialize; bounds guard protects the tail when the grid over-covers size
	if(position<size) mapad[position] = max;
} | .text
.file "gpu_init.hip"
.globl _Z23__device_stub__gpu_initPiii # -- Begin function _Z23__device_stub__gpu_initPiii
.p2align 4, 0x90
.type _Z23__device_stub__gpu_initPiii,@function
_Z23__device_stub__gpu_initPiii: # @_Z23__device_stub__gpu_initPiii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8gpu_initPiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z23__device_stub__gpu_initPiii, .Lfunc_end0-_Z23__device_stub__gpu_initPiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8gpu_initPiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8gpu_initPiii,@object # @_Z8gpu_initPiii
.section .rodata,"a",@progbits
.globl _Z8gpu_initPiii
.p2align 3, 0x0
_Z8gpu_initPiii:
.quad _Z23__device_stub__gpu_initPiii
.size _Z8gpu_initPiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8gpu_initPiii"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__gpu_initPiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8gpu_initPiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8gpu_initPiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e280000002600 */
/*0020*/ S2R R5, SR_CTAID.X ; /* 0x0000000000057919 */
/* 0x000e280000002500 */
/*0030*/ S2R R7, SR_TID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002200 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000ea20000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0xc], R5 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0205 */
/*0060*/ IMAD R0, R0, c[0x0][0x4], R7 ; /* 0x0000010000007a24 */
/* 0x002fc800078e0207 */
/*0070*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0203 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x16c], PT ; /* 0x00005b0000007a0c */
/* 0x000fda0003f06270 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*00b0*/ MOV R5, c[0x0][0x168] ; /* 0x00005a0000057a02 */
/* 0x000fe20000000f00 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e0203 */
/*00e0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8gpu_initPiii
.globl _Z8gpu_initPiii
.p2align 8
.type _Z8gpu_initPiii,@function
_Z8gpu_initPiii:
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0xc
s_load_b32 s4, s[0:1], 0x1c
v_bfe_u32 v1, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_lshr_b32 s5, s4, 16
s_add_i32 s3, s3, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s3, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_and_b32 s3, s4, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v2, s3, v[3:4]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_i32_e32 vcc_lo, s2, v0
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x0
s_load_b32 s0, s[0:1], 0x8
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
v_mov_b32_e32 v2, s0
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8gpu_initPiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8gpu_initPiii, .Lfunc_end0-_Z8gpu_initPiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8gpu_initPiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8gpu_initPiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000cd000_00000000-6_gpu_init.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z29__device_stub__Z8gpu_initPiiiPiii
.type _Z29__device_stub__Z8gpu_initPiiiPiii, @function
_Z29__device_stub__Z8gpu_initPiiiPiii:
.LFB2051:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 4(%rsp), %rax
movq %rax, 88(%rsp)
movq %rsp, %rax
movq %rax, 96(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z8gpu_initPiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z29__device_stub__Z8gpu_initPiiiPiii, .-_Z29__device_stub__Z8gpu_initPiiiPiii
.globl _Z8gpu_initPiii
.type _Z8gpu_initPiii, @function
_Z8gpu_initPiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z29__device_stub__Z8gpu_initPiiiPiii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8gpu_initPiii, .-_Z8gpu_initPiii
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8gpu_initPiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8gpu_initPiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpu_init.hip"
.globl _Z23__device_stub__gpu_initPiii # -- Begin function _Z23__device_stub__gpu_initPiii
.p2align 4, 0x90
.type _Z23__device_stub__gpu_initPiii,@function
_Z23__device_stub__gpu_initPiii: # @_Z23__device_stub__gpu_initPiii
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 4(%rsp), %rax
movq %rax, 72(%rsp)
movq %rsp, %rax
movq %rax, 80(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z8gpu_initPiii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z23__device_stub__gpu_initPiii, .Lfunc_end0-_Z23__device_stub__gpu_initPiii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8gpu_initPiii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8gpu_initPiii,@object # @_Z8gpu_initPiii
.section .rodata,"a",@progbits
.globl _Z8gpu_initPiii
.p2align 3, 0x0
_Z8gpu_initPiii:
.quad _Z23__device_stub__gpu_initPiii
.size _Z8gpu_initPiii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8gpu_initPiii"
.size .L__unnamed_1, 16
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__gpu_initPiii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8gpu_initPiii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
// Element-wise vector sum: c[i] = a[i] + b[i] for i < n (n is a file-level macro).
// Expects a 1-D launch; one thread handles one element.
__global__ void add(int *a, int *b, int *c) {
	// Global thread index across the whole grid.
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// Bounds guard: the grid may launch more threads than n elements.
	if (i < n) {
		c[i] = a[i] + b[i];
	}
}
// Reads two n-element integer arrays from stdin, adds them on the GPU,
// and prints the element-wise sums.
int main(){
	int a[n];	// host input array 1
	int b[n];	// host input array 2
	int i;
	int c[n]= {0};	// host result array
	int* dev_a;
	int* dev_b;
	int* dev_c;
	// Device allocations. NOTE(review): cudaMalloc/cudaMemcpy/scanf return
	// codes are unchecked throughout; failures would go unnoticed.
	cudaMalloc((void**)&dev_c, n * sizeof(int));
	cudaMalloc((void**)&dev_a, n * sizeof(int));
	cudaMalloc((void**)&dev_b, n * sizeof(int));
	printf("\narray elements (1st):\n");
	for(i=0;i<n;i++){
		scanf("%d",&a[i]);
	}
	printf("\narray elements (2nd):\n");
	for(i=0;i<n;i++){
		scanf("%d",&b[i]);
	}
	// Copy inputs host -> device.
	cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, n* sizeof(int), cudaMemcpyHostToDevice);
	// Launch n blocks of 1 thread each: one thread per element.
	add<<<n,1>>>(dev_a, dev_b, dev_c);
	// Blocking device -> host copy; also synchronizes with the kernel.
	cudaMemcpy(c, dev_c, n* sizeof(int), cudaMemcpyDeviceToHost);
	printf("\nsum is\n");
	for(i=0;i<n;i++){
		printf("%d\n",c[i]);
	}
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaDeviceReset();
	return 0;
} | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
// Element-wise vector sum: c[i] = a[i] + b[i] for i < n (n is a file-level macro).
// Expects a 1-D launch; one thread handles one element.
__global__ void add(int *a, int *b, int *c) {
	// Global thread index across the whole grid.
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// Bounds guard: the grid may launch more threads than n elements.
	if (i < n) {
		c[i] = a[i] + b[i];
	}
}
// Reads two n-element integer arrays from stdin, adds them on the GPU,
// and prints the element-wise sums.
int main(){
	int a[n];	// host input array 1
	int b[n];	// host input array 2
	int i;
	int c[n]= {0};	// host result array
	int* dev_a;
	int* dev_b;
	int* dev_c;
	// Device allocations. NOTE(review): cudaMalloc/cudaMemcpy/scanf return
	// codes are unchecked throughout; failures would go unnoticed.
	cudaMalloc((void**)&dev_c, n * sizeof(int));
	cudaMalloc((void**)&dev_a, n * sizeof(int));
	cudaMalloc((void**)&dev_b, n * sizeof(int));
	printf("\narray elements (1st):\n");
	for(i=0;i<n;i++){
		scanf("%d",&a[i]);
	}
	printf("\narray elements (2nd):\n");
	for(i=0;i<n;i++){
		scanf("%d",&b[i]);
	}
	// Copy inputs host -> device.
	cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, n* sizeof(int), cudaMemcpyHostToDevice);
	// Launch n blocks of 1 thread each: one thread per element.
	add<<<n,1>>>(dev_a, dev_b, dev_c);
	// Blocking device -> host copy; also synchronizes with the kernel.
	cudaMemcpy(c, dev_c, n* sizeof(int), cudaMemcpyDeviceToHost);
	printf("\nsum is\n");
	for(i=0;i<n;i++){
		printf("%d\n",c[i]);
	}
	cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaDeviceReset();
	return 0;
} | .file	"tmpxft_0019003d_00000000-6_two_array_sum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\narray elements (1st):\n"
.LC1:
.string "%d"
.LC2:
.string "\narray elements (2nd):\n"
.LC3:
.string "\nsum is\n"
.LC4:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
pxor %xmm0, %xmm0
movaps %xmm0, 112(%rsp)
movl $0, 128(%rsp)
leaq 16(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
movq %rsp, %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rbx
leaq 68(%rsp), %r12
leaq .LC1(%rip), %rbp
.L12:
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L12
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 80(%rsp), %rbx
leaq 100(%rsp), %r12
leaq .LC1(%rip), %rbp
.L13:
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L13
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $20, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $20, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $5, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L14:
leaq 112(%rsp), %rbx
movl $2, %ecx
movl $20, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 132(%rsp), %r12
leaq .LC4(%rip), %rbp
.L15:
movl (%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L15
movq 16(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L14
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
cudaMalloc((void**)&dev_c, n * sizeof(int));
cudaMalloc((void**)&dev_a, n * sizeof(int));
cudaMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, n* sizeof(int), cudaMemcpyHostToDevice);
add<<<n,1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, n* sizeof(int), cudaMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaDeviceReset();
return 0;
} | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<hip/hip_runtime.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
hipMalloc((void**)&dev_c, n * sizeof(int));
hipMalloc((void**)&dev_a, n * sizeof(int));
hipMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n* sizeof(int), hipMemcpyHostToDevice);
add<<<n,1>>>(dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, n* sizeof(int), hipMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
hipDeviceReset();
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<hip/hip_runtime.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
hipMalloc((void**)&dev_c, n * sizeof(int));
hipMalloc((void**)&dev_a, n * sizeof(int));
hipMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n* sizeof(int), hipMemcpyHostToDevice);
add<<<n,1>>>(dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, n* sizeof(int), hipMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
hipDeviceReset();
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 5, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include<hip/hip_runtime.h>
#define n 5
__global__ void add(int *a, int *b, int *c) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
int main(){
int a[n];
int b[n];
int i;
int c[n]= {0};
int* dev_a;
int* dev_b;
int* dev_c;
hipMalloc((void**)&dev_c, n * sizeof(int));
hipMalloc((void**)&dev_a, n * sizeof(int));
hipMalloc((void**)&dev_b, n * sizeof(int));
printf("\narray elements (1st):\n");
for(i=0;i<n;i++){
scanf("%d",&a[i]);
}
printf("\narray elements (2nd):\n");
for(i=0;i<n;i++){
scanf("%d",&b[i]);
}
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, n* sizeof(int), hipMemcpyHostToDevice);
add<<<n,1>>>(dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, n* sizeof(int), hipMemcpyDeviceToHost);
printf("\nsum is\n");
for(i=0;i<n;i++){
printf("%d\n",c[i]);
}
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
hipDeviceReset();
return 0;
} | .text
.file "two_array_sum.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $224, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -16
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movl $0, 112(%rsp)
movq %rsp, %rdi
movl $20, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $20, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $20, %esi
callq hipMalloc
movl $.Lstr, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq (%rsp,%rbx), %rsi
addq $192, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
addq $4, %rbx
cmpq $20, %rbx
jne .LBB1_1
# %bb.2:
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
leaq (%rsp,%rbx), %rsi
addq $160, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
addq $4, %rbx
cmpq $20, %rbx
jne .LBB1_3
# %bb.4:
movq 16(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $20, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 160(%rsp), %rsi
movl $20, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 4(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $20, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_7: # =>This Inner Loop Header: Depth=1
movl 96(%rsp,%rbx,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $5, %rbx
jne .LBB1_7
# %bb.8:
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
callq hipDeviceReset
xorl %eax, %eax
addq $224, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\n"
.size .L.str.4, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\narray elements (1st):"
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\narray elements (2nd):"
.size .Lstr.1, 23
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\nsum is"
.size .Lstr.2, 8
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPiS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GT.AND P0, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f04270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0090*/ IMAD.WIDE R2, R6.reuse, R7.reuse, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x0c0fe400078e0207 */
/*00a0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fe200078e0207 */
/*00d0*/ IADD3 R9, R4, R3, RZ ; /* 0x0000000304097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPiS_S_
.globl _Z3addPiS_S_
.p2align 8
.type _Z3addPiS_S_,@function
_Z3addPiS_S_:
s_load_b32 s2, s[0:1], 0x24
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e32 5, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPiS_S_, .Lfunc_end0-_Z3addPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z3addPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019003d_00000000-6_two_array_sum.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26__device_stub__Z3addPiS_S_PiS_S_
.type _Z26__device_stub__Z3addPiS_S_PiS_S_, @function
_Z26__device_stub__Z3addPiS_S_PiS_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z26__device_stub__Z3addPiS_S_PiS_S_, .-_Z26__device_stub__Z3addPiS_S_PiS_S_
.globl _Z3addPiS_S_
.type _Z3addPiS_S_, @function
_Z3addPiS_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z26__device_stub__Z3addPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z3addPiS_S_, .-_Z3addPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\narray elements (1st):\n"
.LC1:
.string "%d"
.LC2:
.string "\narray elements (2nd):\n"
.LC3:
.string "\nsum is\n"
.LC4:
.string "%d\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $144, %rsp
.cfi_def_cfa_offset 176
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
pxor %xmm0, %xmm0
movaps %xmm0, 112(%rsp)
movl $0, 128(%rsp)
leaq 16(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
movq %rsp, %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 48(%rsp), %rbx
leaq 68(%rsp), %r12
leaq .LC1(%rip), %rbp
.L12:
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L12
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 80(%rsp), %rbx
leaq 100(%rsp), %r12
leaq .LC1(%rip), %rbp
.L13:
movq %rbx, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L13
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $20, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq 80(%rsp), %rsi
movl $1, %ecx
movl $20, %edx
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $5, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L21
.L14:
leaq 112(%rsp), %rbx
movl $2, %ecx
movl $20, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 132(%rsp), %r12
leaq .LC4(%rip), %rbp
.L15:
movl (%rbx), %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L15
movq 16(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
call cudaDeviceReset@PLT
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L22
movl $0, %eax
addq $144, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L21:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z26__device_stub__Z3addPiS_S_PiS_S_
jmp .L14
.L22:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z3addPiS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPiS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "two_array_sum.hip"
.globl _Z18__device_stub__addPiS_S_ # -- Begin function _Z18__device_stub__addPiS_S_
.p2align 4, 0x90
.type _Z18__device_stub__addPiS_S_,@function
_Z18__device_stub__addPiS_S_: # @_Z18__device_stub__addPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z18__device_stub__addPiS_S_, .Lfunc_end0-_Z18__device_stub__addPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $224, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -16
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movl $0, 112(%rsp)
movq %rsp, %rdi
movl $20, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $20, %esi
callq hipMalloc
leaq 8(%rsp), %rdi
movl $20, %esi
callq hipMalloc
movl $.Lstr, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
leaq (%rsp,%rbx), %rsi
addq $192, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
addq $4, %rbx
cmpq $20, %rbx
jne .LBB1_1
# %bb.2:
movl $.Lstr.1, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_3: # =>This Inner Loop Header: Depth=1
leaq (%rsp,%rbx), %rsi
addq $160, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
addq $4, %rbx
cmpq $20, %rbx
jne .LBB1_3
# %bb.4:
movq 16(%rsp), %rdi
leaq 192(%rsp), %rsi
movl $20, %edx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 160(%rsp), %rsi
movl $20, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdx # imm = 0x100000001
leaq 4(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_6
# %bb.5:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 80(%rsp), %rax
movq %rax, 136(%rsp)
leaq 72(%rsp), %rax
movq %rax, 144(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z3addPiS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_6:
movq (%rsp), %rsi
leaq 96(%rsp), %rdi
movl $20, %edx
movl $2, %ecx
callq hipMemcpy
movl $.Lstr.2, %edi
callq puts@PLT
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB1_7: # =>This Inner Loop Header: Depth=1
movl 96(%rsp,%rbx,4), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
incq %rbx
cmpq $5, %rbx
jne .LBB1_7
# %bb.8:
movq (%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
callq hipDeviceReset
xorl %eax, %eax
addq $224, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPiS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPiS_S_,@object # @_Z3addPiS_S_
.section .rodata,"a",@progbits
.globl _Z3addPiS_S_
.p2align 3, 0x0
_Z3addPiS_S_:
.quad _Z18__device_stub__addPiS_S_
.size _Z3addPiS_S_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "%d\n"
.size .L.str.4, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPiS_S_"
.size .L__unnamed_1, 13
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "\narray elements (1st):"
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "\narray elements (2nd):"
.size .Lstr.1, 23
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\nsum is"
.size .Lstr.2, 8
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
// Prints the first n ints of h_data to stdout, space-separated,
// framed by a leading and trailing newline. Used to dump the sorted
// vector after the device->host copy.
void print(int* h_data, int n) {
	std::cout << "\n";
	for (int i = 0; i < n; i++) {
		std::cout << h_data[i] << " ";
	}
	std::cout << "\n";
}
// Terminates the program with a diagnostic if a CUDA runtime call failed.
// NOTE(review): because this is a function and not a macro, __LINE__
// expands to the printf line inside this function, not the call site —
// the printed line number never identifies which CUDA call failed.
void cudaTest(cudaError_t error) {
	if (error != cudaSuccess) {
		printf("cuda returned error %s (code %d), line(%d)\n",
				cudaGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
/*
 * Per-block LSD binary radix sort of one segment of d_vec.
 * Block bx processes d_vec[d_seg[bx] .. d_seg[bx+1]) in tiles of
 * BLOCK_SIZE elements (one element per thread), sorting each tile on
 * the low BITS_NUMBER bits via a per-bit prefix-sum scatter through
 * shared memory. num_segments is accepted but never read here.
 *
 * NOTE(review): several __syncthreads() calls below sit inside
 * divergent branches (`if (threadIndexGlobal < end)` and
 * `if (index < block)`). If any thread of the block skips those
 * branches — i.e. whenever a segment size is not a multiple of
 * BLOCK_SIZE — this is undefined behavior / potential deadlock.
 * TODO confirm the input guarantees full tiles.
 */
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
	int bx = blockIdx.x;
	int tx = threadIdx.x;
	int begin = d_seg[bx];    // segment start (inclusive)
	int end = d_seg[bx + 1];  // segment end (exclusive)
	int size = end - begin;
	__shared__ int s_vec[BLOCK_SIZE];            // current tile values
	__shared__ int s_aux[BLOCK_SIZE];            // scatter target per pass
	__shared__ int s_pref_sum_one[BLOCK_SIZE];   // running counts of 1-bits
	__shared__ int s_pref_sum_zero[BLOCK_SIZE];  // running counts of 0-bits
	for (int k = 0; k < size; k += BLOCK_SIZE) {
		int threadIndexGlobal = begin + k + tx;
		// `block` = number of valid elements in this tile (last tile may be short).
		int block = BLOCK_SIZE;
		if(BLOCK_SIZE+k > size)
			block = size-k;
		if (threadIndexGlobal < end) {
			s_vec[tx] = d_vec[threadIndexGlobal];
			// NOTE(review): barrier inside a divergent branch — see header.
			__syncthreads();
			int i, j;
			int exp = 0;  // bit position currently being sorted on
			for (j = 0; j < BITS_NUMBER; j++) {
				// x = value of bit `exp` for this thread's element.
				int x = (s_vec[tx] >> exp) & 1;
				s_pref_sum_one[tx] = x;
				s_pref_sum_zero[tx] = 1-x;
				__syncthreads();
				// Doubling-stride scan accumulating bit counts into higher slots.
				for (i = 1; i < block; i*=2) {
					int index = tx + i;
					if (index < block) {
						int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
						int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
						// NOTE(review): barrier inside `if (index < block)` — divergent
						// whenever index >= block for some threads; see header.
						__syncthreads();
						s_pref_sum_one[index] = one;
						s_pref_sum_zero[index] = zero;
						__syncthreads();
					}
				}
				x = (s_vec[tx] >> exp) & 1;
				// Stable scatter: 0-bit elements go to (zero-count - 1); 1-bit
				// elements go after all zeros, offset by total zero count.
				int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
						+ (1 - x) * (s_pref_sum_zero[tx] - 1);
				s_aux[index] = s_vec[tx];
				__syncthreads();
				s_vec[tx] = s_aux[tx];
				__syncthreads();
				exp++;
			}
			// Write the tile back (s_aux == s_vec after the last pass's copy).
			d_vec[threadIndexGlobal] = s_aux[tx];
		}
	}
}
/*
 * Reads a segmented int array from stdin (num_of_segments, then
 * num_of_segments+1 segment offsets, then num_of_elements, then the
 * elements), sorts each segment on the GPU with radix_sort<EXP_BITS_SIZE>
 * (one block of BLOCK_SIZE threads per segment), copies the result back
 * and prints it when ELAPSED_TIME != 1.
 */
int main(int argc, char **argv) {
	int num_of_segments;
	int num_of_elements;
	int i;
	// NOTE(review): scanf and malloc return values are never checked;
	// malformed input or allocation failure leads to UB below.
	scanf("%d", &num_of_segments);
	int mem_size_seg = sizeof(int) * (num_of_segments + 1);
	int *h_seg = (int *) malloc(mem_size_seg);
	for (i = 0; i < num_of_segments + 1; i++)
		scanf("%d", &h_seg[i]);
	scanf("%d", &num_of_elements);
	int mem_size_vec = sizeof(int) * num_of_elements;
	int *h_vec = (int *) malloc(mem_size_vec);
	for (i = 0; i < num_of_elements; i++)
		scanf("%d", &h_vec[i]);
	// print(h_seg, num_of_segments + 1);	print(h_vec, num_of_elements);
	// Allocate device memory
	int *d_seg, *d_vec;
	cudaTest(cudaMalloc((void **) &d_seg, mem_size_seg));
	cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
	//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
	// copy host memory to device
	cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
	cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
	// Setup execution parameters
//	int devID = 0;
//	cudaDeviceProp deviceProp;
//	cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
//	unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
//	//unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
//	//unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
	int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
	//if (blocksize > 1024)
	//	blocksize = 1024;
	dim3 threads(blocksize, 1);
	//dim3 grid(num_of_segments / blocksize + 1, 1);
	dim3 grid(num_of_segments, 1);	// one block per segment
	std::chrono::high_resolution_clock::time_point start =
			std::chrono::high_resolution_clock::now();
	radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
			num_of_segments);
	// Block until the kernel finishes so the wall-clock timing is meaningful.
	cudaDeviceSynchronize();
	std::chrono::high_resolution_clock::time_point stop =
			std::chrono::high_resolution_clock::now();
	cudaTest(cudaPeekAtLastError());
	// time_span is only consumed by the (commented-out) timing printout.
	std::chrono::duration<double> time_span = std::chrono::duration_cast<
			std::chrono::duration<double>>(stop - start);
	// NOTE(review): the kernel shown above only reads d_seg, so this
	// copy-back of the segment offsets appears redundant.
	cudaTest(cudaMemcpy(h_seg, d_seg, mem_size_seg, cudaMemcpyDeviceToHost));
	cudaTest(cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost));
	//print(h_seg, num_of_segments + 1);
	//print(h_vec, num_of_elements);
	//print(h_seg, 10);
	//print(h_vec, 1000);
	// Null statement on purpose: timing output is compiled out; with
	// ELAPSED_TIME == 0 the sorted data is printed instead.
	if (ELAPSED_TIME == 1)
//		std::cout << "It took me " << time_span.count() * 1000
//				<< " miliseconds.\n";
		;
	else
		print(h_vec, num_of_elements);
	// cudaDeviceReset causes the driver to clean up all state. While
	// not mandatory in normal operation, it is good practice. It is also
	// needed to ensure correct operation when the application is being
	// profiled. Calling cudaDeviceReset causes all profile data to be
	// flushed before the application exits
	cudaDeviceReset();
	free(h_seg);
	free(h_vec);
	cudaFree(d_seg);
	cudaFree(d_vec);
	return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ | code for sm_80
Function : _Z10radix_sortILi10EEvPiS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0040*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ea8000c1e1900 */
/*0060*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea4000c1e1900 */
/*0070*/ IMAD.IADD R4, R0, 0x1, -R6 ; /* 0x0000000100047824 */
/* 0x004fca00078e0a06 */
/*0080*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*0090*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e220000002100 */
/*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD.IADD R6, R6, 0x1, R5 ; /* 0x0000000106067824 */
/* 0x001fe400078e0205 */
/*00d0*/ IMAD.SHL.U32 R7, R5, 0x4, RZ ; /* 0x0000000405077824 */
/* 0x000fc600078e00ff */
/*00e0*/ IADD3 R11, R6, R3, RZ ; /* 0x00000003060b7210 */
/* 0x000fe20007ffe0ff */
/*00f0*/ IMAD.IADD R8, R4, 0x1, -R3 ; /* 0x0000000104087824 */
/* 0x000fe200078e0a03 */
/*0100*/ IADD3 R9, R3, 0x400, RZ ; /* 0x0000040003097810 */
/* 0x000fe40007ffe0ff */
/*0110*/ ISETP.GE.AND P1, PT, R11, R0, PT ; /* 0x000000000b00720c */
/* 0x000fe40003f26270 */
/*0120*/ ISETP.GT.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x000fc80003f04270 */
/*0130*/ SEL R8, R8, 0x400, P0 ; /* 0x0000040008087807 */
/* 0x000fce0000000000 */
/*0140*/ @P1 BRA 0x540 ; /* 0x000003f000001947 */
/* 0x000fea0003800000 */
/*0150*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*0160*/ IMAD.WIDE R2, R11, R2, c[0x0][0x160] ; /* 0x000058000b027625 */
/* 0x000fca00078e0202 */
/*0170*/ LDG.E R14, [R2.64] ; /* 0x00000004020e7981 */
/* 0x000ea2000c1e1900 */
/*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0190*/ HFMA2.MMA R12, -RZ, RZ, 0, 0 ; /* 0x00000000ff0c7435 */
/* 0x000fe200000001ff */
/*01a0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e00ff */
/*01b0*/ IMAD.SHL.U32 R11, R8, 0x4, RZ ; /* 0x00000004080b7824 */
/* 0x000fe200078e00ff */
/*01c0*/ STS [R5.X4], R14 ; /* 0x0000000e05007388 */
/* 0x0041e80000004800 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ LDS R13, [R5.X4] ; /* 0x00000000050d7984 */
/* 0x000e620000004800 */
/*01f0*/ ISETP.GE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fe20003f06270 */
/*0200*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0210*/ BSSY B0, 0x3d0 ; /* 0x000001b000007945 */
/* 0x000fe20003800000 */
/*0220*/ SHF.R.U32.HI R13, RZ, R10, R13 ; /* 0x0000000aff0d7219 */
/* 0x002fc8000001160d */
/*0230*/ LOP3.LUT R14, R13, 0x1, RZ, 0xc0, !PT ; /* 0x000000010d0e7812 */
/* 0x001fc800078ec0ff */
/*0240*/ LOP3.LUT R16, R14, 0x1, RZ, 0x3c, !PT ; /* 0x000000010e107812 */
/* 0x000fe200078e3cff */
/*0250*/ STS [R5.X4+0x2000], R14 ; /* 0x0020000e05007388 */
/* 0x0001e80000004800 */
/*0260*/ STS [R5.X4+0x3000], R16 ; /* 0x0030001005007388 */
/* 0x0001e80000004800 */
/*0270*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0280*/ @!P0 BRA 0x3c0 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0290*/ IMAD.MOV.U32 R14, RZ, RZ, 0x1 ; /* 0x00000001ff0e7424 */
/* 0x001fc800078e00ff */
/*02a0*/ IMAD.IADD R13, R5, 0x1, R14 ; /* 0x00000001050d7824 */
/* 0x001fca00078e020e */
/*02b0*/ ISETP.GE.AND P0, PT, R13, R8, PT ; /* 0x000000080d00720c */
/* 0x000fda0003f06270 */
/*02c0*/ @P0 BRA 0x390 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*02d0*/ IMAD R20, R14, 0x4, R7 ; /* 0x000000040e147824 */
/* 0x000fe200078e0207 */
/*02e0*/ LDS R13, [R5.X4+0x2000] ; /* 0x00200000050d7984 */
/* 0x000fe20000004800 */
/*02f0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe60003800000 */
/*0300*/ LDS R18, [R5.X4+0x3000] ; /* 0x0030000005127984 */
/* 0x000fe80000004800 */
/*0310*/ LDS R16, [R20+0x2000] ; /* 0x0020000014107984 */
/* 0x000e280000000800 */
/*0320*/ LDS R15, [R20+0x3000] ; /* 0x00300000140f7984 */
/* 0x000e620000000800 */
/*0330*/ IMAD.IADD R13, R13, 0x1, R16 ; /* 0x000000010d0d7824 */
/* 0x001fc600078e0210 */
/*0340*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0350*/ IADD3 R15, R15, R18, RZ ; /* 0x000000120f0f7210 */
/* 0x002fca0007ffe0ff */
/*0360*/ STS [R20+0x2000], R13 ; /* 0x0020000d14007388 */
/* 0x0001e80000000800 */
/*0370*/ STS [R20+0x3000], R15 ; /* 0x0030000f14007388 */
/* 0x0001e80000000800 */
/*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0390*/ IMAD.SHL.U32 R14, R14, 0x2, RZ ; /* 0x000000020e0e7824 */
/* 0x000fca00078e00ff */
/*03a0*/ ISETP.GE.AND P0, PT, R14, R8, PT ; /* 0x000000080e00720c */
/* 0x000fda0003f06270 */
/*03b0*/ @!P0 BRA 0x2a0 ; /* 0xfffffee000008947 */
/* 0x000fea000383ffff */
/*03c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*03d0*/ LDS R13, [R5.X4] ; /* 0x00000000050d7984 */
/* 0x000e220000004800 */
/*03e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*03f0*/ IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c7810 */
/* 0x000fe40007ffe0ff */
/*0400*/ LDS R15, [R5.X4+0x2000] ; /* 0x00200000050f7984 */
/* 0x000fe40000004800 */
/*0410*/ ISETP.GE.U32.AND P0, PT, R12, 0xa, PT ; /* 0x0000000a0c00780c */
/* 0x000fe40003f06070 */
/*0420*/ LDS R16, [R11+0x2ffc] ; /* 0x002ffc000b107984 */
/* 0x000e680000000800 */
/*0430*/ LDS R17, [R5.X4+0x3000] ; /* 0x0030000005117984 */
/* 0x000ea20000004800 */
/*0440*/ SHF.R.U32.HI R14, RZ, R10, R13 ; /* 0x0000000aff0e7219 */
/* 0x001fc4000001160d */
/*0450*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fe40007ffe0ff */
/*0460*/ LOP3.LUT R14, R14, 0x1, RZ, 0xc0, !PT ; /* 0x000000010e0e7812 */
/* 0x000fe400078ec0ff */
/*0470*/ IADD3 R15, R15, -0x1, R16 ; /* 0xffffffff0f0f7810 */
/* 0x002fe40007ffe010 */
/*0480*/ LOP3.LUT R16, R14.reuse, 0x1, RZ, 0x3c, !PT ; /* 0x000000010e107812 */
/* 0x040fe400078e3cff */
/*0490*/ IADD3 R17, R17, -0x1, RZ ; /* 0xffffffff11117810 */
/* 0x004fe20007ffe0ff */
/*04a0*/ IMAD R15, R14, R15, RZ ; /* 0x0000000f0e0f7224 */
/* 0x000fc800078e02ff */
/*04b0*/ IMAD R16, R16, R17, R15 ; /* 0x0000001110107224 */
/* 0x000fca00078e020f */
/*04c0*/ STS [R16.X4+0x1000], R13 ; /* 0x0010000d10007388 */
/* 0x000fe80000004800 */
/*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*04e0*/ LDS R14, [R5.X4+0x1000] ; /* 0x00100000050e7984 */
/* 0x000e280000004800 */
/*04f0*/ STS [R5.X4], R14 ; /* 0x0000000e05007388 */
/* 0x0011e80000004800 */
/*0500*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0510*/ @!P0 BRA 0x1e0 ; /* 0xfffffcc000008947 */
/* 0x001fea000383ffff */
/*0520*/ LDS R11, [R5.X4+0x1000] ; /* 0x00100000050b7984 */
/* 0x000e280000004800 */
/*0530*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0011e4000c101904 */
/*0540*/ ISETP.GT.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x000fe20003f04270 */
/*0550*/ IMAD.MOV.U32 R3, RZ, RZ, R9 ; /* 0x000000ffff037224 */
/* 0x001fd800078e0009 */
/*0560*/ @P0 BRA 0xe0 ; /* 0xfffffb7000000947 */
/* 0x000fea000383ffff */
/*0570*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0580*/ BRA 0x580; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0600*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
// Prints the first n ints of h_data to stdout, space-separated,
// framed by a leading and trailing newline. Used to dump the sorted
// vector after the device->host copy.
void print(int* h_data, int n) {
	std::cout << "\n";
	for (int i = 0; i < n; i++) {
		std::cout << h_data[i] << " ";
	}
	std::cout << "\n";
}
// Terminates the program with a diagnostic if a CUDA runtime call failed.
// NOTE(review): because this is a function and not a macro, __LINE__
// expands to the printf line inside this function, not the call site —
// the printed line number never identifies which CUDA call failed.
void cudaTest(cudaError_t error) {
	if (error != cudaSuccess) {
		printf("cuda returned error %s (code %d), line(%d)\n",
				cudaGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
/*
 * Per-block LSD binary radix sort of one segment of d_vec.
 * Block bx processes d_vec[d_seg[bx] .. d_seg[bx+1]) in tiles of
 * BLOCK_SIZE elements (one element per thread), sorting each tile on
 * the low BITS_NUMBER bits via a per-bit prefix-sum scatter through
 * shared memory. num_segments is accepted but never read here.
 *
 * NOTE(review): several __syncthreads() calls below sit inside
 * divergent branches (`if (threadIndexGlobal < end)` and
 * `if (index < block)`). If any thread of the block skips those
 * branches — i.e. whenever a segment size is not a multiple of
 * BLOCK_SIZE — this is undefined behavior / potential deadlock.
 * TODO confirm the input guarantees full tiles.
 */
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
	int bx = blockIdx.x;
	int tx = threadIdx.x;
	int begin = d_seg[bx];    // segment start (inclusive)
	int end = d_seg[bx + 1];  // segment end (exclusive)
	int size = end - begin;
	__shared__ int s_vec[BLOCK_SIZE];            // current tile values
	__shared__ int s_aux[BLOCK_SIZE];            // scatter target per pass
	__shared__ int s_pref_sum_one[BLOCK_SIZE];   // running counts of 1-bits
	__shared__ int s_pref_sum_zero[BLOCK_SIZE];  // running counts of 0-bits
	for (int k = 0; k < size; k += BLOCK_SIZE) {
		int threadIndexGlobal = begin + k + tx;
		// `block` = number of valid elements in this tile (last tile may be short).
		int block = BLOCK_SIZE;
		if(BLOCK_SIZE+k > size)
			block = size-k;
		if (threadIndexGlobal < end) {
			s_vec[tx] = d_vec[threadIndexGlobal];
			// NOTE(review): barrier inside a divergent branch — see header.
			__syncthreads();
			int i, j;
			int exp = 0;  // bit position currently being sorted on
			for (j = 0; j < BITS_NUMBER; j++) {
				// x = value of bit `exp` for this thread's element.
				int x = (s_vec[tx] >> exp) & 1;
				s_pref_sum_one[tx] = x;
				s_pref_sum_zero[tx] = 1-x;
				__syncthreads();
				// Doubling-stride scan accumulating bit counts into higher slots.
				for (i = 1; i < block; i*=2) {
					int index = tx + i;
					if (index < block) {
						int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
						int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
						// NOTE(review): barrier inside `if (index < block)` — divergent
						// whenever index >= block for some threads; see header.
						__syncthreads();
						s_pref_sum_one[index] = one;
						s_pref_sum_zero[index] = zero;
						__syncthreads();
					}
				}
				x = (s_vec[tx] >> exp) & 1;
				// Stable scatter: 0-bit elements go to (zero-count - 1); 1-bit
				// elements go after all zeros, offset by total zero count.
				int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
						+ (1 - x) * (s_pref_sum_zero[tx] - 1);
				s_aux[index] = s_vec[tx];
				__syncthreads();
				s_vec[tx] = s_aux[tx];
				__syncthreads();
				exp++;
			}
			// Write the tile back (s_aux == s_vec after the last pass's copy).
			d_vec[threadIndexGlobal] = s_aux[tx];
		}
	}
}
/*
 * Reads a segmented int array from stdin (num_of_segments, then
 * num_of_segments+1 segment offsets, then num_of_elements, then the
 * elements), sorts each segment on the GPU with radix_sort<EXP_BITS_SIZE>
 * (one block of BLOCK_SIZE threads per segment), copies the result back
 * and prints it when ELAPSED_TIME != 1.
 */
int main(int argc, char **argv) {
	int num_of_segments;
	int num_of_elements;
	int i;
	// NOTE(review): scanf and malloc return values are never checked;
	// malformed input or allocation failure leads to UB below.
	scanf("%d", &num_of_segments);
	int mem_size_seg = sizeof(int) * (num_of_segments + 1);
	int *h_seg = (int *) malloc(mem_size_seg);
	for (i = 0; i < num_of_segments + 1; i++)
		scanf("%d", &h_seg[i]);
	scanf("%d", &num_of_elements);
	int mem_size_vec = sizeof(int) * num_of_elements;
	int *h_vec = (int *) malloc(mem_size_vec);
	for (i = 0; i < num_of_elements; i++)
		scanf("%d", &h_vec[i]);
	// print(h_seg, num_of_segments + 1);	print(h_vec, num_of_elements);
	// Allocate device memory
	int *d_seg, *d_vec;
	cudaTest(cudaMalloc((void **) &d_seg, mem_size_seg));
	cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
	//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
	// copy host memory to device
	cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
	cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
	// Setup execution parameters
//	int devID = 0;
//	cudaDeviceProp deviceProp;
//	cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
//	unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
//	//unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
//	//unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
	int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
	//if (blocksize > 1024)
	//	blocksize = 1024;
	dim3 threads(blocksize, 1);
	//dim3 grid(num_of_segments / blocksize + 1, 1);
	dim3 grid(num_of_segments, 1);	// one block per segment
	std::chrono::high_resolution_clock::time_point start =
			std::chrono::high_resolution_clock::now();
	radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
			num_of_segments);
	// Block until the kernel finishes so the wall-clock timing is meaningful.
	cudaDeviceSynchronize();
	std::chrono::high_resolution_clock::time_point stop =
			std::chrono::high_resolution_clock::now();
	cudaTest(cudaPeekAtLastError());
	// time_span is only consumed by the (commented-out) timing printout.
	std::chrono::duration<double> time_span = std::chrono::duration_cast<
			std::chrono::duration<double>>(stop - start);
	// NOTE(review): the kernel shown above only reads d_seg, so this
	// copy-back of the segment offsets appears redundant.
	cudaTest(cudaMemcpy(h_seg, d_seg, mem_size_seg, cudaMemcpyDeviceToHost));
	cudaTest(cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost));
	//print(h_seg, num_of_segments + 1);
	//print(h_vec, num_of_elements);
	//print(h_seg, 10);
	//print(h_vec, 1000);
	// Null statement on purpose: timing output is compiled out; with
	// ELAPSED_TIME == 0 the sorted data is printed instead.
	if (ELAPSED_TIME == 1)
//		std::cout << "It took me " << time_span.count() * 1000
//				<< " miliseconds.\n";
		;
	else
		print(h_vec, num_of_elements);
	// cudaDeviceReset causes the driver to clean up all state. While
	// not mandatory in normal operation, it is good practice. It is also
	// needed to ensure correct operation when the application is being
	// profiled. Calling cudaDeviceReset causes all profile data to be
	// flushed before the application exits
	cudaDeviceReset();
	free(h_seg);
	free(h_vec);
	cudaFree(d_seg);
	cudaFree(d_vec);
	return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ | .file "tmpxft_000611a1_00000000-6_sharev1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.weak _Z10radix_sortILi10EEvPiS0_i
.type _Z10radix_sortILi10EEvPiS0_i, @function
_Z10radix_sortILi10EEvPiS0_i:
.LFB4545:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
movq %rsi, 24(%rsp)
movl %edx, 12(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10radix_sortILi10EEvPiS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4545:
.size _Z10radix_sortILi10EEvPiS0_i, .-_Z10radix_sortILi10EEvPiS0_i
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4192:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4192:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n"
.LC1:
.string " "
.text
.globl _Z5printPii
.type _Z5printPii, @function
_Z5printPii:
.LFB4183:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movl %esi, %ebp
movl $1, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
testl %ebp, %ebp
jle .L10
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %r13
leaq _ZSt4cout(%rip), %r12
leaq .LC1(%rip), %rbp
.L11:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r13, %rbx
jne .L11
.L10:
movl $1, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4183:
.size _Z5printPii, .-_Z5printPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "cuda returned error %s (code %d), line(%d)\n"
.text
.globl _Z8cudaTest9cudaError
.type _Z8cudaTest9cudaError, @function
_Z8cudaTest9cudaError:
.LFB4184:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L19
ret
.L19:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl %edi, %ebx
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $51, %r8d
movl %ebx, %ecx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE4184:
.size _Z8cudaTest9cudaError, .-_Z8cudaTest9cudaError
.section .rodata.str1.1
.LC3:
.string "%d"
.text
.globl main
.type main, @function
main:
.LFB4187:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rsi
leaq .LC3(%rip), %rdi
call __isoc23_scanf@PLT
movl 24(%rsp), %ebx
leal 4(,%rbx,4), %r15d
movslq %r15d, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
testl %ebx, %ebx
js .L21
movq %rax, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r12
.L22:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 24(%rsp)
jge .L22
.L21:
leaq 28(%rsp), %rsi
leaq .LC3(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl 28(%rsp), %ebx
leal 0(,%rbx,4), %r14d
movslq %r14d, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L23
movq %rax, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r12
.L24:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 28(%rsp)
jg .L24
.L23:
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1, %ecx
movq %r15, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl 24(%rsp), %eax
movl %eax, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L25:
call cudaDeviceSynchronize@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
call cudaPeekAtLastError@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $2, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 8(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl 28(%rsp), %esi
movq %r13, %rdi
call _Z5printPii
call cudaDeviceReset@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movl 24(%rsp), %edx
movq 32(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z10radix_sortILi10EEvPiS0_i
jmp .L25
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4187:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z10radix_sortILi10EEvPiS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4217:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z10radix_sortILi10EEvPiS0_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4217:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
// Prints the first n ints of h_data to stdout, space-separated,
// framed by a leading and trailing newline. Used to dump the sorted
// vector after the device->host copy.
void print(int* h_data, int n) {
	std::cout << "\n";
	for (int i = 0; i < n; i++) {
		std::cout << h_data[i] << " ";
	}
	std::cout << "\n";
}
// Terminates the program with a diagnostic if a CUDA runtime call failed.
// NOTE(review): because this is a function and not a macro, __LINE__
// expands to the printf line inside this function, not the call site —
// the printed line number never identifies which CUDA call failed.
void cudaTest(cudaError_t error) {
	if (error != cudaSuccess) {
		printf("cuda returned error %s (code %d), line(%d)\n",
				cudaGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
/*
 * Per-block LSD binary radix sort of one segment of d_vec.
 * Block bx processes d_vec[d_seg[bx] .. d_seg[bx+1]) in tiles of
 * BLOCK_SIZE elements (one element per thread), sorting each tile on
 * the low BITS_NUMBER bits via a per-bit prefix-sum scatter through
 * shared memory. num_segments is accepted but never read here.
 *
 * NOTE(review): several __syncthreads() calls below sit inside
 * divergent branches (`if (threadIndexGlobal < end)` and
 * `if (index < block)`). If any thread of the block skips those
 * branches — i.e. whenever a segment size is not a multiple of
 * BLOCK_SIZE — this is undefined behavior / potential deadlock.
 * TODO confirm the input guarantees full tiles.
 */
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
	int bx = blockIdx.x;
	int tx = threadIdx.x;
	int begin = d_seg[bx];    // segment start (inclusive)
	int end = d_seg[bx + 1];  // segment end (exclusive)
	int size = end - begin;
	__shared__ int s_vec[BLOCK_SIZE];            // current tile values
	__shared__ int s_aux[BLOCK_SIZE];            // scatter target per pass
	__shared__ int s_pref_sum_one[BLOCK_SIZE];   // running counts of 1-bits
	__shared__ int s_pref_sum_zero[BLOCK_SIZE];  // running counts of 0-bits
	for (int k = 0; k < size; k += BLOCK_SIZE) {
		int threadIndexGlobal = begin + k + tx;
		// `block` = number of valid elements in this tile (last tile may be short).
		int block = BLOCK_SIZE;
		if(BLOCK_SIZE+k > size)
			block = size-k;
		if (threadIndexGlobal < end) {
			s_vec[tx] = d_vec[threadIndexGlobal];
			// NOTE(review): barrier inside a divergent branch — see header.
			__syncthreads();
			int i, j;
			int exp = 0;  // bit position currently being sorted on
			for (j = 0; j < BITS_NUMBER; j++) {
				// x = value of bit `exp` for this thread's element.
				int x = (s_vec[tx] >> exp) & 1;
				s_pref_sum_one[tx] = x;
				s_pref_sum_zero[tx] = 1-x;
				__syncthreads();
				// Doubling-stride scan accumulating bit counts into higher slots.
				for (i = 1; i < block; i*=2) {
					int index = tx + i;
					if (index < block) {
						int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
						int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
						// NOTE(review): barrier inside `if (index < block)` — divergent
						// whenever index >= block for some threads; see header.
						__syncthreads();
						s_pref_sum_one[index] = one;
						s_pref_sum_zero[index] = zero;
						__syncthreads();
					}
				}
				x = (s_vec[tx] >> exp) & 1;
				// Stable scatter: 0-bit elements go to (zero-count - 1); 1-bit
				// elements go after all zeros, offset by total zero count.
				int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
						+ (1 - x) * (s_pref_sum_zero[tx] - 1);
				s_aux[index] = s_vec[tx];
				__syncthreads();
				s_vec[tx] = s_aux[tx];
				__syncthreads();
				exp++;
			}
			// Write the tile back (s_aux == s_vec after the last pass's copy).
			d_vec[threadIndexGlobal] = s_aux[tx];
		}
	}
}
int main(int argc, char **argv) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
int mem_size_seg = sizeof(int) * (num_of_segments + 1);
int *h_seg = (int *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(int) * num_of_elements;
int *h_vec = (int *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
// print(h_seg, num_of_segments + 1); print(h_vec, num_of_elements);
// Allocate device memory
int *d_seg, *d_vec;
cudaTest(cudaMalloc((void **) &d_seg, mem_size_seg));
cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec));
//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
// copy host memory to device
cudaTest(cudaMemcpy(d_seg, h_seg, mem_size_seg, cudaMemcpyHostToDevice));
cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice));
// Setup execution parameters
// int devID = 0;
// cudaDeviceProp deviceProp;
// cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
// unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
// //unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
// //unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
//if (blocksize > 1024)
// blocksize = 1024;
dim3 threads(blocksize, 1);
//dim3 grid(num_of_segments / blocksize + 1, 1);
dim3 grid(num_of_segments, 1);
std::chrono::high_resolution_clock::time_point start =
std::chrono::high_resolution_clock::now();
radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
num_of_segments);
cudaDeviceSynchronize();
std::chrono::high_resolution_clock::time_point stop =
std::chrono::high_resolution_clock::now();
cudaTest(cudaPeekAtLastError());
std::chrono::duration<double> time_span = std::chrono::duration_cast<
std::chrono::duration<double>>(stop - start);
cudaTest(cudaMemcpy(h_seg, d_seg, mem_size_seg, cudaMemcpyDeviceToHost));
cudaTest(cudaMemcpy(h_vec, d_vec, mem_size_vec, cudaMemcpyDeviceToHost));
//print(h_seg, num_of_segments + 1);
//print(h_vec, num_of_elements);
//print(h_seg, 10);
//print(h_vec, 1000);
if (ELAPSED_TIME == 1)
// std::cout << "It took me " << time_span.count() * 1000
// << " miliseconds.\n";
;
else
print(h_vec, num_of_elements);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
free(h_seg);
free(h_vec);
cudaFree(d_seg);
cudaFree(d_vec);
return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <hip/hip_runtime.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
void print(int* h_data, int n) {
std::cout << "\n";
for (int i = 0; i < n; i++) {
std::cout << h_data[i] << " ";
}
std::cout << "\n";
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
int bx = blockIdx.x;
int tx = threadIdx.x;
int begin = d_seg[bx];
int end = d_seg[bx + 1];
int size = end - begin;
__shared__ int s_vec[BLOCK_SIZE];
__shared__ int s_aux[BLOCK_SIZE];
__shared__ int s_pref_sum_one[BLOCK_SIZE];
__shared__ int s_pref_sum_zero[BLOCK_SIZE];
for (int k = 0; k < size; k += BLOCK_SIZE) {
int threadIndexGlobal = begin + k + tx;
int block = BLOCK_SIZE;
if(BLOCK_SIZE+k > size)
block = size-k;
if (threadIndexGlobal < end) {
s_vec[tx] = d_vec[threadIndexGlobal];
__syncthreads();
int i, j;
int exp = 0;
for (j = 0; j < BITS_NUMBER; j++) {
int x = (s_vec[tx] >> exp) & 1;
s_pref_sum_one[tx] = x;
s_pref_sum_zero[tx] = 1-x;
__syncthreads();
for (i = 1; i < block; i*=2) {
int index = tx + i;
if (index < block) {
int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
__syncthreads();
s_pref_sum_one[index] = one;
s_pref_sum_zero[index] = zero;
__syncthreads();
}
}
x = (s_vec[tx] >> exp) & 1;
int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
+ (1 - x) * (s_pref_sum_zero[tx] - 1);
s_aux[index] = s_vec[tx];
__syncthreads();
s_vec[tx] = s_aux[tx];
__syncthreads();
exp++;
}
d_vec[threadIndexGlobal] = s_aux[tx];
}
}
}
int main(int argc, char **argv) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
int mem_size_seg = sizeof(int) * (num_of_segments + 1);
int *h_seg = (int *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(int) * num_of_elements;
int *h_vec = (int *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
// print(h_seg, num_of_segments + 1); print(h_vec, num_of_elements);
// Allocate device memory
int *d_seg, *d_vec;
cudaTest(hipMalloc((void **) &d_seg, mem_size_seg));
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
// copy host memory to device
cudaTest(hipMemcpy(d_seg, h_seg, mem_size_seg, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
// Setup execution parameters
// int devID = 0;
// cudaDeviceProp deviceProp;
// cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
// unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
// //unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
// //unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
//if (blocksize > 1024)
// blocksize = 1024;
dim3 threads(blocksize, 1);
//dim3 grid(num_of_segments / blocksize + 1, 1);
dim3 grid(num_of_segments, 1);
std::chrono::high_resolution_clock::time_point start =
std::chrono::high_resolution_clock::now();
radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
num_of_segments);
hipDeviceSynchronize();
std::chrono::high_resolution_clock::time_point stop =
std::chrono::high_resolution_clock::now();
cudaTest(hipPeekAtLastError());
std::chrono::duration<double> time_span = std::chrono::duration_cast<
std::chrono::duration<double>>(stop - start);
cudaTest(hipMemcpy(h_seg, d_seg, mem_size_seg, hipMemcpyDeviceToHost));
cudaTest(hipMemcpy(h_vec, d_vec, mem_size_vec, hipMemcpyDeviceToHost));
//print(h_seg, num_of_segments + 1);
//print(h_vec, num_of_elements);
//print(h_seg, 10);
//print(h_vec, 1000);
if (ELAPSED_TIME == 1)
// std::cout << "It took me " << time_span.count() * 1000
// << " miliseconds.\n";
;
else
print(h_vec, num_of_elements);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
free(h_seg);
free(h_vec);
hipFree(d_seg);
hipFree(d_vec);
return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <hip/hip_runtime.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
void print(int* h_data, int n) {
std::cout << "\n";
for (int i = 0; i < n; i++) {
std::cout << h_data[i] << " ";
}
std::cout << "\n";
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
int bx = blockIdx.x;
int tx = threadIdx.x;
int begin = d_seg[bx];
int end = d_seg[bx + 1];
int size = end - begin;
__shared__ int s_vec[BLOCK_SIZE];
__shared__ int s_aux[BLOCK_SIZE];
__shared__ int s_pref_sum_one[BLOCK_SIZE];
__shared__ int s_pref_sum_zero[BLOCK_SIZE];
for (int k = 0; k < size; k += BLOCK_SIZE) {
int threadIndexGlobal = begin + k + tx;
int block = BLOCK_SIZE;
if(BLOCK_SIZE+k > size)
block = size-k;
if (threadIndexGlobal < end) {
s_vec[tx] = d_vec[threadIndexGlobal];
__syncthreads();
int i, j;
int exp = 0;
for (j = 0; j < BITS_NUMBER; j++) {
int x = (s_vec[tx] >> exp) & 1;
s_pref_sum_one[tx] = x;
s_pref_sum_zero[tx] = 1-x;
__syncthreads();
for (i = 1; i < block; i*=2) {
int index = tx + i;
if (index < block) {
int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
__syncthreads();
s_pref_sum_one[index] = one;
s_pref_sum_zero[index] = zero;
__syncthreads();
}
}
x = (s_vec[tx] >> exp) & 1;
int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
+ (1 - x) * (s_pref_sum_zero[tx] - 1);
s_aux[index] = s_vec[tx];
__syncthreads();
s_vec[tx] = s_aux[tx];
__syncthreads();
exp++;
}
d_vec[threadIndexGlobal] = s_aux[tx];
}
}
}
int main(int argc, char **argv) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
int mem_size_seg = sizeof(int) * (num_of_segments + 1);
int *h_seg = (int *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(int) * num_of_elements;
int *h_vec = (int *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
// print(h_seg, num_of_segments + 1); print(h_vec, num_of_elements);
// Allocate device memory
int *d_seg, *d_vec;
cudaTest(hipMalloc((void **) &d_seg, mem_size_seg));
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
// copy host memory to device
cudaTest(hipMemcpy(d_seg, h_seg, mem_size_seg, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
// Setup execution parameters
// int devID = 0;
// cudaDeviceProp deviceProp;
// cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
// unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
// //unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
// //unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
//if (blocksize > 1024)
// blocksize = 1024;
dim3 threads(blocksize, 1);
//dim3 grid(num_of_segments / blocksize + 1, 1);
dim3 grid(num_of_segments, 1);
std::chrono::high_resolution_clock::time_point start =
std::chrono::high_resolution_clock::now();
radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
num_of_segments);
hipDeviceSynchronize();
std::chrono::high_resolution_clock::time_point stop =
std::chrono::high_resolution_clock::now();
cudaTest(hipPeekAtLastError());
std::chrono::duration<double> time_span = std::chrono::duration_cast<
std::chrono::duration<double>>(stop - start);
cudaTest(hipMemcpy(h_seg, d_seg, mem_size_seg, hipMemcpyDeviceToHost));
cudaTest(hipMemcpy(h_vec, d_vec, mem_size_vec, hipMemcpyDeviceToHost));
//print(h_seg, num_of_segments + 1);
//print(h_vec, num_of_elements);
//print(h_seg, 10);
//print(h_vec, 1000);
if (ELAPSED_TIME == 1)
// std::cout << "It took me " << time_span.count() * 1000
// << " miliseconds.\n";
;
else
print(h_vec, num_of_elements);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
free(h_seg);
free(h_vec);
hipFree(d_seg);
hipFree(d_vec);
return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.protected _Z10radix_sortILi10EEvPiS0_i
.globl _Z10radix_sortILi10EEvPiS0_i
.p2align 8
.type _Z10radix_sortILi10EEvPiS0_i,@function
_Z10radix_sortILi10EEvPiS0_i:
s_load_b64 s[4:5], s[0:1], 0x8
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s4, s2
s_addc_u32 s3, s5, s3
s_load_b64 s[2:3], s[2:3], 0x0
s_waitcnt lgkmcnt(0)
s_sub_i32 s4, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_12
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v3, 2, v0
v_add_nc_u32_e32 v4, s2, v0
s_mov_b32 s6, 0
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, 0x3000, v3
v_or_b32_e32 v6, 0x1000, v3
v_or_b32_e32 v7, 0x2000, v3
s_branch .LBB0_4
.LBB0_2:
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
global_store_b32 v[1:2], v8, off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_ge_i32 s2, s4
s_mov_b32 s6, s2
s_cbranch_scc1 .LBB0_12
.LBB0_4:
v_add_nc_u32_e32 v1, s6, v4
s_add_i32 s2, s6, 0x400
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_3
v_ashrrev_i32_e32 v2, 31, v1
s_sub_i32 s6, s4, s6
s_cmp_gt_i32 s2, s4
s_mov_b32 s8, 0
s_cselect_b32 s6, s6, 0x400
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_cmp_gt_i32 s6, 1
s_cselect_b32 s7, -1, 0
s_lshl_b32 s9, s6, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_addk_i32 s9, 0xffc
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
v_mov_b32_e32 v8, s9
global_load_b32 v9, v[1:2], off
s_waitcnt vmcnt(0)
ds_store_b32 v5, v9
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_branch .LBB0_7
.LBB0_6:
ds_load_b32 v9, v5
ds_load_b32 v10, v3
ds_load_b32 v11, v8
ds_load_b32 v12, v6
s_waitcnt lgkmcnt(3)
v_bfe_u32 v13, v9, s8, 1
s_add_i32 s8, s8, 1
s_waitcnt lgkmcnt(1)
v_add3_u32 v10, v10, v11, -1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v11, -1, v12
s_cmp_lg_u32 s8, 10
v_xor_b32_e32 v12, 1, v13
v_mul_lo_u32 v10, v10, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v11, v11, v12
v_add_lshl_u32 v10, v11, v10, 2
ds_store_b32 v10, v9 offset:8192
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
ds_store_b32 v5, v9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
.LBB0_7:
ds_load_b32 v9, v5
s_and_not1_b32 vcc_lo, exec_lo, s7
s_waitcnt lgkmcnt(0)
v_bfe_u32 v9, v9, s8, 1
s_delay_alu instid0(VALU_DEP_1)
v_xor_b32_e32 v10, 1, v9
ds_store_b32 v3, v9
ds_store_b32 v6, v10
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_6
s_mov_b32 s9, 1
s_branch .LBB0_10
.p2align 6
.LBB0_9:
s_or_b32 exec_lo, exec_lo, s10
s_lshl_b32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s9, s6
s_cbranch_scc0 .LBB0_6
.LBB0_10:
v_add_nc_u32_e32 v9, s9, v0
s_mov_b32 s10, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s6, v9
s_cbranch_execz .LBB0_9
v_lshlrev_b32_e32 v11, 2, v9
ds_load_2addr_stride64_b32 v[9:10], v11 offset1:16
ds_load_b32 v12, v3
ds_load_b32 v13, v6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v9, v9, v12
v_add_nc_u32_e32 v10, v10, v13
ds_store_2addr_stride64_b32 v11, v9, v10 offset1:16
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_branch .LBB0_9
.LBB0_12:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10radix_sortILi10EEvPiS0_i
.amdhsa_group_segment_fixed_size 16384
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.Lfunc_end0:
.size _Z10radix_sortILi10EEvPiS0_i, .Lfunc_end0-_Z10radix_sortILi10EEvPiS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 16384
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10radix_sortILi10EEvPiS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10radix_sortILi10EEvPiS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <utility>
#include <iostream>
#include <bitset>
#include <math.h>
#include <time.h>
#include <chrono>
#include <hip/hip_runtime.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024
#endif
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif
#ifndef EXP_BITS_SIZE
#define EXP_BITS_SIZE 10
#endif
void print(int* h_data, int n) {
std::cout << "\n";
for (int i = 0; i < n; i++) {
std::cout << h_data[i] << " ";
}
std::cout << "\n";
}
void cudaTest(hipError_t error) {
if (error != hipSuccess) {
printf("cuda returned error %s (code %d), line(%d)\n",
hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
}
/*
* 256 threads per block
* 4 elements per thread
* = 1024 elements per block
* = n/1024 blocks
*/
template<int BITS_NUMBER = 64>
__global__ void radix_sort(int *d_vec, int *d_seg, int num_segments) {
int bx = blockIdx.x;
int tx = threadIdx.x;
int begin = d_seg[bx];
int end = d_seg[bx + 1];
int size = end - begin;
__shared__ int s_vec[BLOCK_SIZE];
__shared__ int s_aux[BLOCK_SIZE];
__shared__ int s_pref_sum_one[BLOCK_SIZE];
__shared__ int s_pref_sum_zero[BLOCK_SIZE];
for (int k = 0; k < size; k += BLOCK_SIZE) {
int threadIndexGlobal = begin + k + tx;
int block = BLOCK_SIZE;
if(BLOCK_SIZE+k > size)
block = size-k;
if (threadIndexGlobal < end) {
s_vec[tx] = d_vec[threadIndexGlobal];
__syncthreads();
int i, j;
int exp = 0;
for (j = 0; j < BITS_NUMBER; j++) {
int x = (s_vec[tx] >> exp) & 1;
s_pref_sum_one[tx] = x;
s_pref_sum_zero[tx] = 1-x;
__syncthreads();
for (i = 1; i < block; i*=2) {
int index = tx + i;
if (index < block) {
int one = s_pref_sum_one[tx] + s_pref_sum_one[index];
int zero = s_pref_sum_zero[tx] + s_pref_sum_zero[index];
__syncthreads();
s_pref_sum_one[index] = one;
s_pref_sum_zero[index] = zero;
__syncthreads();
}
}
x = (s_vec[tx] >> exp) & 1;
int index = (x) * (s_pref_sum_one[tx] + s_pref_sum_zero[block-1] - 1)
+ (1 - x) * (s_pref_sum_zero[tx] - 1);
s_aux[index] = s_vec[tx];
__syncthreads();
s_vec[tx] = s_aux[tx];
__syncthreads();
exp++;
}
d_vec[threadIndexGlobal] = s_aux[tx];
}
}
}
int main(int argc, char **argv) {
int num_of_segments;
int num_of_elements;
int i;
scanf("%d", &num_of_segments);
int mem_size_seg = sizeof(int) * (num_of_segments + 1);
int *h_seg = (int *) malloc(mem_size_seg);
for (i = 0; i < num_of_segments + 1; i++)
scanf("%d", &h_seg[i]);
scanf("%d", &num_of_elements);
int mem_size_vec = sizeof(int) * num_of_elements;
int *h_vec = (int *) malloc(mem_size_vec);
for (i = 0; i < num_of_elements; i++)
scanf("%d", &h_vec[i]);
// print(h_seg, num_of_segments + 1); print(h_vec, num_of_elements);
// Allocate device memory
int *d_seg, *d_vec;
cudaTest(hipMalloc((void **) &d_seg, mem_size_seg));
cudaTest(hipMalloc((void **) &d_vec, mem_size_vec));
//cudaTest(cudaMalloc((void **) &d_aux, mem_size_vec));
// copy host memory to device
cudaTest(hipMemcpy(d_seg, h_seg, mem_size_seg, hipMemcpyHostToDevice));
cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice));
// Setup execution parameters
// int devID = 0;
// cudaDeviceProp deviceProp;
// cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
// unsigned int multiprocessor_number = deviceProp.multiProcessorCount;
// //unsigned int grid_blocks_max_x = deviceProp.maxGridSize[0];
// //unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
//
int blocksize = BLOCK_SIZE; //num_of_elements / num_of_segments;
//if (blocksize > 1024)
// blocksize = 1024;
dim3 threads(blocksize, 1);
//dim3 grid(num_of_segments / blocksize + 1, 1);
dim3 grid(num_of_segments, 1);
std::chrono::high_resolution_clock::time_point start =
std::chrono::high_resolution_clock::now();
radix_sort<EXP_BITS_SIZE> <<<grid, threads>>>(d_vec, d_seg,
num_of_segments);
hipDeviceSynchronize();
std::chrono::high_resolution_clock::time_point stop =
std::chrono::high_resolution_clock::now();
cudaTest(hipPeekAtLastError());
std::chrono::duration<double> time_span = std::chrono::duration_cast<
std::chrono::duration<double>>(stop - start);
cudaTest(hipMemcpy(h_seg, d_seg, mem_size_seg, hipMemcpyDeviceToHost));
cudaTest(hipMemcpy(h_vec, d_vec, mem_size_vec, hipMemcpyDeviceToHost));
//print(h_seg, num_of_segments + 1);
//print(h_vec, num_of_elements);
//print(h_seg, 10);
//print(h_vec, 1000);
if (ELAPSED_TIME == 1)
// std::cout << "It took me " << time_span.count() * 1000
// << " miliseconds.\n";
;
else
print(h_vec, num_of_elements);
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
free(h_seg);
free(h_vec);
hipFree(d_seg);
hipFree(d_vec);
return 0;
}
/**
* // cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
*/
/*
printf("thread=%d | aux=%d %d %d %d\n", bx * blockDim.x + tx, d_aux[begin], d_aux[begin + 1], d_aux[begin + 2], d_aux[begin + 3]);
int devID = 0;
cudaDeviceProp deviceProp;
cudaTest(cudaGetDeviceProperties(&deviceProp, devID));
unsigned int multiprocessorNumber = deviceProp.multiProcessorCount;
unsigned int sharedMemoryTotal = deviceProp.sharedMemPerBlock/(sizeof(int));
std::cout << "multiprocessorNumber: " << multiprocessorNumber << "\n";
std::cout << "sharedMemoryTotal: " << sharedMemoryTotal << "\n";
std::cout << "numberOfSegmentsPerBlock: " << sharedMemoryTotal << "\n";
*/ | .text
.file "sharev1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z5printPii # -- Begin function _Z5printPii
.p2align 4, 0x90
.type _Z5printPii,@function
_Z5printPii: # @_Z5printPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
testl %ebp, %ebp
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r15
cmpq %r15, %r14
jne .LBB0_2
.LBB0_3: # %._crit_edge
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l # TAILCALL
.Lfunc_end0:
.size _Z5printPii, .Lfunc_end0-_Z5printPii
.cfi_endproc
# -- End function
.globl _Z8cudaTest10hipError_t # -- Begin function _Z8cudaTest10hipError_t
.p2align 4, 0x90
.type _Z8cudaTest10hipError_t,@function
_Z8cudaTest10hipError_t: # @_Z8cudaTest10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %edi, %ebx
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
movl %ebx, %edx
movl $51, %ecx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z8cudaTest10hipError_t, .Lfunc_end1-_Z8cudaTest10hipError_t
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsp, %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl (%rsp), %r14d
leal 4(,%r14,4), %eax
movslq %eax, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
testl %r14d, %r14d
js .LBB2_3
# %bb.1: # %.lr.ph.preheader
movq $-1, %r12
movq %rbx, %r14
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movq %r14, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movslq (%rsp), %rax
incq %r12
addq $4, %r14
cmpq %rax, %r12
jl .LBB2_2
.LBB2_3: # %._crit_edge
leaq 4(%rsp), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl 4(%rsp), %r13d
leal (,%r13,4), %eax
movslq %eax, %r12
movq %r12, %rdi
callq malloc
movq %rax, %r14
testl %r13d, %r13d
jle .LBB2_6
# %bb.4: # %.lr.ph52.preheader
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_5: # %.lr.ph52
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movq %r13, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %rbp
movslq 4(%rsp), %rax
addq $4, %r13
cmpq %rax, %rbp
jl .LBB2_5
.LBB2_6: # %._crit_edge53
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_19
# %bb.7: # %_Z8cudaTest10hipError_t.exit
leaq 8(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_19
# %bb.8: # %_Z8cudaTest10hipError_t.exit32
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.9: # %_Z8cudaTest10hipError_t.exit34
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.10: # %_Z8cudaTest10hipError_t.exit36
movl (%rsp), %r13d
movabsq $4294967296, %rbp # imm = 0x100000000
orq %rbp, %r13
callq _ZNSt6chrono3_V212system_clock3nowEv
orq $1024, %rbp # imm = 0x400
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movl (%rsp), %edx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %edx, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10radix_sortILi10EEvPiS0_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
callq hipDeviceSynchronize
callq _ZNSt6chrono3_V212system_clock3nowEv
callq hipPeekAtLastError
testl %eax, %eax
jne .LBB2_19
# %bb.13: # %_Z8cudaTest10hipError_t.exit38
movq 16(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.14: # %_Z8cudaTest10hipError_t.exit40
movq 8(%rsp), %rsi
movq %r14, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.15: # %_Z8cudaTest10hipError_t.exit42
movl 4(%rsp), %r15d
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
testl %r15d, %r15d
jle .LBB2_18
# %bb.16: # %.lr.ph.preheader.i
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_17: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl (%r14,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq %r12, %r15
jne .LBB2_17
.LBB2_18: # %_Z5printPii.exit
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq hipDeviceReset
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_19:
.cfi_def_cfa_offset 176
movl %eax, %edi
movl %eax, %ebx
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
movl %ebx, %edx
movl $51, %ecx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.section .text._Z25__device_stub__radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z25__device_stub__radix_sortILi10EEvPiS0_i,comdat
.weak _Z25__device_stub__radix_sortILi10EEvPiS0_i # -- Begin function _Z25__device_stub__radix_sortILi10EEvPiS0_i
.p2align 4, 0x90
.type _Z25__device_stub__radix_sortILi10EEvPiS0_i,@function
_Z25__device_stub__radix_sortILi10EEvPiS0_i: # @_Z25__device_stub__radix_sortILi10EEvPiS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10radix_sortILi10EEvPiS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z25__device_stub__radix_sortILi10EEvPiS0_i, .Lfunc_end3-_Z25__device_stub__radix_sortILi10EEvPiS0_i
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10radix_sortILi10EEvPiS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n"
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "cuda returned error %s (code %d), line(%d)\n"
.size .L.str.2, 44
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d"
.size .L.str.3, 3
.type _Z10radix_sortILi10EEvPiS0_i,@object # @_Z10radix_sortILi10EEvPiS0_i
.section .rodata._Z10radix_sortILi10EEvPiS0_i,"aG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.weak _Z10radix_sortILi10EEvPiS0_i
.p2align 3, 0x0
_Z10radix_sortILi10EEvPiS0_i:
.quad _Z25__device_stub__radix_sortILi10EEvPiS0_i
.size _Z10radix_sortILi10EEvPiS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10radix_sortILi10EEvPiS0_i"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__radix_sortILi10EEvPiS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z10radix_sortILi10EEvPiS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z10radix_sortILi10EEvPiS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff037424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0040*/ IMAD.WIDE R2, R2, R3, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x001fca00078e0203 */
/*0050*/ LDG.E R0, [R2.64+0x4] ; /* 0x0000040402007981 */
/* 0x000ea8000c1e1900 */
/*0060*/ LDG.E R6, [R2.64] ; /* 0x0000000402067981 */
/* 0x000ea4000c1e1900 */
/*0070*/ IMAD.IADD R4, R0, 0x1, -R6 ; /* 0x0000000100047824 */
/* 0x004fca00078e0a06 */
/*0080*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fda0003f06270 */
/*0090*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00a0*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e220000002100 */
/*00b0*/ IMAD.MOV.U32 R3, RZ, RZ, RZ ; /* 0x000000ffff037224 */
/* 0x000fe400078e00ff */
/*00c0*/ IMAD.IADD R6, R6, 0x1, R5 ; /* 0x0000000106067824 */
/* 0x001fe400078e0205 */
/*00d0*/ IMAD.SHL.U32 R7, R5, 0x4, RZ ; /* 0x0000000405077824 */
/* 0x000fc600078e00ff */
/*00e0*/ IADD3 R11, R6, R3, RZ ; /* 0x00000003060b7210 */
/* 0x000fe20007ffe0ff */
/*00f0*/ IMAD.IADD R8, R4, 0x1, -R3 ; /* 0x0000000104087824 */
/* 0x000fe200078e0a03 */
/*0100*/ IADD3 R9, R3, 0x400, RZ ; /* 0x0000040003097810 */
/* 0x000fe40007ffe0ff */
/*0110*/ ISETP.GE.AND P1, PT, R11, R0, PT ; /* 0x000000000b00720c */
/* 0x000fe40003f26270 */
/*0120*/ ISETP.GT.AND P0, PT, R9, R4, PT ; /* 0x000000040900720c */
/* 0x000fc80003f04270 */
/*0130*/ SEL R8, R8, 0x400, P0 ; /* 0x0000040008087807 */
/* 0x000fce0000000000 */
/*0140*/ @P1 BRA 0x540 ; /* 0x000003f000001947 */
/* 0x000fea0003800000 */
/*0150*/ IMAD.MOV.U32 R2, RZ, RZ, 0x4 ; /* 0x00000004ff027424 */
/* 0x000fc800078e00ff */
/*0160*/ IMAD.WIDE R2, R11, R2, c[0x0][0x160] ; /* 0x000058000b027625 */
/* 0x000fca00078e0202 */
/*0170*/ LDG.E R14, [R2.64] ; /* 0x00000004020e7981 */
/* 0x000ea2000c1e1900 */
/*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0190*/ HFMA2.MMA R12, -RZ, RZ, 0, 0 ; /* 0x00000000ff0c7435 */
/* 0x000fe200000001ff */
/*01a0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e00ff */
/*01b0*/ IMAD.SHL.U32 R11, R8, 0x4, RZ ; /* 0x00000004080b7824 */
/* 0x000fe200078e00ff */
/*01c0*/ STS [R5.X4], R14 ; /* 0x0000000e05007388 */
/* 0x0041e80000004800 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ LDS R13, [R5.X4] ; /* 0x00000000050d7984 */
/* 0x000e620000004800 */
/*01f0*/ ISETP.GE.AND P0, PT, R8, 0x2, PT ; /* 0x000000020800780c */
/* 0x000fe20003f06270 */
/*0200*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0210*/ BSSY B0, 0x3d0 ; /* 0x000001b000007945 */
/* 0x000fe20003800000 */
/*0220*/ SHF.R.U32.HI R13, RZ, R10, R13 ; /* 0x0000000aff0d7219 */
/* 0x002fc8000001160d */
/*0230*/ LOP3.LUT R14, R13, 0x1, RZ, 0xc0, !PT ; /* 0x000000010d0e7812 */
/* 0x001fc800078ec0ff */
/*0240*/ LOP3.LUT R16, R14, 0x1, RZ, 0x3c, !PT ; /* 0x000000010e107812 */
/* 0x000fe200078e3cff */
/*0250*/ STS [R5.X4+0x2000], R14 ; /* 0x0020000e05007388 */
/* 0x0001e80000004800 */
/*0260*/ STS [R5.X4+0x3000], R16 ; /* 0x0030001005007388 */
/* 0x0001e80000004800 */
/*0270*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0280*/ @!P0 BRA 0x3c0 ; /* 0x0000013000008947 */
/* 0x000fea0003800000 */
/*0290*/ IMAD.MOV.U32 R14, RZ, RZ, 0x1 ; /* 0x00000001ff0e7424 */
/* 0x001fc800078e00ff */
/*02a0*/ IMAD.IADD R13, R5, 0x1, R14 ; /* 0x00000001050d7824 */
/* 0x001fca00078e020e */
/*02b0*/ ISETP.GE.AND P0, PT, R13, R8, PT ; /* 0x000000080d00720c */
/* 0x000fda0003f06270 */
/*02c0*/ @P0 BRA 0x390 ; /* 0x000000c000000947 */
/* 0x000fea0003800000 */
/*02d0*/ IMAD R20, R14, 0x4, R7 ; /* 0x000000040e147824 */
/* 0x000fe200078e0207 */
/*02e0*/ LDS R13, [R5.X4+0x2000] ; /* 0x00200000050d7984 */
/* 0x000fe20000004800 */
/*02f0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe60003800000 */
/*0300*/ LDS R18, [R5.X4+0x3000] ; /* 0x0030000005127984 */
/* 0x000fe80000004800 */
/*0310*/ LDS R16, [R20+0x2000] ; /* 0x0020000014107984 */
/* 0x000e280000000800 */
/*0320*/ LDS R15, [R20+0x3000] ; /* 0x00300000140f7984 */
/* 0x000e620000000800 */
/*0330*/ IMAD.IADD R13, R13, 0x1, R16 ; /* 0x000000010d0d7824 */
/* 0x001fc600078e0210 */
/*0340*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0350*/ IADD3 R15, R15, R18, RZ ; /* 0x000000120f0f7210 */
/* 0x002fca0007ffe0ff */
/*0360*/ STS [R20+0x2000], R13 ; /* 0x0020000d14007388 */
/* 0x0001e80000000800 */
/*0370*/ STS [R20+0x3000], R15 ; /* 0x0030000f14007388 */
/* 0x0001e80000000800 */
/*0380*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0390*/ IMAD.SHL.U32 R14, R14, 0x2, RZ ; /* 0x000000020e0e7824 */
/* 0x000fca00078e00ff */
/*03a0*/ ISETP.GE.AND P0, PT, R14, R8, PT ; /* 0x000000080e00720c */
/* 0x000fda0003f06270 */
/*03b0*/ @!P0 BRA 0x2a0 ; /* 0xfffffee000008947 */
/* 0x000fea000383ffff */
/*03c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*03d0*/ LDS R13, [R5.X4] ; /* 0x00000000050d7984 */
/* 0x000e220000004800 */
/*03e0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*03f0*/ IADD3 R12, R12, 0x1, RZ ; /* 0x000000010c0c7810 */
/* 0x000fe40007ffe0ff */
/*0400*/ LDS R15, [R5.X4+0x2000] ; /* 0x00200000050f7984 */
/* 0x000fe40000004800 */
/*0410*/ ISETP.GE.U32.AND P0, PT, R12, 0xa, PT ; /* 0x0000000a0c00780c */
/* 0x000fe40003f06070 */
/*0420*/ LDS R16, [R11+0x2ffc] ; /* 0x002ffc000b107984 */
/* 0x000e680000000800 */
/*0430*/ LDS R17, [R5.X4+0x3000] ; /* 0x0030000005117984 */
/* 0x000ea20000004800 */
/*0440*/ SHF.R.U32.HI R14, RZ, R10, R13 ; /* 0x0000000aff0e7219 */
/* 0x001fc4000001160d */
/*0450*/ IADD3 R10, R10, 0x1, RZ ; /* 0x000000010a0a7810 */
/* 0x000fe40007ffe0ff */
/*0460*/ LOP3.LUT R14, R14, 0x1, RZ, 0xc0, !PT ; /* 0x000000010e0e7812 */
/* 0x000fe400078ec0ff */
/*0470*/ IADD3 R15, R15, -0x1, R16 ; /* 0xffffffff0f0f7810 */
/* 0x002fe40007ffe010 */
/*0480*/ LOP3.LUT R16, R14.reuse, 0x1, RZ, 0x3c, !PT ; /* 0x000000010e107812 */
/* 0x040fe400078e3cff */
/*0490*/ IADD3 R17, R17, -0x1, RZ ; /* 0xffffffff11117810 */
/* 0x004fe20007ffe0ff */
/*04a0*/ IMAD R15, R14, R15, RZ ; /* 0x0000000f0e0f7224 */
/* 0x000fc800078e02ff */
/*04b0*/ IMAD R16, R16, R17, R15 ; /* 0x0000001110107224 */
/* 0x000fca00078e020f */
/*04c0*/ STS [R16.X4+0x1000], R13 ; /* 0x0010000d10007388 */
/* 0x000fe80000004800 */
/*04d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*04e0*/ LDS R14, [R5.X4+0x1000] ; /* 0x00100000050e7984 */
/* 0x000e280000004800 */
/*04f0*/ STS [R5.X4], R14 ; /* 0x0000000e05007388 */
/* 0x0011e80000004800 */
/*0500*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0510*/ @!P0 BRA 0x1e0 ; /* 0xfffffcc000008947 */
/* 0x001fea000383ffff */
/*0520*/ LDS R11, [R5.X4+0x1000] ; /* 0x00100000050b7984 */
/* 0x000e280000004800 */
/*0530*/ STG.E [R2.64], R11 ; /* 0x0000000b02007986 */
/* 0x0011e4000c101904 */
/*0540*/ ISETP.GT.AND P0, PT, R4, R9, PT ; /* 0x000000090400720c */
/* 0x000fe20003f04270 */
/*0550*/ IMAD.MOV.U32 R3, RZ, RZ, R9 ; /* 0x000000ffff037224 */
/* 0x001fd800078e0009 */
/*0560*/ @P0 BRA 0xe0 ; /* 0xfffffb7000000947 */
/* 0x000fea000383ffff */
/*0570*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0580*/ BRA 0x580; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0600*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0610*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0620*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0630*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0640*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0650*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0660*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0670*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.protected _Z10radix_sortILi10EEvPiS0_i
.globl _Z10radix_sortILi10EEvPiS0_i
.p2align 8
.type _Z10radix_sortILi10EEvPiS0_i,@function
_Z10radix_sortILi10EEvPiS0_i:
s_load_b64 s[4:5], s[0:1], 0x8
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s2, s4, s2
s_addc_u32 s3, s5, s3
s_load_b64 s[2:3], s[2:3], 0x0
s_waitcnt lgkmcnt(0)
s_sub_i32 s4, s3, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_12
s_load_b64 s[0:1], s[0:1], 0x0
v_lshlrev_b32_e32 v3, 2, v0
v_add_nc_u32_e32 v4, s2, v0
s_mov_b32 s6, 0
s_delay_alu instid0(VALU_DEP_2)
v_or_b32_e32 v5, 0x3000, v3
v_or_b32_e32 v6, 0x1000, v3
v_or_b32_e32 v7, 0x2000, v3
s_branch .LBB0_4
.LBB0_2:
ds_load_b32 v8, v7
s_waitcnt lgkmcnt(0)
global_store_b32 v[1:2], v8, off
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_ge_i32 s2, s4
s_mov_b32 s6, s2
s_cbranch_scc1 .LBB0_12
.LBB0_4:
v_add_nc_u32_e32 v1, s6, v4
s_add_i32 s2, s6, 0x400
s_mov_b32 s5, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_3
v_ashrrev_i32_e32 v2, 31, v1
s_sub_i32 s6, s4, s6
s_cmp_gt_i32 s2, s4
s_mov_b32 s8, 0
s_cselect_b32 s6, s6, 0x400
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_cmp_gt_i32 s6, 1
s_cselect_b32 s7, -1, 0
s_lshl_b32 s9, s6, 2
s_delay_alu instid0(SALU_CYCLE_1)
s_addk_i32 s9, 0xffc
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
v_mov_b32_e32 v8, s9
global_load_b32 v9, v[1:2], off
s_waitcnt vmcnt(0)
ds_store_b32 v5, v9
s_waitcnt lgkmcnt(0)
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_branch .LBB0_7
.LBB0_6:
ds_load_b32 v9, v5
ds_load_b32 v10, v3
ds_load_b32 v11, v8
ds_load_b32 v12, v6
s_waitcnt lgkmcnt(3)
v_bfe_u32 v13, v9, s8, 1
s_add_i32 s8, s8, 1
s_waitcnt lgkmcnt(1)
v_add3_u32 v10, v10, v11, -1
s_waitcnt lgkmcnt(0)
v_add_nc_u32_e32 v11, -1, v12
s_cmp_lg_u32 s8, 10
v_xor_b32_e32 v12, 1, v13
v_mul_lo_u32 v10, v10, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v11, v11, v12
v_add_lshl_u32 v10, v11, v10, 2
ds_store_b32 v10, v9 offset:8192
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v9, v7
s_waitcnt lgkmcnt(0)
ds_store_b32 v5, v9
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
.LBB0_7:
ds_load_b32 v9, v5
s_and_not1_b32 vcc_lo, exec_lo, s7
s_waitcnt lgkmcnt(0)
v_bfe_u32 v9, v9, s8, 1
s_delay_alu instid0(VALU_DEP_1)
v_xor_b32_e32 v10, 1, v9
ds_store_b32 v3, v9
ds_store_b32 v6, v10
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_vccnz .LBB0_6
s_mov_b32 s9, 1
s_branch .LBB0_10
.p2align 6
.LBB0_9:
s_or_b32 exec_lo, exec_lo, s10
s_lshl_b32 s9, s9, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lt_i32 s9, s6
s_cbranch_scc0 .LBB0_6
.LBB0_10:
v_add_nc_u32_e32 v9, s9, v0
s_mov_b32 s10, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s6, v9
s_cbranch_execz .LBB0_9
v_lshlrev_b32_e32 v11, 2, v9
ds_load_2addr_stride64_b32 v[9:10], v11 offset1:16
ds_load_b32 v12, v3
ds_load_b32 v13, v6
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_add_nc_u32_e32 v9, v9, v12
v_add_nc_u32_e32 v10, v10, v13
ds_store_2addr_stride64_b32 v11, v9, v10 offset1:16
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_branch .LBB0_9
.LBB0_12:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10radix_sortILi10EEvPiS0_i
.amdhsa_group_segment_fixed_size 16384
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.Lfunc_end0:
.size _Z10radix_sortILi10EEvPiS0_i, .Lfunc_end0-_Z10radix_sortILi10EEvPiS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 16384
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10radix_sortILi10EEvPiS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10radix_sortILi10EEvPiS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000611a1_00000000-6_sharev1.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.section .text._Z10radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.weak _Z10radix_sortILi10EEvPiS0_i
.type _Z10radix_sortILi10EEvPiS0_i, @function
_Z10radix_sortILi10EEvPiS0_i:
.LFB4545:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movq %rdi, 16(%rsp)
movq %rsi, 24(%rsp)
movl %edx, 12(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L5
.L1:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L5:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10radix_sortILi10EEvPiS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L1
.L6:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4545:
.size _Z10radix_sortILi10EEvPiS0_i, .-_Z10radix_sortILi10EEvPiS0_i
.text
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB4192:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4192:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "\n"
.LC1:
.string " "
.text
.globl _Z5printPii
.type _Z5printPii, @function
_Z5printPii:
.LFB4183:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %r12
movl %esi, %ebp
movl $1, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
testl %ebp, %ebp
jle .L10
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %r13
leaq _ZSt4cout(%rip), %r12
leaq .LC1(%rip), %rbp
.L11:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %r13, %rbx
jne .L11
.L10:
movl $1, %edx
leaq .LC0(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4183:
.size _Z5printPii, .-_Z5printPii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "cuda returned error %s (code %d), line(%d)\n"
.text
.globl _Z8cudaTest9cudaError
.type _Z8cudaTest9cudaError, @function
_Z8cudaTest9cudaError:
.LFB4184:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L19
ret
.L19:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl %edi, %ebx
call cudaGetErrorString@PLT
movq %rax, %rdx
movl $51, %r8d
movl %ebx, %ecx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE4184:
.size _Z8cudaTest9cudaError, .-_Z8cudaTest9cudaError
.section .rodata.str1.1
.LC3:
.string "%d"
.text
.globl main
.type main, @function
main:
.LFB4187:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rsi
leaq .LC3(%rip), %rdi
call __isoc23_scanf@PLT
movl 24(%rsp), %ebx
leal 4(,%rbx,4), %r15d
movslq %r15d, %r15
movq %r15, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
testl %ebx, %ebx
js .L21
movq %rax, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r12
.L22:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 24(%rsp)
jge .L22
.L21:
leaq 28(%rsp), %rsi
leaq .LC3(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl 28(%rsp), %ebx
leal 0(,%rbx,4), %r14d
movslq %r14d, %r14
movq %r14, %rdi
call malloc@PLT
movq %rax, %r13
testl %ebx, %ebx
jle .L23
movq %rax, %rbp
movl $0, %ebx
leaq .LC3(%rip), %r12
.L24:
movq %rbp, %rsi
movq %r12, %rdi
movl $0, %eax
call __isoc23_scanf@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %ebx, 28(%rsp)
jg .L24
.L23:
leaq 32(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
leaq 40(%rsp), %rdi
movq %r14, %rsi
call cudaMalloc@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1, %ecx
movq %r15, %rdx
movq 8(%rsp), %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1, %ecx
movq %r14, %rdx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl 24(%rsp), %eax
movl %eax, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L25:
call cudaDeviceSynchronize@PLT
call _ZNSt6chrono3_V212system_clock3nowEv@PLT
call cudaPeekAtLastError@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $2, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 8(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl $2, %ecx
movq %r14, %rdx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
call _Z8cudaTest9cudaError
movl 28(%rsp), %esi
movq %r13, %rdi
call _Z5printPii
call cudaDeviceReset@PLT
movq %rbx, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movl 24(%rsp), %edx
movq 32(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z10radix_sortILi10EEvPiS0_i
jmp .L25
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE4187:
.size main, .-main
.section .rodata.str1.1
.LC4:
.string "_Z10radix_sortILi10EEvPiS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB4217:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z10radix_sortILi10EEvPiS0_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE4217:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "sharev1.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z5printPii # -- Begin function _Z5printPii
.p2align 4, 0x90
.type _Z5printPii,@function
_Z5printPii: # @_Z5printPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
testl %ebp, %ebp
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r15
cmpq %r15, %r14
jne .LBB0_2
.LBB0_3: # %._crit_edge
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l # TAILCALL
.Lfunc_end0:
.size _Z5printPii, .Lfunc_end0-_Z5printPii
.cfi_endproc
# -- End function
.globl _Z8cudaTest10hipError_t # -- Begin function _Z8cudaTest10hipError_t
.p2align 4, 0x90
.type _Z8cudaTest10hipError_t,@function
_Z8cudaTest10hipError_t: # @_Z8cudaTest10hipError_t
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB1_2
# %bb.1:
retq
.LBB1_2:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movl %edi, %ebx
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
movl %ebx, %edx
movl $51, %ecx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end1:
.size _Z8cudaTest10hipError_t, .Lfunc_end1-_Z8cudaTest10hipError_t
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsp, %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl (%rsp), %r14d
leal 4(,%r14,4), %eax
movslq %eax, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
testl %r14d, %r14d
js .LBB2_3
# %bb.1: # %.lr.ph.preheader
movq $-1, %r12
movq %rbx, %r14
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movq %r14, %rsi
xorl %eax, %eax
callq __isoc23_scanf
movslq (%rsp), %rax
incq %r12
addq $4, %r14
cmpq %rax, %r12
jl .LBB2_2
.LBB2_3: # %._crit_edge
leaq 4(%rsp), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl 4(%rsp), %r13d
leal (,%r13,4), %eax
movslq %eax, %r12
movq %r12, %rdi
callq malloc
movq %rax, %r14
testl %r13d, %r13d
jle .LBB2_6
# %bb.4: # %.lr.ph52.preheader
movq %r14, %r13
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB2_5: # %.lr.ph52
# =>This Inner Loop Header: Depth=1
movl $.L.str.3, %edi
movq %r13, %rsi
xorl %eax, %eax
callq __isoc23_scanf
incq %rbp
movslq 4(%rsp), %rax
addq $4, %r13
cmpq %rax, %rbp
jl .LBB2_5
.LBB2_6: # %._crit_edge53
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_19
# %bb.7: # %_Z8cudaTest10hipError_t.exit
leaq 8(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
testl %eax, %eax
jne .LBB2_19
# %bb.8: # %_Z8cudaTest10hipError_t.exit32
movq 16(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.9: # %_Z8cudaTest10hipError_t.exit34
movq 8(%rsp), %rdi
movq %r14, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.10: # %_Z8cudaTest10hipError_t.exit36
movl (%rsp), %r13d
movabsq $4294967296, %rbp # imm = 0x100000000
orq %rbp, %r13
callq _ZNSt6chrono3_V212system_clock3nowEv
orq $1024, %rbp # imm = 0x400
movq %r13, %rdi
movl $1, %esi
movq %rbp, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq 8(%rsp), %rax
movq 16(%rsp), %rcx
movl (%rsp), %edx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movl %edx, 28(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 28(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z10radix_sortILi10EEvPiS0_i, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
callq hipDeviceSynchronize
callq _ZNSt6chrono3_V212system_clock3nowEv
callq hipPeekAtLastError
testl %eax, %eax
jne .LBB2_19
# %bb.13: # %_Z8cudaTest10hipError_t.exit38
movq 16(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.14: # %_Z8cudaTest10hipError_t.exit40
movq 8(%rsp), %rsi
movq %r14, %rdi
movq %r12, %rdx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_19
# %bb.15: # %_Z8cudaTest10hipError_t.exit42
movl 4(%rsp), %r15d
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
testl %r15d, %r15d
jle .LBB2_18
# %bb.16: # %.lr.ph.preheader.i
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_17: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
movl (%r14,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str.1, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq %r12, %r15
jne .LBB2_17
.LBB2_18: # %_Z5printPii.exit
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $1, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
callq hipDeviceReset
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_19:
.cfi_def_cfa_offset 176
movl %eax, %edi
movl %eax, %ebx
callq hipGetErrorString
movl $.L.str.2, %edi
movq %rax, %rsi
movl %ebx, %edx
movl $51, %ecx
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.section .text._Z25__device_stub__radix_sortILi10EEvPiS0_i,"axG",@progbits,_Z25__device_stub__radix_sortILi10EEvPiS0_i,comdat
.weak _Z25__device_stub__radix_sortILi10EEvPiS0_i # -- Begin function _Z25__device_stub__radix_sortILi10EEvPiS0_i
.p2align 4, 0x90
.type _Z25__device_stub__radix_sortILi10EEvPiS0_i,@function
_Z25__device_stub__radix_sortILi10EEvPiS0_i: # @_Z25__device_stub__radix_sortILi10EEvPiS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z10radix_sortILi10EEvPiS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z25__device_stub__radix_sortILi10EEvPiS0_i, .Lfunc_end3-_Z25__device_stub__radix_sortILi10EEvPiS0_i
.cfi_endproc
# -- End function
.text
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z10radix_sortILi10EEvPiS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "\n"
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " "
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "cuda returned error %s (code %d), line(%d)\n"
.size .L.str.2, 44
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%d"
.size .L.str.3, 3
.type _Z10radix_sortILi10EEvPiS0_i,@object # @_Z10radix_sortILi10EEvPiS0_i
.section .rodata._Z10radix_sortILi10EEvPiS0_i,"aG",@progbits,_Z10radix_sortILi10EEvPiS0_i,comdat
.weak _Z10radix_sortILi10EEvPiS0_i
.p2align 3, 0x0
_Z10radix_sortILi10EEvPiS0_i:
.quad _Z25__device_stub__radix_sortILi10EEvPiS0_i
.size _Z10radix_sortILi10EEvPiS0_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z10radix_sortILi10EEvPiS0_i"
.size .L__unnamed_1, 29
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z25__device_stub__radix_sortILi10EEvPiS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _ZSt4cout
.addrsig_sym _Z10radix_sortILi10EEvPiS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "iostream"
#define N 257
// Element-wise vector add: arr3[i] = arr1[i] + arr2[i] at this thread's
// flat global index.
// NOTE(review): there is no `i < n` bounds guard. main() launches with the
// grid rounded up to whole 256-thread blocks (512 threads for N = 257), so
// threads with i >= N read and write past the arrays unless the device
// allocations are padded to gridDim.x * blockDim.x elements -- confirm.
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
// Fill arr[0 .. size) with pseudo-random values from POSIX random().
// The generator is never seeded here, so the sequence repeats across runs
// (glibc default seed). A non-positive size performs no writes.
void initialize_array(float *arr, int size) {
    float *end = arr + size;
    for (float *p = arr; p < end; ++p) {
        *p = static_cast<float>(random());
    }
}
int main() {
float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
size_t n_byte = N * sizeof(float);
arr1 = (float *) malloc(n_byte);
arr2 = (float *) malloc(n_byte);
arr3 = (float *) malloc(n_byte);
initialize_array(arr1, N);
initialize_array(arr2, N);
initialize_array(arr3, N);
printf("start cudaMalloc\n");
cudaMalloc((void **) &d_arr1, N);
cudaMalloc((void **) &d_arr2, N);
cudaMalloc((void **) &d_arr3, N);
printf("finish cudaMalloc\n");
printf("start cudaMemcpy\n");
cudaMemcpy(d_arr1, arr1, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr2, arr2, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr3, arr3, n_byte, cudaMemcpyHostToDevice);
printf("finish cudaMemcpy\n");
printf("start kernel function\n");
sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
printf("finish kernel function\n");
cudaMemcpy(arr3, d_arr3, n_byte, cudaMemcpyDeviceToHost);
printf("%f", *arr3);
} | code for sm_80
Function : _Z12sum_of_arrayPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "iostream"
#define N 257
// Element-wise vector add: arr3[i] = arr1[i] + arr2[i] at this thread's
// flat global index.
// NOTE(review): there is no `i < n` bounds guard. main() launches with the
// grid rounded up to whole 256-thread blocks (512 threads for N = 257), so
// threads with i >= N read and write past the arrays unless the device
// allocations are padded to gridDim.x * blockDim.x elements -- confirm.
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
// Populate the first `size` slots of arr with pseudo-random values taken
// from POSIX random(). No seeding is done here, so successive program runs
// see the same sequence. Does nothing when size <= 0.
void initialize_array(float *arr, int size) {
    int idx = 0;
    while (idx < size) {
        arr[idx] = static_cast<float>(random());
        ++idx;
    }
}
int main() {
float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
size_t n_byte = N * sizeof(float);
arr1 = (float *) malloc(n_byte);
arr2 = (float *) malloc(n_byte);
arr3 = (float *) malloc(n_byte);
initialize_array(arr1, N);
initialize_array(arr2, N);
initialize_array(arr3, N);
printf("start cudaMalloc\n");
cudaMalloc((void **) &d_arr1, N);
cudaMalloc((void **) &d_arr2, N);
cudaMalloc((void **) &d_arr3, N);
printf("finish cudaMalloc\n");
printf("start cudaMemcpy\n");
cudaMemcpy(d_arr1, arr1, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr2, arr2, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr3, arr3, n_byte, cudaMemcpyHostToDevice);
printf("finish cudaMemcpy\n");
printf("start kernel function\n");
sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
printf("finish kernel function\n");
cudaMemcpy(arr3, d_arr3, n_byte, cudaMemcpyDeviceToHost);
printf("%f", *arr3);
} | .file "tmpxft_0006a227_00000000-6_main.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16initialize_arrayPfi
.type _Z16initialize_arrayPfi, @function
_Z16initialize_arrayPfi:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call random@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z16initialize_arrayPfi, .-_Z16initialize_arrayPfi
.globl _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
.type _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_, @function
_Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12sum_of_arrayPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_, .-_Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
.globl _Z12sum_of_arrayPfS_S_
.type _Z12sum_of_arrayPfS_S_, @function
_Z12sum_of_arrayPfS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z12sum_of_arrayPfS_S_, .-_Z12sum_of_arrayPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "start cudaMalloc\n"
.LC1:
.string "finish cudaMalloc\n"
.LC2:
.string "start cudaMemcpy\n"
.LC3:
.string "finish cudaMemcpy\n"
.LC4:
.string "start kernel function\n"
.LC5:
.string "finish kernel function\n"
.LC6:
.string "%f"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $1028, %edi
call malloc@PLT
movq %rax, %r12
movl $1028, %edi
call malloc@PLT
movq %rax, %rbp
movl $1028, %edi
call malloc@PLT
movq %rax, %rbx
movl $257, %esi
movq %r12, %rdi
call _Z16initialize_arrayPfi
movl $257, %esi
movq %rbp, %rdi
call _Z16initialize_arrayPfi
movl $257, %esi
movq %rbx, %rdi
call _Z16initialize_arrayPfi
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $1028, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1028, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1028, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L20:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $1028, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
jmp .L20
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z12sum_of_arrayPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z12sum_of_arrayPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "iostream"
#define N 257
// Element-wise vector add: arr3[i] = arr1[i] + arr2[i] at this thread's
// flat global index.
// NOTE(review): there is no `i < n` bounds guard. main() launches with the
// grid rounded up to whole 256-thread blocks (512 threads for N = 257), so
// threads with i >= N read and write past the arrays unless the device
// allocations are padded to gridDim.x * blockDim.x elements -- confirm.
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
// Fill arr[0 .. size) with pseudo-random values from POSIX random().
// The generator is never seeded here, so the sequence repeats across runs
// (glibc default seed). A non-positive size performs no writes.
void initialize_array(float *arr, int size) {
    float *end = arr + size;
    for (float *p = arr; p < end; ++p) {
        *p = static_cast<float>(random());
    }
}
int main() {
float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
size_t n_byte = N * sizeof(float);
arr1 = (float *) malloc(n_byte);
arr2 = (float *) malloc(n_byte);
arr3 = (float *) malloc(n_byte);
initialize_array(arr1, N);
initialize_array(arr2, N);
initialize_array(arr3, N);
printf("start cudaMalloc\n");
cudaMalloc((void **) &d_arr1, N);
cudaMalloc((void **) &d_arr2, N);
cudaMalloc((void **) &d_arr3, N);
printf("finish cudaMalloc\n");
printf("start cudaMemcpy\n");
cudaMemcpy(d_arr1, arr1, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr2, arr2, n_byte, cudaMemcpyHostToDevice);
cudaMemcpy(d_arr3, arr3, n_byte, cudaMemcpyHostToDevice);
printf("finish cudaMemcpy\n");
printf("start kernel function\n");
sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
printf("finish kernel function\n");
cudaMemcpy(arr3, d_arr3, n_byte, cudaMemcpyDeviceToHost);
printf("%f", *arr3);
} | #include <hip/hip_runtime.h>
#include "iostream"
#define N 257
// Element-wise vector add: arr3[i] = arr1[i] + arr2[i] at this thread's
// flat global index.
// NOTE(review): there is no `i < n` bounds guard. main() launches with the
// grid rounded up to whole 256-thread blocks (512 threads for N = 257), so
// threads with i >= N read and write past the arrays unless the device
// allocations are padded to gridDim.x * blockDim.x elements -- confirm.
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
// Populate the first `size` slots of arr with pseudo-random values taken
// from POSIX random(). No seeding is done here, so successive program runs
// see the same sequence. Does nothing when size <= 0.
void initialize_array(float *arr, int size) {
    int idx = 0;
    while (idx < size) {
        arr[idx] = static_cast<float>(random());
        ++idx;
    }
}
// Host driver: fills three host arrays with random data, adds the first two
// on the device with sum_of_array, and prints arr3[0].
// Fixes vs. the original:
//  * hipMalloc was called with N (257 *bytes*) while the hipMemcpy calls
//    below transfer n_byte = N * sizeof(float) (1028) bytes, overflowing
//    every device buffer.
//  * sum_of_array has no bounds check and the grid is rounded up to whole
//    256-thread blocks (512 threads for N = 257), so the device buffers are
//    padded to the launch size; the extra threads then write in-bounds
//    scratch that is never copied back.
//  * The six buffers were leaked; they are now released before returning.
// NOTE(review): HIP API return codes are still unchecked -- consider a
// CHECK macro.
int main() {
    float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
    size_t n_byte = N * sizeof(float);
    int blocks = (N + 255) / 256;                       // ceil-div launch size
    size_t device_bytes = (size_t) blocks * 256 * sizeof(float);
    arr1 = (float *) malloc(n_byte);
    arr2 = (float *) malloc(n_byte);
    arr3 = (float *) malloc(n_byte);
    initialize_array(arr1, N);
    initialize_array(arr2, N);
    initialize_array(arr3, N);
    printf("start cudaMalloc\n");
    hipMalloc((void **) &d_arr1, device_bytes);
    hipMalloc((void **) &d_arr2, device_bytes);
    hipMalloc((void **) &d_arr3, device_bytes);
    printf("finish cudaMalloc\n");
    printf("start cudaMemcpy\n");
    hipMemcpy(d_arr1, arr1, n_byte, hipMemcpyHostToDevice);
    hipMemcpy(d_arr2, arr2, n_byte, hipMemcpyHostToDevice);
    hipMemcpy(d_arr3, arr3, n_byte, hipMemcpyHostToDevice);
    printf("finish cudaMemcpy\n");
    printf("start kernel function\n");
    sum_of_array<<<blocks, 256>>>(d_arr1, d_arr2, d_arr3);
    printf("finish kernel function\n");
    // Blocking copy: also synchronizes with the kernel before arr3 is read.
    hipMemcpy(arr3, d_arr3, n_byte, hipMemcpyDeviceToHost);
    printf("%f", *arr3);
    // Release device and host buffers (the original leaked all of them).
    hipFree(d_arr1);
    hipFree(d_arr2);
    hipFree(d_arr3);
    free(arr1);
    free(arr2);
    free(arr3);
    return 0;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "iostream"
#define N 257
// Element-wise vector add: arr3[i] = arr1[i] + arr2[i] at this thread's
// flat global index.
// NOTE(review): there is no `i < n` bounds guard. main() launches with the
// grid rounded up to whole 256-thread blocks (512 threads for N = 257), so
// threads with i >= N read and write past the arrays unless the device
// allocations are padded to gridDim.x * blockDim.x elements -- confirm.
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
// Fill arr[0 .. size) with pseudo-random values from POSIX random().
// The generator is never seeded here, so the sequence repeats across runs
// (glibc default seed). A non-positive size performs no writes.
void initialize_array(float *arr, int size) {
    float *end = arr + size;
    for (float *p = arr; p < end; ++p) {
        *p = static_cast<float>(random());
    }
}
int main() {
float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
size_t n_byte = N * sizeof(float);
arr1 = (float *) malloc(n_byte);
arr2 = (float *) malloc(n_byte);
arr3 = (float *) malloc(n_byte);
initialize_array(arr1, N);
initialize_array(arr2, N);
initialize_array(arr3, N);
printf("start cudaMalloc\n");
hipMalloc((void **) &d_arr1, N);
hipMalloc((void **) &d_arr2, N);
hipMalloc((void **) &d_arr3, N);
printf("finish cudaMalloc\n");
printf("start cudaMemcpy\n");
hipMemcpy(d_arr1, arr1, n_byte, hipMemcpyHostToDevice);
hipMemcpy(d_arr2, arr2, n_byte, hipMemcpyHostToDevice);
hipMemcpy(d_arr3, arr3, n_byte, hipMemcpyHostToDevice);
printf("finish cudaMemcpy\n");
printf("start kernel function\n");
sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
printf("finish kernel function\n");
hipMemcpy(arr3, d_arr3, n_byte, hipMemcpyDeviceToHost);
printf("%f", *arr3);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12sum_of_arrayPfS_S_
.globl _Z12sum_of_arrayPfS_S_
.p2align 8
.type _Z12sum_of_arrayPfS_S_,@function
_Z12sum_of_arrayPfS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12sum_of_arrayPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12sum_of_arrayPfS_S_, .Lfunc_end0-_Z12sum_of_arrayPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12sum_of_arrayPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12sum_of_arrayPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "iostream"
#define N 257
__global__ void sum_of_array(float *arr1, float *arr2, float *arr3) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
arr3[i] = arr1[i] + arr2[i];
}
void initialize_array(float *arr, int size) {
for (int i = 0; i < size; i++) {
arr[i] = (float) random();
}
}
int main() {
float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
size_t n_byte = N * sizeof(float);
arr1 = (float *) malloc(n_byte);
arr2 = (float *) malloc(n_byte);
arr3 = (float *) malloc(n_byte);
initialize_array(arr1, N);
initialize_array(arr2, N);
initialize_array(arr3, N);
printf("start cudaMalloc\n");
hipMalloc((void **) &d_arr1, N);
hipMalloc((void **) &d_arr2, N);
hipMalloc((void **) &d_arr3, N);
printf("finish cudaMalloc\n");
printf("start cudaMemcpy\n");
hipMemcpy(d_arr1, arr1, n_byte, hipMemcpyHostToDevice);
hipMemcpy(d_arr2, arr2, n_byte, hipMemcpyHostToDevice);
hipMemcpy(d_arr3, arr3, n_byte, hipMemcpyHostToDevice);
printf("finish cudaMemcpy\n");
printf("start kernel function\n");
sum_of_array<<<(N + 255) / 256, 256>>>(d_arr1, d_arr2, d_arr3);
printf("finish kernel function\n");
hipMemcpy(arr3, d_arr3, n_byte, hipMemcpyDeviceToHost);
printf("%f", *arr3);
} | .text
.file "main.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z27__device_stub__sum_of_arrayPfS_S_ # -- Begin function _Z27__device_stub__sum_of_arrayPfS_S_
.p2align 4, 0x90
.type _Z27__device_stub__sum_of_arrayPfS_S_,@function
_Z27__device_stub__sum_of_arrayPfS_S_: # @_Z27__device_stub__sum_of_arrayPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sum_of_arrayPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z27__device_stub__sum_of_arrayPfS_S_, .Lfunc_end0-_Z27__device_stub__sum_of_arrayPfS_S_
.cfi_endproc
# -- End function
.globl _Z16initialize_arrayPfi # -- Begin function _Z16initialize_arrayPfi
.p2align 4, 0x90
.type _Z16initialize_arrayPfi,@function
_Z16initialize_arrayPfi: # @_Z16initialize_arrayPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z16initialize_arrayPfi, .Lfunc_end1-_Z16initialize_arrayPfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %r15
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %r14
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %rbx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_1
# %bb.2: # %.lr.ph.i22.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i22
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_3
# %bb.4: # %.lr.ph.i27.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i27
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%rbx,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_5
# %bb.6: # %_Z16initialize_arrayPfi.exit31
movl $.Lstr, %edi
callq puts@PLT
leaq 16(%rsp), %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
leaq 8(%rsp), %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
movq %rsp, %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movq 16(%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 254(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12sum_of_arrayPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_8:
movl $.Lstr.5, %edi
callq puts@PLT
movq (%rsp), %rsi
movl $1028, %edx # imm = 0x404
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12sum_of_arrayPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12sum_of_arrayPfS_S_,@object # @_Z12sum_of_arrayPfS_S_
.section .rodata,"a",@progbits
.globl _Z12sum_of_arrayPfS_S_
.p2align 3, 0x0
_Z12sum_of_arrayPfS_S_:
.quad _Z27__device_stub__sum_of_arrayPfS_S_
.size _Z12sum_of_arrayPfS_S_, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "%f"
.size .L.str.6, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12sum_of_arrayPfS_S_"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "start cudaMalloc"
.size .Lstr, 17
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "finish cudaMalloc"
.size .Lstr.1, 18
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "start cudaMemcpy"
.size .Lstr.2, 17
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "finish cudaMemcpy"
.size .Lstr.3, 18
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "start kernel function"
.size .Lstr.4, 22
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "finish kernel function"
.size .Lstr.5, 23
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__sum_of_arrayPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12sum_of_arrayPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z12sum_of_arrayPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R6, R6, c[0x0][0x0], R3 ; /* 0x0000000006067a24 */
/* 0x001fca00078e0203 */
/*0060*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0207 */
/*0080*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0090*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0207 */
/*00b0*/ FADD R9, R2, R5 ; /* 0x0000000502097221 */
/* 0x004fca0000000000 */
/*00c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00d0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00e0*/ BRA 0xe0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12sum_of_arrayPfS_S_
.globl _Z12sum_of_arrayPfS_S_
.p2align 8
.type _Z12sum_of_arrayPfS_S_,@function
_Z12sum_of_arrayPfS_S_:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x24
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12sum_of_arrayPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12sum_of_arrayPfS_S_, .Lfunc_end0-_Z12sum_of_arrayPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12sum_of_arrayPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12sum_of_arrayPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0006a227_00000000-6_main.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16initialize_arrayPfi
.type _Z16initialize_arrayPfi, @function
_Z16initialize_arrayPfi:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L5:
call random@PLT
pxor %xmm0, %xmm0
cvtsi2ssq %rax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE3669:
.size _Z16initialize_arrayPfi, .-_Z16initialize_arrayPfi
.globl _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
.type _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_, @function
_Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12sum_of_arrayPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_, .-_Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
.globl _Z12sum_of_arrayPfS_S_
.type _Z12sum_of_arrayPfS_S_, @function
_Z12sum_of_arrayPfS_S_:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z12sum_of_arrayPfS_S_, .-_Z12sum_of_arrayPfS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "start cudaMalloc\n"
.LC1:
.string "finish cudaMalloc\n"
.LC2:
.string "start cudaMemcpy\n"
.LC3:
.string "finish cudaMemcpy\n"
.LC4:
.string "start kernel function\n"
.LC5:
.string "finish kernel function\n"
.LC6:
.string "%f"
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $64, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $1028, %edi
call malloc@PLT
movq %rax, %r12
movl $1028, %edi
call malloc@PLT
movq %rax, %rbp
movl $1028, %edi
call malloc@PLT
movq %rax, %rbx
movl $257, %esi
movq %r12, %rdi
call _Z16initialize_arrayPfi
movl $257, %esi
movq %rbp, %rdi
call _Z16initialize_arrayPfi
movl $257, %esi
movq %rbx, %rdi
call _Z16initialize_arrayPfi
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 8(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $257, %esi
call cudaMalloc@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %ecx
movl $1028, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1028, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1028, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $256, 44(%rsp)
movl $1, 48(%rsp)
movl $2, 32(%rsp)
movl $1, 36(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L23
.L20:
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $1028, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L24
movl $0, %eax
addq $64, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
movq 24(%rsp), %rdx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z12sum_of_arrayPfS_S_PfS_S_
jmp .L20
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z12sum_of_arrayPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z12sum_of_arrayPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "main.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z27__device_stub__sum_of_arrayPfS_S_ # -- Begin function _Z27__device_stub__sum_of_arrayPfS_S_
.p2align 4, 0x90
.type _Z27__device_stub__sum_of_arrayPfS_S_,@function
_Z27__device_stub__sum_of_arrayPfS_S_: # @_Z27__device_stub__sum_of_arrayPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12sum_of_arrayPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z27__device_stub__sum_of_arrayPfS_S_, .Lfunc_end0-_Z27__device_stub__sum_of_arrayPfS_S_
.cfi_endproc
# -- End function
.globl _Z16initialize_arrayPfi # -- Begin function _Z16initialize_arrayPfi
.p2align 4, 0x90
.type _Z16initialize_arrayPfi,@function
_Z16initialize_arrayPfi: # @_Z16initialize_arrayPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z16initialize_arrayPfi, .Lfunc_end1-_Z16initialize_arrayPfi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %r15
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %r14
movl $1028, %edi # imm = 0x404
callq malloc
movq %rax, %rbx
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_1
# %bb.2: # %.lr.ph.i22.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_3: # %.lr.ph.i22
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%r14,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_3
# %bb.4: # %.lr.ph.i27.preheader
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i27
# =>This Inner Loop Header: Depth=1
callq random
xorps %xmm0, %xmm0
cvtsi2ss %rax, %xmm0
movss %xmm0, (%rbx,%r12,4)
incq %r12
cmpq $257, %r12 # imm = 0x101
jne .LBB2_5
# %bb.6: # %_Z16initialize_arrayPfi.exit31
movl $.Lstr, %edi
callq puts@PLT
leaq 16(%rsp), %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
leaq 8(%rsp), %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
movq %rsp, %rdi
movl $257, %esi # imm = 0x101
callq hipMalloc
movl $.Lstr.1, %edi
callq puts@PLT
movl $.Lstr.2, %edi
callq puts@PLT
movq 16(%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
movl $1028, %edx # imm = 0x404
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movl $.Lstr.3, %edi
callq puts@PLT
movl $.Lstr.4, %edi
callq puts@PLT
movabsq $4294967298, %rdi # imm = 0x100000002
leaq 254(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_8
# %bb.7:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z12sum_of_arrayPfS_S_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_8:
movl $.Lstr.5, %edi
callq puts@PLT
movq (%rsp), %rsi
movl $1028, %edx # imm = 0x404
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movss (%rbx), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.6, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12sum_of_arrayPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12sum_of_arrayPfS_S_,@object # @_Z12sum_of_arrayPfS_S_
.section .rodata,"a",@progbits
.globl _Z12sum_of_arrayPfS_S_
.p2align 3, 0x0
_Z12sum_of_arrayPfS_S_:
.quad _Z27__device_stub__sum_of_arrayPfS_S_
.size _Z12sum_of_arrayPfS_S_, 8
.type .L.str.6,@object # @.str.6
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.6:
.asciz "%f"
.size .L.str.6, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12sum_of_arrayPfS_S_"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "start cudaMalloc"
.size .Lstr, 17
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "finish cudaMalloc"
.size .Lstr.1, 18
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "start cudaMemcpy"
.size .Lstr.2, 17
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "finish cudaMemcpy"
.size .Lstr.3, 18
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "start kernel function"
.size .Lstr.4, 22
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "finish kernel function"
.size .Lstr.5, 23
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__sum_of_arrayPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12sum_of_arrayPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
__global__ void setup_kernel(curandState *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
* number, no offset */
curand_init(345893, id, 0, &state[id]);
}
__global__ void generate_uniform_kernel(curandState *state,
int n_points_per_thread,
int *result)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int count = 0;
float x;
float y;
float z;
float r2;
/* Copy state to local memory for efficiency */
curandState localState = state[id];
/* Generate pseudo-random uniforms */
for(int i = 0; i < n_points_per_thread; i++) {
x = curand_uniform(&localState) * 4 - 2;
y = curand_uniform(&localState) * 4 - 2;
r2 = pow(x, 2) + pow(y, 2);
z = curand_uniform(&localState);
if(z < exp(-1 * r2)) {
count++;
}
// if (z > 0.5)
// {
// count++;
// }
}
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] += count;
}
__global__ void shmem_reduce( int *d_out, int *d_in)
{
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
sdata[tid]=d_in[myId];
int s = blockDim.x / 2;
while(s>0)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
s=( int)s/2;
}
if (tid == 0)
{
d_out[blockIdx.x] =sdata[0];
}
}
int main()
{
int n_threads = 1024;
int n_points_per_thread = 1000000;
curandState *devStates;
// int total;
int *devResults;
int *devIntermediate;
int *devReduced;
int *hostResults;
int *hostReduced;
hostResults = ( int *)calloc(n_threads, sizeof( int));
hostReduced = ( int *)calloc(n_threads, sizeof( int));
CUDA_CALL(cudaMalloc((void **)&devResults, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devReduced, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devIntermediate, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devStates, n_threads * sizeof(curandState)));
CUDA_CALL(cudaMemset(devResults, 0, n_threads * sizeof( int)));
CUDA_CALL(cudaMemset(devReduced, 0, n_threads * sizeof( int)));
setup_kernel<<<1, n_threads>>>(devStates);
generate_uniform_kernel<<<1, n_threads>>>(devStates, n_points_per_thread, devResults);
shmem_reduce<<<n_threads / 32, 32, n_threads * sizeof(int)>>>(devIntermediate,devResults);
shmem_reduce<<<32, 32, 32 * sizeof(int)>>>(devReduced,devIntermediate);
CUDA_CALL(cudaMemcpy(hostResults, devResults, n_threads*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(hostReduced, devReduced, n_threads*sizeof(int), cudaMemcpyDeviceToHost));
// for (int i=0; i<n_threads; i++)
// {
// printf("%d ", hostResults[i]);
// }
// printf("\n");
/*
for (int i=0; i<n_threads; i++)
{
printf("%d ", hostReduced[i]);
}
printf("\n");*/
printf("Total area: %1.7f \n", (float) hostReduced[0] / (float) n_points_per_thread / (float) n_threads * 16);
CUDA_CALL(cudaFree(devResults));
CUDA_CALL(cudaFree(devReduced));
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(devIntermediate));
free(hostReduced);
free(hostResults);
return EXIT_SUCCESS;
} | .file "tmpxft_000f6368_00000000-6_gauss.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2274:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2274:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
.type _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW, @function
_Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW:
.LFB2296:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12setup_kernelP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2296:
.size _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW, .-_Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
.globl _Z12setup_kernelP17curandStateXORWOW
.type _Z12setup_kernelP17curandStateXORWOW, @function
_Z12setup_kernelP17curandStateXORWOW:
.LFB2297:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2297:
.size _Z12setup_kernelP17curandStateXORWOW, .-_Z12setup_kernelP17curandStateXORWOW
.globl _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
.type _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi, @function
_Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi:
.LFB2298:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23generate_uniform_kernelP17curandStateXORWOWiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi, .-_Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
.globl _Z23generate_uniform_kernelP17curandStateXORWOWiPi
.type _Z23generate_uniform_kernelP17curandStateXORWOWiPi, @function
_Z23generate_uniform_kernelP17curandStateXORWOWiPi:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z23generate_uniform_kernelP17curandStateXORWOWiPi, .-_Z23generate_uniform_kernelP17curandStateXORWOWiPi
.globl _Z34__device_stub__Z12shmem_reducePiS_PiS_
.type _Z34__device_stub__Z12shmem_reducePiS_PiS_, @function
_Z34__device_stub__Z12shmem_reducePiS_PiS_:
.LFB2300:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12shmem_reducePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2300:
.size _Z34__device_stub__Z12shmem_reducePiS_PiS_, .-_Z34__device_stub__Z12shmem_reducePiS_PiS_
.globl _Z12shmem_reducePiS_
.type _Z12shmem_reducePiS_, @function
_Z12shmem_reducePiS_:
.LFB2301:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _Z12shmem_reducePiS_, .-_Z12shmem_reducePiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/aleksey-uvarov/hpc-2019/master/gauss.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Error at %s:%d\n"
.LC5:
.string "Total area: %1.7f \n"
.text
.globl main
.type main, @function
main:
.LFB2271:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4, %esi
movl $1024, %edi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movl $1024, %edi
call calloc@PLT
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L47
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L48
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L49
movq %rsp, %rdi
movl $49152, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L50
movl $4096, %edx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L51
movl $4096, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L52
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L35:
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L36:
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $32, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $4096, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L37:
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $32, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $128, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L56
.L38:
movl $2, %ecx
movl $4096, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L57
movl $2, %ecx
movl $4096, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L58
pxor %xmm0, %xmm0
cvtsi2ssl (%rbx), %xmm0
divss .LC2(%rip), %xmm0
mulss .LC3(%rip), %xmm0
mulss .LC4(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L59
movq 24(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L60
movq (%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L61
movq 16(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L62
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movl $0, %eax
jmp .L27
.L47:
movl $93, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L27:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L63
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movl $94, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L49:
movl $95, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L50:
movl $97, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L51:
movl $99, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L52:
movl $100, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L53:
movq (%rsp), %rdi
call _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
jmp .L35
.L54:
movq 8(%rsp), %rdx
movl $1000000, %esi
movq (%rsp), %rdi
call _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
jmp .L36
.L55:
movq 8(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
jmp .L37
.L56:
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
jmp .L38
.L57:
movl $110, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L58:
movl $112, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L59:
movl $130, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L60:
movl $131, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L61:
movl $132, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L62:
movl $133, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L63:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2271:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z12shmem_reducePiS_"
.section .rodata.str1.8
.align 8
.LC7:
.string "_Z23generate_uniform_kernelP17curandStateXORWOWiPi"
.align 8
.LC8:
.string "_Z12setup_kernelP17curandStateXORWOW"
.section .rodata.str1.1
.LC9:
.string "precalc_xorwow_matrix"
.LC10:
.string "precalc_xorwow_offset_matrix"
.LC11:
.string "mrg32k3aM1"
.LC12:
.string "mrg32k3aM2"
.LC13:
.string "mrg32k3aM1SubSeq"
.LC14:
.string "mrg32k3aM2SubSeq"
.LC15:
.string "mrg32k3aM1Seq"
.LC16:
.string "mrg32k3aM2Seq"
.LC17:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2303:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z12shmem_reducePiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z23generate_uniform_kernelP17curandStateXORWOWiPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12setup_kernelP17curandStateXORWOW(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2303:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1232348160
.align 4
.LC3:
.long 981467136
.align 4
.LC4:
.long 1098907648
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
__global__ void setup_kernel(curandState *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
* number, no offset */
curand_init(345893, id, 0, &state[id]);
}
__global__ void generate_uniform_kernel(curandState *state,
int n_points_per_thread,
int *result)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int count = 0;
float x;
float y;
float z;
float r2;
/* Copy state to local memory for efficiency */
curandState localState = state[id];
/* Generate pseudo-random uniforms */
for(int i = 0; i < n_points_per_thread; i++) {
x = curand_uniform(&localState) * 4 - 2;
y = curand_uniform(&localState) * 4 - 2;
r2 = pow(x, 2) + pow(y, 2);
z = curand_uniform(&localState);
if(z < exp(-1 * r2)) {
count++;
}
// if (z > 0.5)
// {
// count++;
// }
}
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] += count;
}
__global__ void shmem_reduce( int *d_out, int *d_in)
{
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
sdata[tid]=d_in[myId];
int s = blockDim.x / 2;
while(s>0)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
s=( int)s/2;
}
if (tid == 0)
{
d_out[blockIdx.x] =sdata[0];
}
}
int main()
{
int n_threads = 1024;
int n_points_per_thread = 1000000;
curandState *devStates;
// int total;
int *devResults;
int *devIntermediate;
int *devReduced;
int *hostResults;
int *hostReduced;
hostResults = ( int *)calloc(n_threads, sizeof( int));
hostReduced = ( int *)calloc(n_threads, sizeof( int));
CUDA_CALL(cudaMalloc((void **)&devResults, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devReduced, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devIntermediate, n_threads * sizeof( int)));
CUDA_CALL(cudaMalloc((void **)&devStates, n_threads * sizeof(curandState)));
CUDA_CALL(cudaMemset(devResults, 0, n_threads * sizeof( int)));
CUDA_CALL(cudaMemset(devReduced, 0, n_threads * sizeof( int)));
setup_kernel<<<1, n_threads>>>(devStates);
generate_uniform_kernel<<<1, n_threads>>>(devStates, n_points_per_thread, devResults);
shmem_reduce<<<n_threads / 32, 32, n_threads * sizeof(int)>>>(devIntermediate,devResults);
shmem_reduce<<<32, 32, 32 * sizeof(int)>>>(devReduced,devIntermediate);
CUDA_CALL(cudaMemcpy(hostResults, devResults, n_threads*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(hostReduced, devReduced, n_threads*sizeof(int), cudaMemcpyDeviceToHost));
// for (int i=0; i<n_threads; i++)
// {
// printf("%d ", hostResults[i]);
// }
// printf("\n");
/*
for (int i=0; i<n_threads; i++)
{
printf("%d ", hostReduced[i]);
}
printf("\n");*/
printf("Total area: %1.7f \n", (float) hostReduced[0] / (float) n_points_per_thread / (float) n_threads * 16);
CUDA_CALL(cudaFree(devResults));
CUDA_CALL(cudaFree(devReduced));
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(devIntermediate));
free(hostReduced);
free(hostResults);
return EXIT_SUCCESS;
} | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
__global__ void setup_kernel(hiprandState *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
* number, no offset */
hiprand_init(345893, id, 0, &state[id]);
}
__global__ void generate_uniform_kernel(hiprandState *state,
int n_points_per_thread,
int *result)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int count = 0;
float x;
float y;
float z;
float r2;
/* Copy state to local memory for efficiency */
hiprandState localState = state[id];
/* Generate pseudo-random uniforms */
for(int i = 0; i < n_points_per_thread; i++) {
x = hiprand_uniform(&localState) * 4 - 2;
y = hiprand_uniform(&localState) * 4 - 2;
r2 = pow(x, 2) + pow(y, 2);
z = hiprand_uniform(&localState);
if(z < exp(-1 * r2)) {
count++;
}
// if (z > 0.5)
// {
// count++;
// }
}
/* Copy state back to global memory */
state[id] = localState;
/* Store results */
result[id] += count;
}
__global__ void shmem_reduce( int *d_out, int *d_in)
{
extern __shared__ int sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
sdata[tid]=d_in[myId];
int s = blockDim.x / 2;
while(s>0)
{
if (tid<s)
{
sdata[tid]+=sdata[tid+s];
}
__syncthreads();
s=( int)s/2;
}
if (tid == 0)
{
d_out[blockIdx.x] =sdata[0];
}
}
int main()
{
/* Monte Carlo estimate of the integral of exp(-(x^2+y^2)) over the
 * square [-2,2] x [-2,2]: every GPU thread samples points, per-thread
 * acceptance counts are tree-reduced on the device, and the host scales
 * the accepted fraction by the domain area (4 * 4 = 16).
 * NOTE(review): CUDA_CALL is an error-check macro defined earlier in
 * this file (not visible in this chunk) -- presumably it prints and
 * returns EXIT_FAILURE on error; confirm before relying on cleanup. */
int n_threads = 1024;
int n_points_per_thread = 1000000;
hiprandState *devStates;              /* per-thread RNG state, device side */
// int total;
int *devResults;                      /* per-thread acceptance counts */
int *devIntermediate;                 /* per-block partial sums (1st pass) */
int *devReduced;                      /* final sums (2nd pass) */
int *hostResults;
int *hostReduced;
/* calloc so host buffers start zeroed. */
hostResults = ( int *)calloc(n_threads, sizeof( int));
hostReduced = ( int *)calloc(n_threads, sizeof( int));
CUDA_CALL(hipMalloc((void **)&devResults, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devReduced, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devIntermediate, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devStates, n_threads * sizeof(hiprandState)));
CUDA_CALL(hipMemset(devResults, 0, n_threads * sizeof( int)));
CUDA_CALL(hipMemset(devReduced, 0, n_threads * sizeof( int)));
/* One block of n_threads threads: seed RNGs, then sample. */
setup_kernel<<<1, n_threads>>>(devStates);
generate_uniform_kernel<<<1, n_threads>>>(devStates, n_points_per_thread, devResults);
/* Two-pass reduction: 1024 counts -> 32 partials -> devReduced[0].
 * NOTE(review): the first launch requests n_threads * sizeof(int) bytes
 * of dynamic shared memory although each 32-thread block only uses 32
 * ints; the second launch runs 32 blocks of which only block 0 reads
 * initialized data (devIntermediate[0..31]) -- only devReduced[0] is
 * consumed below, so the printed result is still the intended sum. */
shmem_reduce<<<n_threads / 32, 32, n_threads * sizeof(int)>>>(devIntermediate,devResults);
shmem_reduce<<<32, 32, 32 * sizeof(int)>>>(devReduced,devIntermediate);
/* Blocking copies double as synchronization before reading results. */
CUDA_CALL(hipMemcpy(hostResults, devResults, n_threads*sizeof(int), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(hostReduced, devReduced, n_threads*sizeof(int), hipMemcpyDeviceToHost));
// for (int i=0; i<n_threads; i++)
// {
// printf("%d ", hostResults[i]);
// }
// printf("\n");
/*
for (int i=0; i<n_threads; i++)
{
printf("%d ", hostReduced[i]);
}
printf("\n");*/
/* accepted / total samples, scaled by the sampling-domain area 16. */
printf("Total area: %1.7f \n", (float) hostReduced[0] / (float) n_points_per_thread / (float) n_threads * 16);
CUDA_CALL(hipFree(devResults));
CUDA_CALL(hipFree(devReduced));
CUDA_CALL(hipFree(devStates));
CUDA_CALL(hipFree(devIntermediate));
free(hostReduced);
free(hostResults);
return EXIT_SUCCESS;
}
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
/* Error-check wrapper for HIP API calls: on any status other than
 * hipSuccess, report the source location and make the *enclosing*
 * function return EXIT_FAILURE. Only usable inside a function that
 * returns int (here: main); note it does not free prior allocations. */
#define CUDA_CALL(x) do { if((x) != hipSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
/* One-time RNG setup: initialise one hiprandState per thread.
 * All threads share the same seed; using the global thread index as
 * the sequence number gives every thread an independent stream
 * (offset 0 into that stream). */
__global__ void setup_kernel(hiprandState *state)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    hiprand_init(345893, tid, 0, &state[tid]);
}
/* Monte Carlo sampling kernel.
 *
 * Each thread draws n_points_per_thread points (x, y) uniformly from
 * [-2, 2] x [-2, 2] plus a uniform z in [0, 1), and counts how often
 * z < exp(-(x^2 + y^2)). The count is accumulated into result[id]
 * (one slot per thread, so no atomics are needed).
 *
 * state  : per-thread RNG states initialised by setup_kernel
 * result : per-thread acceptance counters (read-modify-write)
 */
__global__ void generate_uniform_kernel(hiprandState *state,
                int n_points_per_thread,
                int *result)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int count = 0;

    /* Work on a local copy of the RNG state; written back at the end. */
    hiprandState localState = state[id];

    for(int i = 0; i < n_points_per_thread; i++) {
        /* Uniform point in the square [-2, 2] x [-2, 2]. */
        float x = hiprand_uniform(&localState) * 4.0f - 2.0f;
        float y = hiprand_uniform(&localState) * 4.0f - 2.0f;
        /* FIX: the original used pow(x, 2) + pow(y, 2) and exp(-1 * r2),
         * which promote to double-precision libm calls inside a float
         * kernel; plain multiplies and expf() keep the math in float. */
        float r2 = x * x + y * y;
        float z = hiprand_uniform(&localState);
        if (z < expf(-r2)) {
            count++;
        }
    }

    /* Persist the advanced RNG state for any later launches. */
    state[id] = localState;
    /* Accumulate (buffer is zeroed by the host before the launch). */
    result[id] += count;
}
/* Block-level tree reduction (sum) in dynamic shared memory.
 *
 * d_in  : input array, one int per launched thread
 * d_out : one partial sum per block, written to d_out[blockIdx.x]
 *
 * Launch requirements:
 *  - dynamic shared memory: blockDim.x * sizeof(int) bytes
 *  - assumes blockDim.x is a power of two -- TODO confirm at call sites
 */
__global__ void shmem_reduce( int *d_out, int *d_in)
{
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;

    /* Stage one element per thread into shared memory. */
    sdata[tid] = d_in[myId];
    /* BUGFIX: barrier between the store above and the first read below.
     * Without it a thread may read sdata[tid + s] before the owning
     * thread has written it (shared-memory data race). */
    __syncthreads();

    /* Pairwise reduction: halve the active span each iteration. */
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        }
        /* Barrier is outside the divergent branch so every thread
         * in the block reaches it. */
        __syncthreads();
    }

    /* Thread 0 publishes the block's partial sum. */
    if (tid == 0)
    {
        d_out[blockIdx.x] = sdata[0];
    }
}
/* Monte Carlo estimate of the integral of exp(-(x^2+y^2)) over the
 * square [-2,2] x [-2,2]: every GPU thread samples points, per-thread
 * acceptance counts are tree-reduced on the device in two passes, and
 * the host scales the accepted fraction by the domain area (4*4 = 16).
 * CUDA_CALL aborts main with EXIT_FAILURE on any HIP error. */
int main()
{
int n_threads = 1024;
int n_points_per_thread = 1000000;
hiprandState *devStates;              /* per-thread RNG state */
// int total;
int *devResults;                      /* per-thread acceptance counts */
int *devIntermediate;                 /* per-block partial sums (1st pass) */
int *devReduced;                      /* final sums (2nd pass) */
int *hostResults;
int *hostReduced;
hostResults = ( int *)calloc(n_threads, sizeof( int));
hostReduced = ( int *)calloc(n_threads, sizeof( int));
CUDA_CALL(hipMalloc((void **)&devResults, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devReduced, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devIntermediate, n_threads * sizeof( int)));
CUDA_CALL(hipMalloc((void **)&devStates, n_threads * sizeof(hiprandState)));
CUDA_CALL(hipMemset(devResults, 0, n_threads * sizeof( int)));
CUDA_CALL(hipMemset(devReduced, 0, n_threads * sizeof( int)));
/* One block of n_threads threads: seed RNGs, then sample. */
setup_kernel<<<1, n_threads>>>(devStates);
generate_uniform_kernel<<<1, n_threads>>>(devStates, n_points_per_thread, devResults);
/* First pass: 32 blocks of 32 threads -> devIntermediate[0..31].
 * FIX: the block only stages blockDim.x (= 32) ints, so request
 * 32 * sizeof(int) bytes of dynamic shared memory instead of the
 * previous n_threads * sizeof(int) (4 KiB of wasted shared memory
 * per block, which limits occupancy). */
shmem_reduce<<<n_threads / 32, 32, 32 * sizeof(int)>>>(devIntermediate,devResults);
/* Second pass: a single 32-thread block folds the 32 partial sums
 * into devReduced[0].
 * FIX: this previously launched 32 blocks, so blocks 1..31 read the
 * uninitialized tail of devIntermediate (indices 32..1023); only
 * block 0's result was ever consumed. One block is both sufficient
 * and avoids the uninitialized reads. */
shmem_reduce<<<1, 32, 32 * sizeof(int)>>>(devReduced,devIntermediate);
/* Blocking copies double as synchronization before reading results. */
CUDA_CALL(hipMemcpy(hostResults, devResults, n_threads*sizeof(int), hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(hostReduced, devReduced, n_threads*sizeof(int), hipMemcpyDeviceToHost));
// for (int i=0; i<n_threads; i++)
// {
// printf("%d ", hostResults[i]);
// }
// printf("\n");
/*
for (int i=0; i<n_threads; i++)
{
printf("%d ", hostReduced[i]);
}
printf("\n");*/
/* accepted / total samples, scaled by the sampling-domain area 16. */
printf("Total area: %1.7f \n", (float) hostReduced[0] / (float) n_points_per_thread / (float) n_threads * 16);
CUDA_CALL(hipFree(devResults));
CUDA_CALL(hipFree(devReduced));
CUDA_CALL(hipFree(devStates));
CUDA_CALL(hipFree(devIntermediate));
free(hostReduced);
free(hostResults);
return EXIT_SUCCESS;
}
.file "gauss.hip"
.globl _Z27__device_stub__setup_kernelP12hiprandState # -- Begin function _Z27__device_stub__setup_kernelP12hiprandState
.p2align 4, 0x90
.type _Z27__device_stub__setup_kernelP12hiprandState,@function
_Z27__device_stub__setup_kernelP12hiprandState: # @_Z27__device_stub__setup_kernelP12hiprandState
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z12setup_kernelP12hiprandState, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z27__device_stub__setup_kernelP12hiprandState, .Lfunc_end0-_Z27__device_stub__setup_kernelP12hiprandState
.cfi_endproc
# -- End function
.globl _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi # -- Begin function _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.p2align 4, 0x90
.type _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi,@function
_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi: # @_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23generate_uniform_kernelP12hiprandStateiPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi, .Lfunc_end1-_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.cfi_endproc
# -- End function
.globl _Z27__device_stub__shmem_reducePiS_ # -- Begin function _Z27__device_stub__shmem_reducePiS_
.p2align 4, 0x90
.type _Z27__device_stub__shmem_reducePiS_,@function
_Z27__device_stub__shmem_reducePiS_: # @_Z27__device_stub__shmem_reducePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z12shmem_reducePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z27__device_stub__shmem_reducePiS_, .Lfunc_end2-_Z27__device_stub__shmem_reducePiS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x49742400 # float 1.0E+6
.LCPI3_1:
.long 0x3a800000 # float 9.765625E-4
.LCPI3_2:
.long 0x41800000 # float 16
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $32, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $1024, %edi # imm = 0x400
movl $4, %esi
callq calloc
movq %rax, %rbx
movl $1024, %edi # imm = 0x400
movl $4, %esi
callq calloc
movq %rax, %r14
movq %rsp, %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_3
# %bb.1:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $93, %edx
jmp .LBB3_2
.LBB3_3:
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_5
# %bb.4:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $94, %edx
jmp .LBB3_2
.LBB3_5:
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_7
# %bb.6:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $95, %edx
jmp .LBB3_2
.LBB3_7:
leaq 24(%rsp), %rdi
movl $49152, %esi # imm = 0xC000
callq hipMalloc
testl %eax, %eax
je .LBB3_9
# %bb.8:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $97, %edx
jmp .LBB3_2
.LBB3_9:
movq (%rsp), %rdi
movl $4096, %edx # imm = 0x1000
xorl %esi, %esi
callq hipMemset
testl %eax, %eax
je .LBB3_11
# %bb.10:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $99, %edx
jmp .LBB3_2
.LBB3_11:
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
xorl %esi, %esi
callq hipMemset
testl %eax, %eax
je .LBB3_13
# %bb.12:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $100, %edx
jmp .LBB3_2
.LBB3_13:
movabsq $4294967328, %r15 # imm = 0x100000020
leaq -31(%r15), %r13
leaq 992(%r15), %r12
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_15
# %bb.14:
movq 24(%rsp), %rdi
callq _Z27__device_stub__setup_kernelP12hiprandState
.LBB3_15:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_17
# %bb.16:
movq 24(%rsp), %rdi
movq (%rsp), %rdx
movl $1000000, %esi # imm = 0xF4240
callq _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.LBB3_17:
movl $4096, %r8d # imm = 0x1000
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_19
# %bb.18:
movq 16(%rsp), %rdi
movq (%rsp), %rsi
callq _Z27__device_stub__shmem_reducePiS_
.LBB3_19:
movl $128, %r8d
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_21
# %bb.20:
movq 8(%rsp), %rdi
movq 16(%rsp), %rsi
callq _Z27__device_stub__shmem_reducePiS_
.LBB3_21:
movq (%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB3_23
# %bb.22:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $110, %edx
jmp .LBB3_2
.LBB3_23:
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB3_25
# %bb.24:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $112, %edx
jmp .LBB3_2
.LBB3_25:
cvtsi2ssl (%r14), %xmm0
divss .LCPI3_0(%rip), %xmm0
mulss .LCPI3_1(%rip), %xmm0
mulss .LCPI3_2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq (%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_27
# %bb.26:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $130, %edx
jmp .LBB3_2
.LBB3_27:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_29
# %bb.28:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $131, %edx
jmp .LBB3_2
.LBB3_29:
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_31
# %bb.30:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $132, %edx
jmp .LBB3_2
.LBB3_31:
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_33
# %bb.32:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $133, %edx
.LBB3_2:
xorl %eax, %eax
callq printf
movl $1, %eax
.LBB3_34:
addq $32, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_33:
.cfi_def_cfa_offset 80
movq %r14, %rdi
callq free
movq %rbx, %rdi
callq free
xorl %eax, %eax
jmp .LBB3_34
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12setup_kernelP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23generate_uniform_kernelP12hiprandStateiPi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12shmem_reducePiS_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12setup_kernelP12hiprandState,@object # @_Z12setup_kernelP12hiprandState
.section .rodata,"a",@progbits
.globl _Z12setup_kernelP12hiprandState
.p2align 3, 0x0
_Z12setup_kernelP12hiprandState:
.quad _Z27__device_stub__setup_kernelP12hiprandState
.size _Z12setup_kernelP12hiprandState, 8
.type _Z23generate_uniform_kernelP12hiprandStateiPi,@object # @_Z23generate_uniform_kernelP12hiprandStateiPi
.globl _Z23generate_uniform_kernelP12hiprandStateiPi
.p2align 3, 0x0
_Z23generate_uniform_kernelP12hiprandStateiPi:
.quad _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.size _Z23generate_uniform_kernelP12hiprandStateiPi, 8
.type _Z12shmem_reducePiS_,@object # @_Z12shmem_reducePiS_
.globl _Z12shmem_reducePiS_
.p2align 3, 0x0
_Z12shmem_reducePiS_:
.quad _Z27__device_stub__shmem_reducePiS_
.size _Z12shmem_reducePiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error at %s:%d\n"
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/aleksey-uvarov/hpc-2019/master/gauss.hip"
.size .L.str.1, 98
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total area: %1.7f \n"
.size .L.str.2, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12setup_kernelP12hiprandState"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23generate_uniform_kernelP12hiprandStateiPi"
.size .L__unnamed_2, 46
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z12shmem_reducePiS_"
.size .L__unnamed_3, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__setup_kernelP12hiprandState
.addrsig_sym _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.addrsig_sym _Z27__device_stub__shmem_reducePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12setup_kernelP12hiprandState
.addrsig_sym _Z23generate_uniform_kernelP12hiprandStateiPi
.addrsig_sym _Z12shmem_reducePiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f6368_00000000-6_gauss.cudafe1.cpp"
.text
#APP
#NO_APP
# static void __cudaUnregisterBinaryUtil(): atexit hook (registered by
# __sti____cudaRegisterAll below) that hands the cached fatbin handle
# back to the CUDA runtime at process exit.
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2274:
.cfi_startproc
endbr64
subq $8, %rsp # keep stack 16-byte aligned for the call
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi # handle saved at registration time
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2274:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
.type _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW, @function
_Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW:
.LFB2296:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12setup_kernelP17curandStateXORWOW(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2296:
.size _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW, .-_Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
.globl _Z12setup_kernelP17curandStateXORWOW
.type _Z12setup_kernelP17curandStateXORWOW, @function
_Z12setup_kernelP17curandStateXORWOW:
.LFB2297:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2297:
.size _Z12setup_kernelP17curandStateXORWOW, .-_Z12setup_kernelP17curandStateXORWOW
.globl _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
.type _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi, @function
_Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi:
.LFB2298:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23generate_uniform_kernelP17curandStateXORWOWiPi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2298:
.size _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi, .-_Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
.globl _Z23generate_uniform_kernelP17curandStateXORWOWiPi
.type _Z23generate_uniform_kernelP17curandStateXORWOWiPi, @function
_Z23generate_uniform_kernelP17curandStateXORWOWiPi:
.LFB2299:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2299:
.size _Z23generate_uniform_kernelP17curandStateXORWOWiPi, .-_Z23generate_uniform_kernelP17curandStateXORWOWiPi
.globl _Z34__device_stub__Z12shmem_reducePiS_PiS_
.type _Z34__device_stub__Z12shmem_reducePiS_PiS_, @function
_Z34__device_stub__Z12shmem_reducePiS_PiS_:
.LFB2300:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z12shmem_reducePiS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2300:
.size _Z34__device_stub__Z12shmem_reducePiS_PiS_, .-_Z34__device_stub__Z12shmem_reducePiS_PiS_
.globl _Z12shmem_reducePiS_
.type _Z12shmem_reducePiS_, @function
_Z12shmem_reducePiS_:
.LFB2301:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2301:
.size _Z12shmem_reducePiS_, .-_Z12shmem_reducePiS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/aleksey-uvarov/hpc-2019/master/gauss.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Error at %s:%d\n"
.LC5:
.string "Total area: %1.7f \n"
.text
.globl main
.type main, @function
main:
.LFB2271:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $72, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $4, %esi
movl $1024, %edi
call calloc@PLT
movq %rax, %rbp
movl $4, %esi
movl $1024, %edi
call calloc@PLT
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L47
leaq 24(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L48
leaq 16(%rsp), %rdi
movl $4096, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L49
movq %rsp, %rdi
movl $49152, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L50
movl $4096, %edx
movl $0, %esi
movq 8(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L51
movl $4096, %edx
movl $0, %esi
movq 24(%rsp), %rdi
call cudaMemset@PLT
testl %eax, %eax
jne .L52
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L53
.L35:
movl $1024, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L54
.L36:
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $32, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $4096, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L55
.L37:
movl $32, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $32, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $128, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L56
.L38:
movl $2, %ecx
movl $4096, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L57
movl $2, %ecx
movl $4096, %edx
movq 24(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L58
pxor %xmm0, %xmm0
cvtsi2ssl (%rbx), %xmm0
divss .LC2(%rip), %xmm0
mulss .LC3(%rip), %xmm0
mulss .LC4(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L59
movq 24(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L60
movq (%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L61
movq 16(%rsp), %rdi
call cudaFree@PLT
testl %eax, %eax
jne .L62
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movl $0, %eax
jmp .L27
.L47:
movl $93, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
.L27:
movq 56(%rsp), %rdx
subq %fs:40, %rdx
jne .L63
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
movl $94, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L49:
movl $95, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L50:
movl $97, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L51:
movl $99, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L52:
movl $100, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L53:
movq (%rsp), %rdi
call _Z50__device_stub__Z12setup_kernelP17curandStateXORWOWP17curandStateXORWOW
jmp .L35
.L54:
movq 8(%rsp), %rdx
movl $1000000, %esi
movq (%rsp), %rdi
call _Z64__device_stub__Z23generate_uniform_kernelP17curandStateXORWOWiPiP17curandStateXORWOWiPi
jmp .L36
.L55:
movq 8(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
jmp .L37
.L56:
movq 16(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z34__device_stub__Z12shmem_reducePiS_PiS_
jmp .L38
.L57:
movl $110, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L58:
movl $112, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L59:
movl $130, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L60:
movl $131, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L61:
movl $132, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L62:
movl $133, %ecx
leaq .LC0(%rip), %rdx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L27
.L63:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2271:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z12shmem_reducePiS_"
.section .rodata.str1.8
.align 8
.LC7:
.string "_Z23generate_uniform_kernelP17curandStateXORWOWiPi"
.align 8
.LC8:
.string "_Z12setup_kernelP17curandStateXORWOW"
.section .rodata.str1.1
.LC9:
.string "precalc_xorwow_matrix"
.LC10:
.string "precalc_xorwow_offset_matrix"
.LC11:
.string "mrg32k3aM1"
.LC12:
.string "mrg32k3aM2"
.LC13:
.string "mrg32k3aM1SubSeq"
.LC14:
.string "mrg32k3aM2SubSeq"
.LC15:
.string "mrg32k3aM1Seq"
.LC16:
.string "mrg32k3aM2Seq"
.LC17:
.string "__cr_lgamma_table"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2303:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z12shmem_reducePiS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z23generate_uniform_kernelP17curandStateXORWOWiPi(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _Z12setup_kernelP17curandStateXORWOW(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL21precalc_xorwow_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $102400, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL28precalc_xorwow_offset_matrix(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM1(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10mrg32k3aM2(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM1SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2016, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL16mrg32k3aM2SubSeq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM1Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $2304, %r9d
movl $0, %r8d
leaq .LC16(%rip), %rdx
movq %rdx, %rcx
leaq _ZL13mrg32k3aM2Seq(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $72, %r9d
movl $0, %r8d
leaq .LC17(%rip), %rdx
movq %rdx, %rcx
leaq _ZL17__cr_lgamma_table(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2303:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL17__cr_lgamma_table
.comm _ZL17__cr_lgamma_table,72,32
.local _ZL13mrg32k3aM2Seq
.comm _ZL13mrg32k3aM2Seq,2304,32
.local _ZL13mrg32k3aM1Seq
.comm _ZL13mrg32k3aM1Seq,2304,32
.local _ZL16mrg32k3aM2SubSeq
.comm _ZL16mrg32k3aM2SubSeq,2016,32
.local _ZL16mrg32k3aM1SubSeq
.comm _ZL16mrg32k3aM1SubSeq,2016,32
.local _ZL10mrg32k3aM2
.comm _ZL10mrg32k3aM2,2304,32
.local _ZL10mrg32k3aM1
.comm _ZL10mrg32k3aM1,2304,32
.local _ZL28precalc_xorwow_offset_matrix
.comm _ZL28precalc_xorwow_offset_matrix,102400,32
.local _ZL21precalc_xorwow_matrix
.comm _ZL21precalc_xorwow_matrix,102400,32
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC2:
.long 1232348160
.align 4
.LC3:
.long 981467136
.align 4
.LC4:
.long 1098907648
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gauss.hip"
.globl _Z27__device_stub__setup_kernelP12hiprandState # -- Begin function _Z27__device_stub__setup_kernelP12hiprandState
.p2align 4, 0x90
.type _Z27__device_stub__setup_kernelP12hiprandState,@function
_Z27__device_stub__setup_kernelP12hiprandState: # @_Z27__device_stub__setup_kernelP12hiprandState
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z12setup_kernelP12hiprandState, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z27__device_stub__setup_kernelP12hiprandState, .Lfunc_end0-_Z27__device_stub__setup_kernelP12hiprandState
.cfi_endproc
# -- End function
.globl _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi # -- Begin function _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.p2align 4, 0x90
.type _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi,@function
_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi: # @_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movl %esi, 12(%rsp)
movq %rdx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 12(%rsp), %rax
movq %rax, 88(%rsp)
leaq 64(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23generate_uniform_kernelP12hiprandStateiPi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi, .Lfunc_end1-_Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.cfi_endproc
# -- End function
.globl _Z27__device_stub__shmem_reducePiS_ # -- Begin function _Z27__device_stub__shmem_reducePiS_
.p2align 4, 0x90
.type _Z27__device_stub__shmem_reducePiS_,@function
_Z27__device_stub__shmem_reducePiS_: # @_Z27__device_stub__shmem_reducePiS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z12shmem_reducePiS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end2:
.size _Z27__device_stub__shmem_reducePiS_, .Lfunc_end2-_Z27__device_stub__shmem_reducePiS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x49742400 # float 1.0E+6
.LCPI3_1:
.long 0x3a800000 # float 9.765625E-4
.LCPI3_2:
.long 0x41800000 # float 16
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $32, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $1024, %edi # imm = 0x400
movl $4, %esi
callq calloc
movq %rax, %rbx
movl $1024, %edi # imm = 0x400
movl $4, %esi
callq calloc
movq %rax, %r14
movq %rsp, %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_3
# %bb.1:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $93, %edx
jmp .LBB3_2
.LBB3_3:
leaq 8(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_5
# %bb.4:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $94, %edx
jmp .LBB3_2
.LBB3_5:
leaq 16(%rsp), %rdi
movl $4096, %esi # imm = 0x1000
callq hipMalloc
testl %eax, %eax
je .LBB3_7
# %bb.6:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $95, %edx
jmp .LBB3_2
.LBB3_7:
leaq 24(%rsp), %rdi
movl $49152, %esi # imm = 0xC000
callq hipMalloc
testl %eax, %eax
je .LBB3_9
# %bb.8:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $97, %edx
jmp .LBB3_2
.LBB3_9:
movq (%rsp), %rdi
movl $4096, %edx # imm = 0x1000
xorl %esi, %esi
callq hipMemset
testl %eax, %eax
je .LBB3_11
# %bb.10:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $99, %edx
jmp .LBB3_2
.LBB3_11:
movq 8(%rsp), %rdi
movl $4096, %edx # imm = 0x1000
xorl %esi, %esi
callq hipMemset
testl %eax, %eax
je .LBB3_13
# %bb.12:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $100, %edx
jmp .LBB3_2
.LBB3_13:
movabsq $4294967328, %r15 # imm = 0x100000020
leaq -31(%r15), %r13
leaq 992(%r15), %r12
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_15
# %bb.14:
movq 24(%rsp), %rdi
callq _Z27__device_stub__setup_kernelP12hiprandState
.LBB3_15:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_17
# %bb.16:
movq 24(%rsp), %rdi
movq (%rsp), %rdx
movl $1000000, %esi # imm = 0xF4240
callq _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.LBB3_17:
movl $4096, %r8d # imm = 0x1000
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_19
# %bb.18:
movq 16(%rsp), %rdi
movq (%rsp), %rsi
callq _Z27__device_stub__shmem_reducePiS_
.LBB3_19:
movl $128, %r8d
movq %r15, %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_21
# %bb.20:
movq 8(%rsp), %rdi
movq 16(%rsp), %rsi
callq _Z27__device_stub__shmem_reducePiS_
.LBB3_21:
movq (%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB3_23
# %bb.22:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $110, %edx
jmp .LBB3_2
.LBB3_23:
movq 8(%rsp), %rsi
movl $4096, %edx # imm = 0x1000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB3_25
# %bb.24:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $112, %edx
jmp .LBB3_2
.LBB3_25:
cvtsi2ssl (%r14), %xmm0
divss .LCPI3_0(%rip), %xmm0
mulss .LCPI3_1(%rip), %xmm0
mulss .LCPI3_2(%rip), %xmm0
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
movq (%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_27
# %bb.26:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $130, %edx
jmp .LBB3_2
.LBB3_27:
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_29
# %bb.28:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $131, %edx
jmp .LBB3_2
.LBB3_29:
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_31
# %bb.30:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $132, %edx
jmp .LBB3_2
.LBB3_31:
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
je .LBB3_33
# %bb.32:
movl $.L.str, %edi
movl $.L.str.1, %esi
movl $133, %edx
.LBB3_2:
xorl %eax, %eax
callq printf
movl $1, %eax
.LBB3_34:
addq $32, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_33:
.cfi_def_cfa_offset 80
movq %r14, %rdi
callq free
movq %rbx, %rdi
callq free
xorl %eax, %eax
jmp .LBB3_34
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12setup_kernelP12hiprandState, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23generate_uniform_kernelP12hiprandStateiPi, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12shmem_reducePiS_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12setup_kernelP12hiprandState,@object # @_Z12setup_kernelP12hiprandState
.section .rodata,"a",@progbits
.globl _Z12setup_kernelP12hiprandState
.p2align 3, 0x0
_Z12setup_kernelP12hiprandState:
.quad _Z27__device_stub__setup_kernelP12hiprandState
.size _Z12setup_kernelP12hiprandState, 8
.type _Z23generate_uniform_kernelP12hiprandStateiPi,@object # @_Z23generate_uniform_kernelP12hiprandStateiPi
.globl _Z23generate_uniform_kernelP12hiprandStateiPi
.p2align 3, 0x0
_Z23generate_uniform_kernelP12hiprandStateiPi:
.quad _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.size _Z23generate_uniform_kernelP12hiprandStateiPi, 8
.type _Z12shmem_reducePiS_,@object # @_Z12shmem_reducePiS_
.globl _Z12shmem_reducePiS_
.p2align 3, 0x0
_Z12shmem_reducePiS_:
.quad _Z27__device_stub__shmem_reducePiS_
.size _Z12shmem_reducePiS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Error at %s:%d\n"
.size .L.str, 16
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/aleksey-uvarov/hpc-2019/master/gauss.hip"
.size .L.str.1, 98
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Total area: %1.7f \n"
.size .L.str.2, 20
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12setup_kernelP12hiprandState"
.size .L__unnamed_1, 32
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23generate_uniform_kernelP12hiprandStateiPi"
.size .L__unnamed_2, 46
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z12shmem_reducePiS_"
.size .L__unnamed_3, 21
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__setup_kernelP12hiprandState
.addrsig_sym _Z38__device_stub__generate_uniform_kernelP12hiprandStateiPi
.addrsig_sym _Z27__device_stub__shmem_reducePiS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12setup_kernelP12hiprandState
.addrsig_sym _Z23generate_uniform_kernelP12hiprandStateiPi
.addrsig_sym _Z12shmem_reducePiS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
using namespace std;
#define ITERATIONS 40000
enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};
__global__ void poisson_jacobi_kernel(float *targetimg, float *outimg, int *boundary_array,int c, int w, int h, int boundBoxMinX, int boundBoxMaxX, int boundBoxMinY, int boundBoxMaxY){
int x = threadIdx.x + blockIdx.x * blockDim.x + boundBoxMinX;
int y = threadIdx.y + blockIdx.y * blockDim.y + boundBoxMinY;
for(int channel = 0; channel < c; channel++){
int id = x + y*w + channel * w * h;
int idx_nextX = x+1 + w*y +w*h*channel;
int idx_prevX = x-1 + w*y + w*h*channel;
int idx_nextY = x + w*(y+1) +w*h*channel;
int idx_prevY = x + w*(y-1) +w*h*channel;
//printf("id: %d, idx_nextX: %d, idx_prevX: %d, idx_nextY: %d, idx_prevY: %d\n", id, idx_nextX, idx_prevX, idx_nextY, idx_prevY);
if(boundary_array[id] == INSIDE_MASK){
double neighbor_target = targetimg[idx_nextY]+targetimg[idx_nextX]+targetimg[idx_prevX]+targetimg[idx_prevY];
double neighbor_output = outimg[idx_nextY]+outimg[idx_nextX]+outimg[idx_prevX]+outimg[idx_prevY];
outimg[id] = 0.25*(4*targetimg[id]-neighbor_target + neighbor_output);
}
}
} | code for sm_80
Function : _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ MOV R2, c[0x0][0x178] ; /* 0x00005e0000027a02 */
/* 0x000fc80000000f00 */
/*0020*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ S2R R9, SR_CTAID.Y ; /* 0x0000000000097919 */
/* 0x000e220000002600 */
/*0050*/ ISETP.NE.AND P0, PT, R2.reuse, 0x1, PT ; /* 0x000000010200780c */
/* 0x040fe20003f05270 */
/*0060*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0070*/ MOV R4, c[0x0][0x180] ; /* 0x0000600000047a02 */
/* 0x000fe20000000f00 */
/*0080*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000e220000002200 */
/*0090*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00a0*/ LOP3.LUT R2, R2, 0x1, RZ, 0xc0, !PT ; /* 0x0000000102027812 */
/* 0x000fe400078ec0ff */
/*00b0*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e620000002500 */
/*00c0*/ IMAD R4, R4, c[0x0][0x17c], RZ ; /* 0x00005f0004047a24 */
/* 0x000fc600078e02ff */
/*00d0*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000ea20000002100 */
/*00e0*/ IMAD R5, R9, c[0x0][0x4], R6 ; /* 0x0000010009057a24 */
/* 0x001fca00078e0206 */
/*00f0*/ IADD3 R5, R5, c[0x0][0x18c], RZ ; /* 0x0000630005057a10 */
/* 0x000fe20007ffe0ff */
/*0100*/ @!P0 BRA 0x780 ; /* 0x0000067000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R6, R6, c[0x0][0x18c], RZ ; /* 0x0000630006067a10 */
/* 0x002fe20007ffe0ff */
/*0120*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0130*/ IADD3 R7, R3, c[0x0][0x184], RZ ; /* 0x0000610003077a10 */
/* 0x004fe40007ffe0ff */
/*0140*/ IADD3 R8, R6, c[0x0][0x180], RZ ; /* 0x0000600006087a10 */
/* 0x000fe20007ffe0ff */
/*0150*/ IMAD R10, R9.reuse, c[0x0][0x4], R6 ; /* 0x00000100090a7a24 */
/* 0x040fe400078e0206 */
/*0160*/ IMAD R6, R0, c[0x0][0x0], R7 ; /* 0x0000000000067a24 */
/* 0x000fe400078e0207 */
/*0170*/ IMAD R9, R9, c[0x0][0x4], R8 ; /* 0x0000010009097a24 */
/* 0x000fe200078e0208 */
/*0180*/ IADD3 R11, R10.reuse, 0x1, RZ ; /* 0x000000010a0b7810 */
/* 0x040fe20007ffe0ff */
/*0190*/ IMAD R7, R5, c[0x0][0x17c], R6 ; /* 0x00005f0005077a24 */
/* 0x000fe200078e0206 */
/*01a0*/ IADD3 R13, R10, -0x1, RZ ; /* 0xffffffff0a0d7810 */
/* 0x000fc40007ffe0ff */
/*01b0*/ IADD3 R15, R9, -0x1, RZ ; /* 0xffffffff090f7810 */
/* 0x000fe20007ffe0ff */
/*01c0*/ IMAD R11, R11, c[0x0][0x17c], R6.reuse ; /* 0x00005f000b0b7a24 */
/* 0x100fe200078e0206 */
/*01d0*/ IADD3 R25, R9.reuse, 0x1, RZ ; /* 0x0000000109197810 */
/* 0x040fe20007ffe0ff */
/*01e0*/ IMAD R9, R9, c[0x0][0x17c], R6.reuse ; /* 0x00005f0009097a24 */
/* 0x100fe200078e0206 */
/*01f0*/ IADD3 R27, R7, 0x1, RZ ; /* 0x00000001071b7810 */
/* 0x000fe20007ffe0ff */
/*0200*/ IMAD R13, R13, c[0x0][0x17c], R6.reuse ; /* 0x00005f000d0d7a24 */
/* 0x100fe400078e0206 */
/*0210*/ IMAD R15, R15, c[0x0][0x17c], R6.reuse ; /* 0x00005f000f0f7a24 */
/* 0x100fe400078e0206 */
/*0220*/ IMAD R25, R25, c[0x0][0x17c], R6 ; /* 0x00005f0019197a24 */
/* 0x000fe200078e0206 */
/*0230*/ IADD3 R6, R2, -c[0x0][0x178], RZ ; /* 0x80005e0002067a10 */
/* 0x000fc40007ffe0ff */
/*0240*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fd400000001ff */
/*0250*/ IMAD.WIDE R16, R7, R8, c[0x0][0x170] ; /* 0x00005c0007107625 */
/* 0x001fcc00078e0208 */
/*0260*/ LDG.E R16, [R16.64] ; /* 0x0000000610107981 */
/* 0x000ea2000c1e1900 */
/*0270*/ BSSY B0, 0x490 ; /* 0x0000021000007945 */
/* 0x000fe20003800000 */
/*0280*/ ISETP.NE.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */
/* 0x004fda0003f05270 */
/*0290*/ @P0 BRA 0x480 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*02a0*/ IMAD.WIDE R28, R11, R8, c[0x0][0x160] ; /* 0x000058000b1c7625 */
/* 0x000fc800078e0208 */
/*02b0*/ IMAD.WIDE R18, R27, R8.reuse, c[0x0][0x160] ; /* 0x000058001b127625 */
/* 0x080fe200078e0208 */
/*02c0*/ LDG.E R10, [R28.64] ; /* 0x000000061c0a7981 */
/* 0x0000a6000c1e1900 */
/*02d0*/ IMAD.WIDE R20, R11, R8.reuse, c[0x0][0x168] ; /* 0x00005a000b147625 */
/* 0x080fe200078e0208 */
/*02e0*/ LDG.E R12, [R18.64] ; /* 0x00000006120c7981 */
/* 0x000ea6000c1e1900 */
/*02f0*/ IMAD.WIDE R16, R27, R8.reuse, c[0x0][0x168] ; /* 0x00005a001b107625 */
/* 0x080fe200078e0208 */
/*0300*/ LDG.E R24, [R18.64+-0x8] ; /* 0xfffff80612187981 */
/* 0x000ee6000c1e1900 */
/*0310*/ IMAD.WIDE R22, R13.reuse, R8.reuse, c[0x0][0x160] ; /* 0x000058000d167625 */
/* 0x0c0fe200078e0208 */
/*0320*/ LDG.E R20, [R20.64] ; /* 0x0000000614147981 */
/* 0x000328000c1e1900 */
/*0330*/ LDG.E R26, [R16.64] ; /* 0x00000006101a7981 */
/* 0x000f22000c1e1900 */
/*0340*/ IMAD.WIDE R28, R13, R8, c[0x0][0x168] ; /* 0x00005a000d1c7625 */
/* 0x001fc600078e0208 */
/*0350*/ LDG.E R14, [R18.64+-0x4] ; /* 0xfffffc06120e7981 */
/* 0x000f68000c1e1900 */
/*0360*/ LDG.E R22, [R22.64] ; /* 0x0000000616167981 */
/* 0x000f68000c1e1900 */
/*0370*/ LDG.E R21, [R16.64+-0x8] ; /* 0xfffff80610157981 */
/* 0x002f68000c1e1900 */
/*0380*/ LDG.E R28, [R28.64] ; /* 0x000000061c1c7981 */
/* 0x000f62000c1e1900 */
/*0390*/ FADD R10, R12, R10 ; /* 0x0000000a0c0a7221 */
/* 0x004fc80000000000 */
/*03a0*/ FADD R10, R10, R24 ; /* 0x000000180a0a7221 */
/* 0x008fe40000000000 */
/*03b0*/ FADD R26, R26, R20 ; /* 0x000000141a1a7221 */
/* 0x010fe40000000000 */
/*03c0*/ FMUL R14, R14, 4 ; /* 0x408000000e0e7820 */
/* 0x020fe40000400000 */
/*03d0*/ FADD R10, R10, R22 ; /* 0x000000160a0a7221 */
/* 0x000fe40000000000 */
/*03e0*/ FADD R26, R26, R21 ; /* 0x000000151a1a7221 */
/* 0x000fe20000000000 */
/*03f0*/ F2F.F64.F32 R18, R14 ; /* 0x0000000e00127310 */
/* 0x000fe60000201800 */
/*0400*/ FADD R22, R26, R28 ; /* 0x0000001c1a167221 */
/* 0x000fca0000000000 */
/*0410*/ F2F.F64.F32 R20, R10 ; /* 0x0000000a00147310 */
/* 0x000e300000201800 */
/*0420*/ F2F.F64.F32 R22, R22 ; /* 0x0000001600167310 */
/* 0x000e620000201800 */
/*0430*/ DADD R18, -R20, R18 ; /* 0x0000000014127229 */
/* 0x001e4c0000000112 */
/*0440*/ DADD R18, R22, R18 ; /* 0x0000000016127229 */
/* 0x002e0c0000000012 */
/*0450*/ DMUL R18, R18, 0.25 ; /* 0x3fd0000012127828 */
/* 0x001e140000000000 */
/*0460*/ F2F.F32.F64 R19, R18 ; /* 0x0000001200137310 */
/* 0x001e240000301000 */
/*0470*/ STG.E [R16.64+-0x4], R19 ; /* 0xfffffc1310007986 */
/* 0x0011e4000c101906 */
/*0480*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0490*/ IMAD.WIDE R16, R9, R8, c[0x0][0x170] ; /* 0x00005c0009107625 */
/* 0x001fcc00078e0208 */
/*04a0*/ LDG.E R16, [R16.64] ; /* 0x0000000610107981 */
/* 0x000ea2000c1e1900 */
/*04b0*/ BSSY B0, 0x6d0 ; /* 0x0000021000007945 */
/* 0x000fe20003800000 */
/*04c0*/ ISETP.NE.AND P0, PT, R16, RZ, PT ; /* 0x000000ff1000720c */
/* 0x004fda0003f05270 */
/*04d0*/ @P0 BRA 0x6c0 ; /* 0x000001e000000947 */
/* 0x000fea0003800000 */
/*04e0*/ IMAD.WIDE R28, R25, R8, c[0x0][0x160] ; /* 0x00005800191c7625 */
/* 0x000fc800078e0208 */
/*04f0*/ IMAD.WIDE R18, R9, R8.reuse, c[0x0][0x160] ; /* 0x0000580009127625 */
/* 0x080fe200078e0208 */
/*0500*/ LDG.E R10, [R28.64] ; /* 0x000000061c0a7981 */
/* 0x0000a6000c1e1900 */
/*0510*/ IMAD.WIDE R20, R25, R8.reuse, c[0x0][0x168] ; /* 0x00005a0019147625 */
/* 0x080fe200078e0208 */
/*0520*/ LDG.E R12, [R18.64+0x4] ; /* 0x00000406120c7981 */
/* 0x000ea6000c1e1900 */
/*0530*/ IMAD.WIDE R16, R9, R8.reuse, c[0x0][0x168] ; /* 0x00005a0009107625 */
/* 0x080fe200078e0208 */
/*0540*/ LDG.E R24, [R18.64+-0x4] ; /* 0xfffffc0612187981 */
/* 0x000ee6000c1e1900 */
/*0550*/ IMAD.WIDE R22, R15.reuse, R8.reuse, c[0x0][0x160] ; /* 0x000058000f167625 */
/* 0x0c0fe200078e0208 */
/*0560*/ LDG.E R20, [R20.64] ; /* 0x0000000614147981 */
/* 0x000f28000c1e1900 */
/*0570*/ LDG.E R26, [R16.64+0x4] ; /* 0x00000406101a7981 */
/* 0x000f22000c1e1900 */
/*0580*/ IMAD.WIDE R28, R15, R8, c[0x0][0x168] ; /* 0x00005a000f1c7625 */
/* 0x001fc600078e0208 */
/*0590*/ LDG.E R14, [R18.64] ; /* 0x00000006120e7981 */
/* 0x000f68000c1e1900 */
/*05a0*/ LDG.E R22, [R22.64] ; /* 0x0000000616167981 */
/* 0x000f68000c1e1900 */
/*05b0*/ LDG.E R8, [R16.64+-0x4] ; /* 0xfffffc0610087981 */
/* 0x000f68000c1e1900 */
/*05c0*/ LDG.E R28, [R28.64] ; /* 0x000000061c1c7981 */
/* 0x000f62000c1e1900 */
/*05d0*/ FADD R10, R12, R10 ; /* 0x0000000a0c0a7221 */
/* 0x004fc80000000000 */
/*05e0*/ FADD R10, R10, R24 ; /* 0x000000180a0a7221 */
/* 0x008fe40000000000 */
/*05f0*/ FADD R26, R26, R20 ; /* 0x000000141a1a7221 */
/* 0x010fe40000000000 */
/*0600*/ FMUL R14, R14, 4 ; /* 0x408000000e0e7820 */
/* 0x020fe40000400000 */
/*0610*/ FADD R10, R10, R22 ; /* 0x000000160a0a7221 */
/* 0x000fe40000000000 */
/*0620*/ F2F.F64.F32 R18, R14 ; /* 0x0000000e00127310 */
/* 0x000fe20000201800 */
/*0630*/ FADD R8, R26, R8 ; /* 0x000000081a087221 */
/* 0x000fce0000000000 */
/*0640*/ F2F.F64.F32 R20, R10 ; /* 0x0000000a00147310 */
/* 0x000e220000201800 */
/*0650*/ FADD R8, R8, R28 ; /* 0x0000001c08087221 */
/* 0x000fce0000000000 */
/*0660*/ F2F.F64.F32 R22, R8 ; /* 0x0000000800167310 */
/* 0x000e620000201800 */
/*0670*/ DADD R18, -R20, R18 ; /* 0x0000000014127229 */
/* 0x001e4c0000000112 */
/*0680*/ DADD R18, R22, R18 ; /* 0x0000000016127229 */
/* 0x002e0c0000000012 */
/*0690*/ DMUL R18, R18, 0.25 ; /* 0x3fd0000012127828 */
/* 0x001e140000000000 */
/*06a0*/ F2F.F32.F64 R19, R18 ; /* 0x0000001200137310 */
/* 0x001e240000301000 */
/*06b0*/ STG.E [R16.64], R19 ; /* 0x0000001310007986 */
/* 0x0011e4000c101906 */
/*06c0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*06d0*/ UIADD3 UR4, UR4, 0x2, URZ ; /* 0x0000000204047890 */
/* 0x000fe2000fffe03f */
/*06e0*/ LEA R9, R4.reuse, R9, 0x1 ; /* 0x0000000904097211 */
/* 0x040fe400078e08ff */
/*06f0*/ LEA R15, R4.reuse, R15, 0x1 ; /* 0x0000000f040f7211 */
/* 0x040fe400078e08ff */
/*0700*/ LEA R25, R4, R25, 0x1 ; /* 0x0000001904197211 */
/* 0x000fe400078e08ff */
/*0710*/ IADD3 R8, R6, UR4, RZ ; /* 0x0000000406087c10 */
/* 0x000fe4000fffe0ff */
/*0720*/ LEA R27, R4, R27, 0x1 ; /* 0x0000001b041b7211 */
/* 0x000fc400078e08ff */
/*0730*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe40003f05270 */
/*0740*/ LEA R11, R4.reuse, R11, 0x1 ; /* 0x0000000b040b7211 */
/* 0x040fe400078e08ff */
/*0750*/ LEA R13, R4.reuse, R13, 0x1 ; /* 0x0000000d040d7211 */
/* 0x040fe400078e08ff */
/*0760*/ LEA R7, R4, R7, 0x1 ; /* 0x0000000704077211 */
/* 0x000fce00078e08ff */
/*0770*/ @P0 BRA 0x240 ; /* 0xfffffac000000947 */
/* 0x000fea000383ffff */
/*0780*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x002fe20003f05270 */
/*0790*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x004fd800078e0203 */
/*07a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*07b0*/ IADD3 R6, R0, c[0x0][0x184], RZ ; /* 0x0000610000067a10 */
/* 0x000fe40007ffe0ff */
/*07c0*/ MOV R0, 0x4 ; /* 0x0000000400007802 */
/* 0x000fc60000000f00 */
/*07d0*/ IMAD R3, R5, c[0x0][0x17c], R6 ; /* 0x00005f0005037a24 */
/* 0x000fc800078e0206 */
/*07e0*/ IMAD R9, R4, UR4, R3 ; /* 0x0000000404097c24 */
/* 0x000fc8000f8e0203 */
/*07f0*/ IMAD.WIDE R2, R9, R0, c[0x0][0x170] ; /* 0x00005c0009027625 */
/* 0x000fcc00078e0200 */
/*0800*/ LDG.E R2, [R2.64] ; /* 0x0000000602027981 */
/* 0x000ea2000c1e1900 */
/*0810*/ MOV R8, c[0x0][0x17c] ; /* 0x00005f0000087a02 */
/* 0x000fca0000000f00 */
/*0820*/ IMAD R5, R5, R8, c[0x0][0x17c] ; /* 0x00005f0005057624 */
/* 0x000fe200078e0208 */
/*0830*/ IADD3 R8, RZ, -c[0x0][0x17c], RZ ; /* 0x80005f00ff087a10 */
/* 0x000fc80007ffe0ff */
/*0840*/ LEA R7, R8, R5.reuse, 0x1 ; /* 0x0000000508077211 */
/* 0x080fe400078e08ff */
/*0850*/ IADD3 R5, R6.reuse, R5, RZ ; /* 0x0000000506057210 */
/* 0x040fe40007ffe0ff */
/*0860*/ IADD3 R7, R6, R7, RZ ; /* 0x0000000706077210 */
/* 0x000fc60007ffe0ff */
/*0870*/ IMAD R11, R4.reuse, UR4, R5 ; /* 0x00000004040b7c24 */
/* 0x040fe4000f8e0205 */
/*0880*/ IMAD R13, R4, UR4, R7 ; /* 0x00000004040d7c24 */
/* 0x000fe2000f8e0207 */
/*0890*/ ISETP.NE.AND P0, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x004fda0003f05270 */
/*08a0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*08b0*/ IMAD.WIDE R4, R11, R0, c[0x0][0x160] ; /* 0x000058000b047625 */
/* 0x000fc800078e0200 */
/*08c0*/ IMAD.WIDE R6, R9, R0.reuse, c[0x0][0x160] ; /* 0x0000580009067625 */
/* 0x080fe400078e0200 */
/*08d0*/ LDG.E R5, [R4.64] ; /* 0x0000000604057981 */
/* 0x000ea4000c1e1900 */
/*08e0*/ IMAD.WIDE R10, R11, R0.reuse, c[0x0][0x168] ; /* 0x00005a000b0a7625 */
/* 0x080fe400078e0200 */
/*08f0*/ LDG.E R14, [R6.64+0x4] ; /* 0x00000406060e7981 */
/* 0x000ea4000c1e1900 */
/*0900*/ IMAD.WIDE R2, R9, R0.reuse, c[0x0][0x168] ; /* 0x00005a0009027625 */
/* 0x080fe400078e0200 */
/*0910*/ LDG.E R16, [R6.64+-0x4] ; /* 0xfffffc0606107981 */
/* 0x001ee4000c1e1900 */
/*0920*/ IMAD.WIDE R8, R13, R0, c[0x0][0x160] ; /* 0x000058000d087625 */
/* 0x000fc400078e0200 */
/*0930*/ LDG.E R10, [R10.64] ; /* 0x000000060a0a7981 */
/* 0x000f24000c1e1900 */
/*0940*/ IMAD.WIDE R12, R13, R0, c[0x0][0x168] ; /* 0x00005a000d0c7625 */
/* 0x000fe400078e0200 */
/*0950*/ LDG.E R17, [R2.64+0x4] ; /* 0x0000040602117981 */
/* 0x000f28000c1e1900 */
/*0960*/ LDG.E R15, [R6.64] ; /* 0x00000006060f7981 */
/* 0x000f68000c1e1900 */
/*0970*/ LDG.E R8, [R8.64] ; /* 0x0000000608087981 */
/* 0x000f68000c1e1900 */
/*0980*/ LDG.E R18, [R2.64+-0x4] ; /* 0xfffffc0602127981 */
/* 0x000f68000c1e1900 */
/*0990*/ LDG.E R12, [R12.64] ; /* 0x000000060c0c7981 */
/* 0x000f62000c1e1900 */
/*09a0*/ FADD R5, R14, R5 ; /* 0x000000050e057221 */
/* 0x004fc80000000000 */
/*09b0*/ FADD R5, R5, R16 ; /* 0x0000001005057221 */
/* 0x008fe40000000000 */
/*09c0*/ FADD R17, R17, R10 ; /* 0x0000000a11117221 */
/* 0x010fe40000000000 */
/*09d0*/ FMUL R15, R15, 4 ; /* 0x408000000f0f7820 */
/* 0x020fe40000400000 */
/*09e0*/ FADD R0, R5, R8 ; /* 0x0000000805007221 */
/* 0x000fe40000000000 */
/*09f0*/ FADD R17, R17, R18 ; /* 0x0000001211117221 */
/* 0x000fe40000000000 */
/*0a00*/ F2F.F64.F32 R14, R15 ; /* 0x0000000f000e7310 */
/* 0x000fe40000201800 */
/*0a10*/ FADD R6, R17, R12 ; /* 0x0000000c11067221 */
/* 0x000fcc0000000000 */
/*0a20*/ F2F.F64.F32 R4, R0 ; /* 0x0000000000047310 */
/* 0x000e300000201800 */
/*0a30*/ F2F.F64.F32 R6, R6 ; /* 0x0000000600067310 */
/* 0x000e620000201800 */
/*0a40*/ DADD R4, -R4, R14 ; /* 0x0000000004047229 */
/* 0x001e4c000000010e */
/*0a50*/ DADD R4, R6, R4 ; /* 0x0000000006047229 */
/* 0x002e0c0000000004 */
/*0a60*/ DMUL R4, R4, 0.25 ; /* 0x3fd0000004047828 */
/* 0x001e140000000000 */
/*0a70*/ F2F.F32.F64 R5, R4 ; /* 0x0000000400057310 */
/* 0x001e240000301000 */
/*0a80*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0a90*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0aa0*/ BRA 0xaa0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b00*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b10*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0b70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
using namespace std;
#define ITERATIONS 40000
enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};
// One Jacobi relaxation step of a discrete Poisson equation over an image
// region (presumably Poisson image editing / seamless cloning — confirm).
// One thread handles one pixel (x, y) inside the blend bounding box and
// loops over all c channels. Images are channel-planar: idx = x + y*w + channel*w*h.
// NOTE(review): boundBoxMaxX/boundBoxMaxY are accepted but never used, and
// there is no guard clipping threads to the box or to the image edge — the
// launcher presumably sizes the grid exactly and the mask excludes border
// pixels (otherwise x-1 / y+1 neighbour reads can go out of bounds); verify.
// NOTE(review): outimg is read (neighbours) and written (this pixel) in the
// same pass with no synchronization, so concurrent updates interleave
// non-deterministically — confirm this chaotic-relaxation behaviour is intended.
__global__ void poisson_jacobi_kernel(float *targetimg, float *outimg, int *boundary_array,int c, int w, int h, int boundBoxMinX, int boundBoxMaxX, int boundBoxMinY, int boundBoxMaxY){
// Pixel coordinates, offset so thread (0,0) maps to the box's top-left corner.
int x = threadIdx.x + blockIdx.x * blockDim.x + boundBoxMinX;
int y = threadIdx.y + blockIdx.y * blockDim.y + boundBoxMinY;
for(int channel = 0; channel < c; channel++){
// Flat indices of this pixel and its four 4-connected neighbours
// within the current channel plane.
int id = x + y*w + channel * w * h;
int idx_nextX = x+1 + w*y +w*h*channel;
int idx_prevX = x-1 + w*y + w*h*channel;
int idx_nextY = x + w*(y+1) +w*h*channel;
int idx_prevY = x + w*(y-1) +w*h*channel;
//printf("id: %d, idx_nextX: %d, idx_prevX: %d, idx_nextY: %d, idx_prevY: %d\n", id, idx_nextX, idx_prevX, idx_nextY, idx_prevY);
// Only pixels strictly inside the mask are updated; boundary/outside
// pixels keep their current outimg value (fixed boundary values).
if(boundary_array[id] == INSIDE_MASK){
double neighbor_target = targetimg[idx_nextY]+targetimg[idx_nextX]+targetimg[idx_prevX]+targetimg[idx_prevY];
double neighbor_output = outimg[idx_nextY]+outimg[idx_nextX]+outimg[idx_prevX]+outimg[idx_prevY];
// Jacobi update: (4*target - sum(target neighbours)) is the discrete
// Laplacian guidance term; average it with the neighbour outputs.
outimg[id] = 0.25*(4*targetimg[id]-neighbor_target + neighbor_output);
}
}
} | .file "tmpxft_00093fc1_00000000-6_poisson_jacobi_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii
.type _Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii, @function
_Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii:
.LFB2051:
.cfi_startproc
endbr64
subq $216, %rsp
.cfi_def_cfa_offset 224
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movl %r8d, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 200(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 12(%rsp), %rax
movq %rax, 152(%rsp)
leaq 224(%rsp), %rax
movq %rax, 160(%rsp)
leaq 232(%rsp), %rax
movq %rax, 168(%rsp)
leaq 240(%rsp), %rax
movq %rax, 176(%rsp)
leaq 248(%rsp), %rax
movq %rax, 184(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 200(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $216, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 232
pushq 56(%rsp)
.cfi_def_cfa_offset 240
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z21poisson_jacobi_kernelPfS_Piiiiiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 224
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii, .-_Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii
.globl _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.type _Z21poisson_jacobi_kernelPfS_Piiiiiiii, @function
_Z21poisson_jacobi_kernelPfS_Piiiiiiii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 40
movl 40(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 48
call _Z52__device_stub__Z21poisson_jacobi_kernelPfS_PiiiiiiiiPfS_Piiiiiiii
addq $40, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z21poisson_jacobi_kernelPfS_Piiiiiiii, .-_Z21poisson_jacobi_kernelPfS_Piiiiiiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z21poisson_jacobi_kernelPfS_Piiiiiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z21poisson_jacobi_kernelPfS_Piiiiiiii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
using namespace std;
#define ITERATIONS 40000
enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};
// One Jacobi relaxation step of a discrete Poisson equation over an image
// region (presumably Poisson image editing / seamless cloning — confirm).
// One thread handles one pixel (x, y) inside the blend bounding box and
// loops over all c channels. Images are channel-planar: idx = x + y*w + channel*w*h.
// NOTE(review): boundBoxMaxX/boundBoxMaxY are accepted but never used, and
// there is no guard clipping threads to the box or to the image edge — the
// launcher presumably sizes the grid exactly and the mask excludes border
// pixels (otherwise x-1 / y+1 neighbour reads can go out of bounds); verify.
// NOTE(review): outimg is read (neighbours) and written (this pixel) in the
// same pass with no synchronization, so concurrent updates interleave
// non-deterministically — confirm this chaotic-relaxation behaviour is intended.
__global__ void poisson_jacobi_kernel(float *targetimg, float *outimg, int *boundary_array,int c, int w, int h, int boundBoxMinX, int boundBoxMaxX, int boundBoxMinY, int boundBoxMaxY){
// Pixel coordinates, offset so thread (0,0) maps to the box's top-left corner.
int x = threadIdx.x + blockIdx.x * blockDim.x + boundBoxMinX;
int y = threadIdx.y + blockIdx.y * blockDim.y + boundBoxMinY;
for(int channel = 0; channel < c; channel++){
// Flat indices of this pixel and its four 4-connected neighbours
// within the current channel plane.
int id = x + y*w + channel * w * h;
int idx_nextX = x+1 + w*y +w*h*channel;
int idx_prevX = x-1 + w*y + w*h*channel;
int idx_nextY = x + w*(y+1) +w*h*channel;
int idx_prevY = x + w*(y-1) +w*h*channel;
//printf("id: %d, idx_nextX: %d, idx_prevX: %d, idx_nextY: %d, idx_prevY: %d\n", id, idx_nextX, idx_prevX, idx_nextY, idx_prevY);
// Only pixels strictly inside the mask are updated; boundary/outside
// pixels keep their current outimg value (fixed boundary values).
if(boundary_array[id] == INSIDE_MASK){
double neighbor_target = targetimg[idx_nextY]+targetimg[idx_nextX]+targetimg[idx_prevX]+targetimg[idx_prevY];
double neighbor_output = outimg[idx_nextY]+outimg[idx_nextX]+outimg[idx_prevX]+outimg[idx_prevY];
// Jacobi update: (4*target - sum(target neighbours)) is the discrete
// Laplacian guidance term; average it with the neighbour outputs.
outimg[id] = 0.25*(4*targetimg[id]-neighbor_target + neighbor_output);
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
#define ITERATIONS 40000
enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};
// HIP port of the CUDA kernel above: one Jacobi relaxation step of a discrete
// Poisson equation over an image region (presumably Poisson image editing /
// seamless cloning — confirm). One thread handles one pixel (x, y) inside the
// blend bounding box and loops over all c channels. Images are channel-planar:
// idx = x + y*w + channel*w*h.
// NOTE(review): boundBoxMaxX/boundBoxMaxY are accepted but never used, and
// there is no guard clipping threads to the box or to the image edge — the
// launcher presumably sizes the grid exactly and the mask excludes border
// pixels (otherwise x-1 / y+1 neighbour reads can go out of bounds); verify.
// NOTE(review): outimg is read (neighbours) and written (this pixel) in the
// same pass with no synchronization, so concurrent updates interleave
// non-deterministically — confirm this chaotic-relaxation behaviour is intended.
__global__ void poisson_jacobi_kernel(float *targetimg, float *outimg, int *boundary_array,int c, int w, int h, int boundBoxMinX, int boundBoxMaxX, int boundBoxMinY, int boundBoxMaxY){
// Pixel coordinates, offset so thread (0,0) maps to the box's top-left corner.
int x = threadIdx.x + blockIdx.x * blockDim.x + boundBoxMinX;
int y = threadIdx.y + blockIdx.y * blockDim.y + boundBoxMinY;
for(int channel = 0; channel < c; channel++){
// Flat indices of this pixel and its four 4-connected neighbours
// within the current channel plane.
int id = x + y*w + channel * w * h;
int idx_nextX = x+1 + w*y +w*h*channel;
int idx_prevX = x-1 + w*y + w*h*channel;
int idx_nextY = x + w*(y+1) +w*h*channel;
int idx_prevY = x + w*(y-1) +w*h*channel;
//printf("id: %d, idx_nextX: %d, idx_prevX: %d, idx_nextY: %d, idx_prevY: %d\n", id, idx_nextX, idx_prevX, idx_nextY, idx_prevY);
// Only pixels strictly inside the mask are updated; boundary/outside
// pixels keep their current outimg value (fixed boundary values).
if(boundary_array[id] == INSIDE_MASK){
double neighbor_target = targetimg[idx_nextY]+targetimg[idx_nextX]+targetimg[idx_prevX]+targetimg[idx_prevY];
double neighbor_output = outimg[idx_nextY]+outimg[idx_nextX]+outimg[idx_prevX]+outimg[idx_prevY];
// Jacobi update: (4*target - sum(target neighbours)) is the discrete
// Laplacian guidance term; average it with the neighbour outputs.
outimg[id] = 0.25*(4*targetimg[id]-neighbor_target + neighbor_output);
}
}
}
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
using namespace std;
#define ITERATIONS 40000
enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};
__global__ void poisson_jacobi_kernel(float *targetimg, float *outimg, int *boundary_array,int c, int w, int h, int boundBoxMinX, int boundBoxMaxX, int boundBoxMinY, int boundBoxMaxY){
int x = threadIdx.x + blockIdx.x * blockDim.x + boundBoxMinX;
int y = threadIdx.y + blockIdx.y * blockDim.y + boundBoxMinY;
for(int channel = 0; channel < c; channel++){
int id = x + y*w + channel * w * h;
int idx_nextX = x+1 + w*y +w*h*channel;
int idx_prevX = x-1 + w*y + w*h*channel;
int idx_nextY = x + w*(y+1) +w*h*channel;
int idx_prevY = x + w*(y-1) +w*h*channel;
//printf("id: %d, idx_nextX: %d, idx_prevX: %d, idx_nextY: %d, idx_prevY: %d\n", id, idx_nextX, idx_prevX, idx_nextY, idx_prevY);
if(boundary_array[id] == INSIDE_MASK){
double neighbor_target = targetimg[idx_nextY]+targetimg[idx_nextX]+targetimg[idx_prevX]+targetimg[idx_prevY];
double neighbor_output = outimg[idx_nextY]+outimg[idx_nextX]+outimg[idx_prevX]+outimg[idx_prevY];
outimg[id] = 0.25*(4*targetimg[id]-neighbor_target + neighbor_output);
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.globl _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.p2align 8
.type _Z21poisson_jacobi_kernelPfS_Piiiiiiii,@function
_Z21poisson_jacobi_kernelPfS_Piiiiiiii:
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_5
s_clause 0x3
s_load_b32 s3, s[0:1], 0x44
s_load_b32 s10, s[0:1], 0x2c
s_load_b32 s11, s[0:1], 0x24
s_load_b64 s[8:9], s[0:1], 0x1c
v_bfe_u32 v1, v0, 10, 10
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_and_b32_e32 v0, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s12, s3, 16
s_and_b32 s3, s3, 0xffff
s_mul_i32 s15, s15, s12
v_add_nc_u32_e32 v2, s11, v0
v_add3_u32 v1, s15, s10, v1
s_mul_i32 s14, s14, s3
s_mul_i32 s3, s9, s8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, v1, s8
v_add_nc_u32_e32 v4, -1, v1
v_mad_u64_u32 v[0:1], null, v4, s8, v[2:3]
v_add3_u32 v5, v3, s8, v2
v_add_nc_u32_e32 v6, v2, v3
s_branch .LBB0_3
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s8
v_add_nc_u32_e32 v5, s3, v5
v_add_nc_u32_e32 v0, s3, v0
v_add_nc_u32_e32 v6, s3, v6
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s2, 0
s_cbranch_scc1 .LBB0_5
.LBB0_3:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v3, s14, v6
s_mov_b32 s8, exec_lo
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[3:4]
v_add_co_u32 v7, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v8, vcc_lo, s1, v2, vcc_lo
global_load_b32 v4, v[7:8], off
s_waitcnt vmcnt(0)
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB0_2
v_add_nc_u32_e32 v7, s14, v5
v_add_nc_u32_e32 v9, 1, v3
v_add_nc_u32_e32 v3, -1, v3
v_add_nc_u32_e32 v11, s14, v0
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v8, 31, v7
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v12, 31, v11
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[7:8], 2, v[7:8]
v_lshlrev_b64 v[9:10], 2, v[9:10]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[11:12], 2, v[11:12]
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v13, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v14, vcc_lo, s5, v8, vcc_lo
v_add_co_u32 v15, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v16, vcc_lo, s5, v10, vcc_lo
v_add_co_u32 v17, vcc_lo, s4, v3
v_add_co_ci_u32_e32 v18, vcc_lo, s5, v4, vcc_lo
s_clause 0x2
global_load_b32 v19, v[13:14], off
global_load_b32 v20, v[15:16], off
global_load_b32 v17, v[17:18], off
v_add_co_u32 v13, vcc_lo, s4, v11
v_add_co_ci_u32_e32 v14, vcc_lo, s5, v12, vcc_lo
v_add_co_u32 v15, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v16, vcc_lo, s5, v2, vcc_lo
s_clause 0x1
global_load_b32 v13, v[13:14], off
global_load_b32 v14, v[15:16], off
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
v_add_co_u32 v9, vcc_lo, s6, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s7, v10, vcc_lo
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
s_clause 0x1
global_load_b32 v15, v[7:8], off
global_load_b32 v9, v[9:10], off
v_add_co_u32 v7, vcc_lo, s6, v11
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v12, vcc_lo
s_clause 0x1
global_load_b32 v10, v[3:4], off
global_load_b32 v11, v[7:8], off
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
s_waitcnt vmcnt(7)
v_add_f32_e32 v3, v19, v20
s_waitcnt vmcnt(6)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v3, v3, v17
s_waitcnt vmcnt(5)
v_add_f32_e32 v3, v3, v13
s_waitcnt vmcnt(4)
v_mul_f32_e32 v7, 4.0, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cvt_f64_f32_e32 v[3:4], v3
v_cvt_f64_f32_e32 v[7:8], v7
s_waitcnt vmcnt(2)
v_add_f32_e32 v9, v15, v9
s_waitcnt vmcnt(1)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f32_e32 v9, v9, v10
s_waitcnt vmcnt(0)
v_add_f32_e32 v9, v9, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cvt_f64_f32_e32 v[9:10], v9
v_add_f64 v[3:4], v[7:8], -v[3:4]
v_add_f64 v[3:4], v[3:4], v[9:10]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ldexp_f64 v[3:4], v[3:4], -2
v_cvt_f32_f64_e32 v3, v[3:4]
global_store_b32 v[1:2], v3, off
s_branch .LBB0_2
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 312
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 21
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21poisson_jacobi_kernelPfS_Piiiiiiii, .Lfunc_end0-_Z21poisson_jacobi_kernelPfS_Piiiiiiii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: by_value
- .offset: 44
.size: 4
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: by_value
- .offset: 56
.size: 4
.value_kind: hidden_block_count_x
- .offset: 60
.size: 4
.value_kind: hidden_block_count_y
- .offset: 64
.size: 4
.value_kind: hidden_block_count_z
- .offset: 68
.size: 2
.value_kind: hidden_group_size_x
- .offset: 70
.size: 2
.value_kind: hidden_group_size_y
- .offset: 72
.size: 2
.value_kind: hidden_group_size_z
- .offset: 74
.size: 2
.value_kind: hidden_remainder_x
- .offset: 76
.size: 2
.value_kind: hidden_remainder_y
- .offset: 78
.size: 2
.value_kind: hidden_remainder_z
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 112
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 120
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 312
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21poisson_jacobi_kernelPfS_Piiiiiiii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z21poisson_jacobi_kernelPfS_Piiiiiiii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 21
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.