system stringclasses 7 values | input stringlengths 10 94.5k | output stringlengths 14 55.5k |
|---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*
CS 4402 Distributed and Parallel Systems
Assignment 2 Question 1: N thread blocks and N threads per thread block
Sarah Whelan 250778849
TO RUN: nvcc q1_swhela2.cu -o q1_swhela2
./q1_swhela2
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition
void genPolynomials(int *polyA, int *polyB, int size);
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize);
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy);
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy);
void checkCUDAError(const char* msg);
int main() {
srand(time(NULL));
int numTerms;
// get user desired input on length of polynomials
printf("Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: ");
scanf("%d", &numTerms);
printf("\nYou entered '%d'.\n", numTerms);
if (numTerms > 10) {
printf("Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time.");
return 1;
}
// then bitshift by input value to determine actual value of numTerms
numTerms = 1 << numTerms;
printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1);
// use numTerms as the number of blocks per thread and the number of blocks
int threadsPerBlock = numTerms;
int blocks = numTerms;
// instantiate and allocate host memory blocks to store each polynomial of size numTerms
int *host_polyA, *host_polyB;
host_polyA = (int *) malloc(numTerms * sizeof(int));
host_polyB = (int *) malloc(numTerms * sizeof(int));
// generate random polynomials of size numTerms
printf("Generating polynomials...\n\n");
genPolynomials(host_polyA, host_polyB, numTerms);
printf("polyA:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyA[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\npolyB:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyB[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\n");
// determine degree of product
int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1
// allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial)
// and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product
// two different allocations in order to verify results at the end!
int *host_product_serial, *host_product_parallel, *host_final_product;
host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic
host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks
host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed
// ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one)
for (int i = 0; i < numTerms*numTerms; i++) {
host_product_parallel[i] = 0;
}
// ensure all vals in host_final_product are 0
for (int i = 0; i < degreeOfProduct+1; i++) {
host_final_product[i] = 0;
}
// initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product
int *dev_polyA, *dev_polyB, *dev_product;
cudaMalloc( (void **) &dev_polyA, numTerms * sizeof(int));
cudaMalloc( (void **) &dev_polyB, numTerms * sizeof(int));
cudaMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int));
// copy polynomials: host -> device (dest, src, size, direction)
cudaMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice);
// setup kernel params & launch
dim3 dimGrid(blocks);
dim3 dimBlock(threadsPerBlock);
multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy);
cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy dev_product back into host_product_parallel (dest, src, size, direction)
cudaMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), cudaMemcpyDeviceToHost);
/* ~~~ now we need to deal with the summation of intermediary products ~~~ */
// allocate device mem for final product
int *dev_final;
cudaMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int));
// copy zero'd host_final_product to dev_final and host_product_parallel to dev_product
// (dest, src, size, direction)
cudaMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice);
// parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy)
sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy);
cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy summation of products back to host (dest, src, size, direction)
cudaMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), cudaMemcpyDeviceToHost);
// multiply polynomials in serial and write to host_product_serial for verification
multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1);
printf("Serial result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_product_serial[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\nParallel result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_final_product[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\n");
bool allRight = 1;
for (int i = 0; i < degreeOfProduct+1; i++) {
if (host_product_serial[i] == host_final_product[i]) {
continue;
} else {
printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]);
allRight = 0;
}
}
if (allRight) {
printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n");
} else {
printf("Looks like there were some discrepancies. Verification failed.\n\n");
}
// free host and device memory
free(host_polyA);
free(host_polyB);
free(host_product_serial);
free(host_product_parallel);
free(host_final_product);
cudaFree(dev_polyA);
cudaFree(dev_polyB);
cudaFree(dev_product);
cudaFree(dev_final);
return 0;
}
// genPolynomials takes two polynomials and their size (number of terms per polynomial),
// and generates random coefficients for each term mod p
void genPolynomials(int *polyA, int *polyB, int size) {
// coefficient generation using rand mod p where p = 103
for (int i = 0; i < size; i++) {
polyA[i] = rand() % modBy;
if (polyA[i] == 0) { // we don't want any zeros!!!
polyA[i] = 1;
}
polyB[i] = rand() % modBy;
if (polyB[i] == 0) {
polyB[i] = 1;
}
}
}
// multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place
// the sum of products into, as well as the size of the product polynomial
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) {
int degreeOfTerms;
// ensure all coefficients of product are 0
for (int i = 0; i < productSize; i++) {
product[i] = 0;
}
// calculate sum of products
for (int a = 0; a < polySize; a++) { // iterate through terms in A
for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B
// add degrees (indices) to determine which index this product belongs to in the product array block
degreeOfTerms = a + b;
// add product of terms to previous sum and mod by 103
product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy;
}
}
}
// multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy) {
int a = blockIdx.x; // all threads in the same block will access the same polyA element
int b = threadIdx.x; // but all threads will access individual polyB elements
int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product
product[myIndex] = (polyA[a] * polyB[b]) % modBy;
}
// sumProductsParallel uses prodSize threads, each thread corresponding to a degree, to sum common terms and determine the actual product of polyA and polyB
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy) {
int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step
if (responsibleFor < prodSize) { // e.g. 1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder
for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks
for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block
int degreeOfElement = blockNum + indexInBlock; // the degree related to the coefficient stored at each products[] index is equal to the block number + the relative index in the block
if (indexInBlock == 0 && degreeOfElement > responsibleFor) {
return; // this thread is done summing its common terms
}
else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated
int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[]
summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree]
}
}
}
}
}
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf(stderr, "CUDA error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} | .file "tmpxft_000e6cb3_00000000-6_q1_swhela2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14genPolynomialsPiS_i
.type _Z14genPolynomialsPiS_i, @function
_Z14genPolynomialsPiS_i:
.LFB2058:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L10
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rsi, %rbx
movq %rdi, %rbp
movslq %edx, %rdx
leaq (%rsi,%rdx,4), %r13
movl $1, %r12d
.L7:
call rand@PLT
cltd
idivl modBy(%rip)
testl %edx, %edx
cmove %r12d, %edx
movl %edx, 0(%rbp)
call rand@PLT
cltd
idivl modBy(%rip)
testl %edx, %edx
cmove %r12d, %edx
movl %edx, (%rbx)
addq $4, %rbx
addq $4, %rbp
cmpq %r13, %rbx
jne .L7
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2058:
.size _Z14genPolynomialsPiS_i, .-_Z14genPolynomialsPiS_i
.globl _Z21multPolynomialsSerialPiS_iS_i
.type _Z21multPolynomialsSerialPiS_iS_i, @function
_Z21multPolynomialsSerialPiS_iS_i:
.LFB2059:
.cfi_startproc
endbr64
movq %rsi, %r10
movl %edx, %r9d
movq %rcx, %r11
testl %r8d, %r8d
jle .L14
movq %rcx, %rax
movslq %r8d, %r8
leaq (%rcx,%r8,4), %rdx
.L15:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L15
.L14:
testl %r9d, %r9d
jle .L22
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movslq %r9d, %rax
leaq (%r10,%rax,4), %r8
movl $0, %ebx
.L17:
movq %r10, %rsi
movq %r11, %rcx
.L18:
movl (%rdi), %eax
imull (%rsi), %eax
addl (%rcx), %eax
cltd
idivl modBy(%rip)
movl %edx, (%rcx)
addq $4, %rcx
addq $4, %rsi
cmpq %r8, %rsi
jne .L18
addl $1, %ebx
addq $4, %r11
addq $4, %rdi
cmpl %ebx, %r9d
jne .L17
popq %rbx
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore 3
ret
.cfi_endproc
.LFE2059:
.size _Z21multPolynomialsSerialPiS_iS_i, .-_Z21multPolynomialsSerialPiS_iS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L28
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2060:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
.type _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii, @function
_Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23multPolynomialsParallelPiS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii, .-_Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
.globl _Z23multPolynomialsParallelPiS_S_ii
.type _Z23multPolynomialsParallelPiS_S_ii, @function
_Z23multPolynomialsParallelPiS_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z23multPolynomialsParallelPiS_S_ii, .-_Z23multPolynomialsParallelPiS_S_ii
.globl _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
.type _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii, @function
_Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19sumProductsParalleliiPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii, .-_Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
.globl _Z19sumProductsParalleliiPiS_ii
.type _Z19sumProductsParalleliiPiS_ii, @function
_Z19sumProductsParalleliiPiS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z19sumProductsParalleliiPiS_ii, .-_Z19sumProductsParalleliiPiS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: "
.section .rodata.str1.1
.LC2:
.string "%d"
.LC3:
.string "\nYou entered '%d'.\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time."
.align 8
.LC5:
.string "Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n"
.section .rodata.str1.1
.LC6:
.string "Generating polynomials...\n\n"
.LC7:
.string "polyA:\n"
.LC8:
.string "%dx^%d "
.LC9:
.string "+ "
.LC10:
.string "\n\npolyB:\n"
.LC11:
.string "\n\n"
.LC12:
.string "kernel invocation"
.LC13:
.string "Serial result:\n"
.LC14:
.string "\n\nParallel result:\n"
.section .rodata.str1.8
.align 8
.LC15:
.string "Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n"
.align 8
.LC16:
.string "Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n"
.align 8
.LC17:
.string "Looks like there were some discrepancies. Verification failed.\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 60(%rsp), %rsi
leaq .LC2(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl 60(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %ecx
cmpl $10, %ecx
jg .L79
movl $1, %edx
sall %cl, %edx
movl %edx, 60(%rsp)
leal -1(%rdx), %ecx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %r15d
movslq %r15d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r14
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %edx
movq %r14, %rsi
movq %r13, %rdi
call _Z14genPolynomialsPiS_i
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 60(%rsp)
jle .L48
movl $0, %ebx
leaq .LC8(%rip), %rbp
leaq .LC9(%rip), %r12
jmp .L50
.L79:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L45
.L49:
addq $1, %rbx
cmpl %ebx, 60(%rsp)
jle .L48
.L50:
movl 0(%r13,%rbx,4), %edx
movl %ebx, %ecx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L49
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L49
.L48:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 60(%rsp)
jle .L51
movl $0, %ebx
leaq .LC8(%rip), %rbp
leaq .LC9(%rip), %r12
jmp .L53
.L52:
addq $1, %rbx
cmpl %ebx, 60(%rsp)
jle .L51
.L53:
movl (%r14,%rbx,4), %edx
movl %ebx, %ecx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L52
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L52
.L51:
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %r12d
leal -2(%r12,%r12), %eax
movl %eax, 40(%rsp)
addl $1, %eax
movl %eax, 32(%rsp)
cltq
leaq 0(,%rax,4), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movl %r12d, %eax
imull %r12d, %eax
movl %eax, 44(%rsp)
movslq %eax, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
movq %rbp, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
cmpl $0, 44(%rsp)
jle .L54
movq 16(%rsp), %rcx
movq %rcx, %rax
addq %rcx, %rbx
.L55:
movl $0, (%rax)
addq $4, %rax
cmpq %rbx, %rax
jne .L55
.L54:
cmpl $0, 40(%rsp)
js .L56
movq 8(%rsp), %rcx
movq %rcx, %rax
leal -1(%r12,%r12), %edx
leaq (%rcx,%rdx,4), %rdx
.L57:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L57
.L56:
movslq %r12d, %rsi
salq $2, %rsi
leaq 64(%rsp), %rdi
call cudaMalloc@PLT
movslq 60(%rsp), %rsi
salq $2, %rsi
leaq 72(%rsp), %rdi
call cudaMalloc@PLT
movl 60(%rsp), %esi
imull %esi, %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 80(%rsp), %rdi
call cudaMalloc@PLT
movslq 60(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movslq 60(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq 16(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl %r15d, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl %r15d, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 108(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L80
.L58:
call cudaThreadSynchronize@PLT
leaq .LC12(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $2, %ecx
movq 80(%rsp), %rsi
movq 16(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 88(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbx, %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl 116(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 108(%rsp), %rdx
movq 96(%rsp), %rdi
movl 104(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L81
.L59:
call cudaThreadSynchronize@PLT
leaq .LC12(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $2, %ecx
movq %rbp, %rdx
movq 88(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl 32(%rsp), %r8d
movq 24(%rsp), %rbp
movq %rbp, %rcx
movl 60(%rsp), %edx
movq %r14, %rsi
movq %r13, %rdi
call _Z21multPolynomialsSerialPiS_iS_i
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 40(%rsp)
js .L60
leal -2(%r12,%r12), %r12d
leal 1(%r12), %r15d
movl $0, %ebx
jmp .L62
.L80:
movl modBy(%rip), %r8d
movl 60(%rsp), %ecx
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
jmp .L58
.L81:
movl modBy(%rip), %r9d
movl %r15d, %r8d
movq 80(%rsp), %rcx
movq 88(%rsp), %rdx
movl %r15d, %esi
movl 32(%rsp), %edi
call _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
jmp .L59
.L82:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %r15d, %ebx
je .L61
.L62:
movl 0(%rbp), %edx
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl %r12d, %ebx
jne .L82
.L61:
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rbp
leal 1(%r12), %r15d
movl $0, %ebx
jmp .L64
.L83:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %r15d, %ebx
je .L63
.L64:
movl 0(%rbp), %edx
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl %ebx, %r12d
jne .L83
.L63:
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
movl $1, %eax
leaq .LC15(%rip), %rbp
movq %r13, 32(%rsp)
movl 40(%rsp), %r12d
movq 24(%rsp), %r13
movq 8(%rsp), %r15
jmp .L66
.L85:
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L65:
addq $1, %rbx
cmpl %ebx, %r12d
jl .L84
.L66:
movl 0(%r13,%rbx,4), %ecx
movl (%r15,%rbx,4), %r8d
cmpl %r8d, %ecx
jne .L85
jmp .L65
.L84:
movq 32(%rsp), %r13
testb %al, %al
jne .L69
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L68
.L60:
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L69:
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L68:
movq %r13, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L45:
movq 120(%rsp), %rdx
subq %fs:40, %rdx
jne .L86
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC18:
.string "_Z19sumProductsParalleliiPiS_ii"
.align 8
.LC19:
.string "_Z23multPolynomialsParallelPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z19sumProductsParalleliiPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z23multPolynomialsParallelPiS_S_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl modBy
.data
.align 4
.type modBy, @object
.size modBy, 4
modBy:
.long 103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*
CS 4402 Distributed and Parallel Systems
Assignment 2 Question 1: N thread blocks and N threads per thread block
Sarah Whelan 250778849
TO RUN: nvcc q1_swhela2.cu -o q1_swhela2
./q1_swhela2
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition
void genPolynomials(int *polyA, int *polyB, int size);
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize);
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy);
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy);
void checkCUDAError(const char* msg);
int main() {
srand(time(NULL));
int numTerms;
// get user desired input on length of polynomials
printf("Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: ");
scanf("%d", &numTerms);
printf("\nYou entered '%d'.\n", numTerms);
if (numTerms > 10) {
printf("Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time.");
return 1;
}
// then bitshift by input value to determine actual value of numTerms
numTerms = 1 << numTerms;
printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1);
// use numTerms as the number of blocks per thread and the number of blocks
int threadsPerBlock = numTerms;
int blocks = numTerms;
// instantiate and allocate host memory blocks to store each polynomial of size numTerms
int *host_polyA, *host_polyB;
host_polyA = (int *) malloc(numTerms * sizeof(int));
host_polyB = (int *) malloc(numTerms * sizeof(int));
// generate random polynomials of size numTerms
printf("Generating polynomials...\n\n");
genPolynomials(host_polyA, host_polyB, numTerms);
printf("polyA:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyA[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\npolyB:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyB[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\n");
// determine degree of product
int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1
// allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial)
// and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product
// two different allocations in order to verify results at the end!
int *host_product_serial, *host_product_parallel, *host_final_product;
host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic
host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks
host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed
// ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one)
for (int i = 0; i < numTerms*numTerms; i++) {
host_product_parallel[i] = 0;
}
// ensure all vals in host_final_product are 0
for (int i = 0; i < degreeOfProduct+1; i++) {
host_final_product[i] = 0;
}
// initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product
int *dev_polyA, *dev_polyB, *dev_product;
cudaMalloc( (void **) &dev_polyA, numTerms * sizeof(int));
cudaMalloc( (void **) &dev_polyB, numTerms * sizeof(int));
cudaMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int));
// copy polynomials: host -> device (dest, src, size, direction)
cudaMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice);
// setup kernel params & launch
dim3 dimGrid(blocks);
dim3 dimBlock(threadsPerBlock);
multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy);
cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy dev_product back into host_product_parallel (dest, src, size, direction)
cudaMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), cudaMemcpyDeviceToHost);
/* ~~~ now we need to deal with the summation of intermediary products ~~~ */
// allocate device mem for final product
int *dev_final;
cudaMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int));
// copy zero'd host_final_product to dev_final and host_product_parallel to dev_product
// (dest, src, size, direction)
cudaMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), cudaMemcpyHostToDevice);
// parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy)
sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy);
cudaThreadSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy summation of products back to host (dest, src, size, direction)
cudaMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), cudaMemcpyDeviceToHost);
// multiply polynomials in serial and write to host_product_serial for verification
multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1);
printf("Serial result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_product_serial[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\nParallel result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_final_product[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\n");
bool allRight = 1;
for (int i = 0; i < degreeOfProduct+1; i++) {
if (host_product_serial[i] == host_final_product[i]) {
continue;
} else {
printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]);
allRight = 0;
}
}
if (allRight) {
printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n");
} else {
printf("Looks like there were some discrepancies. Verification failed.\n\n");
}
// free host and device memory
free(host_polyA);
free(host_polyB);
free(host_product_serial);
free(host_product_parallel);
free(host_final_product);
cudaFree(dev_polyA);
cudaFree(dev_polyB);
cudaFree(dev_product);
cudaFree(dev_final);
return 0;
}
// genPolynomials takes two polynomials and their size (number of terms per polynomial),
// and generates random coefficients for each term mod p
void genPolynomials(int *polyA, int *polyB, int size) {
// coefficient generation using rand mod p where p = 103
for (int i = 0; i < size; i++) {
polyA[i] = rand() % modBy;
if (polyA[i] == 0) { // we don't want any zeros!!!
polyA[i] = 1;
}
polyB[i] = rand() % modBy;
if (polyB[i] == 0) {
polyB[i] = 1;
}
}
}
// multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place
// the sum of products into, as well as the size of the product polynomial
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) {
int degreeOfTerms;
// ensure all coefficients of product are 0
for (int i = 0; i < productSize; i++) {
product[i] = 0;
}
// calculate sum of products
for (int a = 0; a < polySize; a++) { // iterate through terms in A
for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B
// add degrees (indices) to determine which index this product belongs to in the product array block
degreeOfTerms = a + b;
// add product of terms to previous sum and mod by 103
product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy;
}
}
}
// multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy) {
int a = blockIdx.x; // all threads in the same block will access the same polyA element
int b = threadIdx.x; // but all threads will access individual polyB elements
int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product
product[myIndex] = (polyA[a] * polyB[b]) % modBy;
}
// sumProductsParallel uses prodSize threads, each thread corresponding to a degree, to sum common terms and determine the actual product of polyA and polyB
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy) {
int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step
if (responsibleFor < prodSize) { // e.g. 1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder
for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks
for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block
int degreeOfElement = blockNum + indexInBlock; // the degree related to the coefficient stored at each products[] index is equal to the block number + the relative index in the block
if (indexInBlock == 0 && degreeOfElement > responsibleFor) {
return; // this thread is done summing its common terms
}
else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated
int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[]
summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree]
}
}
}
}
}
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf(stderr, "CUDA error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
} | /*
CS 4402 Distributed and Parallel Systems
Assignment 2 Question 1: N thread blocks and N threads per thread block
Sarah Whelan 250778849
TO RUN: nvcc q1_swhela2.cu -o q1_swhela2
./q1_swhela2
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition
void genPolynomials(int *polyA, int *polyB, int size);
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize);
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy);
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy);
void checkCUDAError(const char* msg);
int main() {
srand(time(NULL));
int numTerms;
// get user desired input on length of polynomials
printf("Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: ");
scanf("%d", &numTerms);
printf("\nYou entered '%d'.\n", numTerms);
if (numTerms > 10) {
printf("Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time.");
return 1;
}
// then bitshift by input value to determine actual value of numTerms
numTerms = 1 << numTerms;
printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1);
// use numTerms as the number of blocks per thread and the number of blocks
int threadsPerBlock = numTerms;
int blocks = numTerms;
// instantiate and allocate host memory blocks to store each polynomial of size numTerms
int *host_polyA, *host_polyB;
host_polyA = (int *) malloc(numTerms * sizeof(int));
host_polyB = (int *) malloc(numTerms * sizeof(int));
// generate random polynomials of size numTerms
printf("Generating polynomials...\n\n");
genPolynomials(host_polyA, host_polyB, numTerms);
printf("polyA:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyA[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\npolyB:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyB[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\n");
// determine degree of product
int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1
// allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial)
// and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product
// two different allocations in order to verify results at the end!
int *host_product_serial, *host_product_parallel, *host_final_product;
host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic
host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks
host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed
// ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one)
for (int i = 0; i < numTerms*numTerms; i++) {
host_product_parallel[i] = 0;
}
// ensure all vals in host_final_product are 0
for (int i = 0; i < degreeOfProduct+1; i++) {
host_final_product[i] = 0;
}
// initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product
int *dev_polyA, *dev_polyB, *dev_product;
hipMalloc( (void **) &dev_polyA, numTerms * sizeof(int));
hipMalloc( (void **) &dev_polyB, numTerms * sizeof(int));
hipMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int));
// copy polynomials: host -> device (dest, src, size, direction)
hipMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// setup kernel params & launch
dim3 dimGrid(blocks);
dim3 dimBlock(threadsPerBlock);
multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy dev_product back into host_product_parallel (dest, src, size, direction)
hipMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), hipMemcpyDeviceToHost);
/* ~~~ now we need to deal with the summation of intermediary products ~~~ */
// allocate device mem for final product
int *dev_final;
hipMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int));
// copy zero'd host_final_product to dev_final and host_product_parallel to dev_product
// (dest, src, size, direction)
hipMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy)
sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy summation of products back to host (dest, src, size, direction)
hipMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), hipMemcpyDeviceToHost);
// multiply polynomials in serial and write to host_product_serial for verification
multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1);
printf("Serial result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_product_serial[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\nParallel result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_final_product[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\n");
bool allRight = 1;
for (int i = 0; i < degreeOfProduct+1; i++) {
if (host_product_serial[i] == host_final_product[i]) {
continue;
} else {
printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]);
allRight = 0;
}
}
if (allRight) {
printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n");
} else {
printf("Looks like there were some discrepancies. Verification failed.\n\n");
}
// free host and device memory
free(host_polyA);
free(host_polyB);
free(host_product_serial);
free(host_product_parallel);
free(host_final_product);
hipFree(dev_polyA);
hipFree(dev_polyB);
hipFree(dev_product);
hipFree(dev_final);
return 0;
}
// genPolynomials takes two polynomials and their size (number of terms per polynomial),
// and generates random coefficients for each term mod p
void genPolynomials(int *polyA, int *polyB, int size) {
// coefficient generation using rand mod p where p = 103
for (int i = 0; i < size; i++) {
polyA[i] = rand() % modBy;
if (polyA[i] == 0) { // we don't want any zeros!!!
polyA[i] = 1;
}
polyB[i] = rand() % modBy;
if (polyB[i] == 0) {
polyB[i] = 1;
}
}
}
// multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place
// the sum of products into, as well as the size of the product polynomial
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) {
int degreeOfTerms;
// ensure all coefficients of product are 0
for (int i = 0; i < productSize; i++) {
product[i] = 0;
}
// calculate sum of products
for (int a = 0; a < polySize; a++) { // iterate through terms in A
for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B
// add degrees (indices) to determine which index this product belongs to in the product array block
degreeOfTerms = a + b;
// add product of terms to previous sum and mod by 103
product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy;
}
}
}
// multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy) {
int a = blockIdx.x; // all threads in the same block will access the same polyA element
int b = threadIdx.x; // but all threads will access individual polyB elements
int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product
product[myIndex] = (polyA[a] * polyB[b]) % modBy;
}
// sumProductsParallel uses prodSize threads, each thread corresponding to a degree, to sum common terms and determine the actual product of polyA and polyB
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy) {
int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step
if (responsibleFor < prodSize) { // e.g. 1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder
for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks
for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block
int degreeOfElement = blockNum + indexInBlock; // the degree related to the coefficient stored at each products[] index is equal to the block number + the relative index in the block
if (indexInBlock == 0 && degreeOfElement > responsibleFor) {
return; // this thread is done summing its common terms
}
else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated
int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[]
summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree]
}
}
}
}
}
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
fprintf(stderr, "CUDA error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*
CS 4402 Distributed and Parallel Systems
Assignment 2 Question 1: N thread blocks and N threads per thread block
Sarah Whelan 250778849
TO RUN: nvcc q1_swhela2.cu -o q1_swhela2
./q1_swhela2
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition
void genPolynomials(int *polyA, int *polyB, int size);
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize);
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy);
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy);
void checkCUDAError(const char* msg);
int main() {
srand(time(NULL));
int numTerms;
// get user desired input on length of polynomials
printf("Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: ");
scanf("%d", &numTerms);
printf("\nYou entered '%d'.\n", numTerms);
if (numTerms > 10) {
printf("Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time.");
return 1;
}
// then bitshift by input value to determine actual value of numTerms
numTerms = 1 << numTerms;
printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1);
// use numTerms as the number of blocks per thread and the number of blocks
int threadsPerBlock = numTerms;
int blocks = numTerms;
// instantiate and allocate host memory blocks to store each polynomial of size numTerms
int *host_polyA, *host_polyB;
host_polyA = (int *) malloc(numTerms * sizeof(int));
host_polyB = (int *) malloc(numTerms * sizeof(int));
// generate random polynomials of size numTerms
printf("Generating polynomials...\n\n");
genPolynomials(host_polyA, host_polyB, numTerms);
printf("polyA:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyA[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\npolyB:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyB[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\n");
// determine degree of product
int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1
// allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial)
// and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product
// two different allocations in order to verify results at the end!
int *host_product_serial, *host_product_parallel, *host_final_product;
host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic
host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks
host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed
// ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one)
for (int i = 0; i < numTerms*numTerms; i++) {
host_product_parallel[i] = 0;
}
// ensure all vals in host_final_product are 0
for (int i = 0; i < degreeOfProduct+1; i++) {
host_final_product[i] = 0;
}
// initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product
int *dev_polyA, *dev_polyB, *dev_product;
hipMalloc( (void **) &dev_polyA, numTerms * sizeof(int));
hipMalloc( (void **) &dev_polyB, numTerms * sizeof(int));
hipMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int));
// copy polynomials: host -> device (dest, src, size, direction)
hipMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// setup kernel params & launch
dim3 dimGrid(blocks);
dim3 dimBlock(threadsPerBlock);
multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy dev_product back into host_product_parallel (dest, src, size, direction)
hipMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), hipMemcpyDeviceToHost);
/* ~~~ now we need to deal with the summation of intermediary products ~~~ */
// allocate device mem for final product
int *dev_final;
hipMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int));
// copy zero'd host_final_product to dev_final and host_product_parallel to dev_product
// (dest, src, size, direction)
hipMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy)
sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy summation of products back to host (dest, src, size, direction)
hipMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), hipMemcpyDeviceToHost);
// multiply polynomials in serial and write to host_product_serial for verification
multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1);
printf("Serial result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_product_serial[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\nParallel result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_final_product[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\n");
bool allRight = 1;
for (int i = 0; i < degreeOfProduct+1; i++) {
if (host_product_serial[i] == host_final_product[i]) {
continue;
} else {
printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]);
allRight = 0;
}
}
if (allRight) {
printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n");
} else {
printf("Looks like there were some discrepancies. Verification failed.\n\n");
}
// free host and device memory
free(host_polyA);
free(host_polyB);
free(host_product_serial);
free(host_product_parallel);
free(host_final_product);
hipFree(dev_polyA);
hipFree(dev_polyB);
hipFree(dev_product);
hipFree(dev_final);
return 0;
}
// genPolynomials takes two polynomials and their size (number of terms per polynomial),
// and generates random coefficients for each term mod p
void genPolynomials(int *polyA, int *polyB, int size) {
// coefficient generation using rand mod p where p = 103
for (int i = 0; i < size; i++) {
polyA[i] = rand() % modBy;
if (polyA[i] == 0) { // we don't want any zeros!!!
polyA[i] = 1;
}
polyB[i] = rand() % modBy;
if (polyB[i] == 0) {
polyB[i] = 1;
}
}
}
// multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place
// the sum of products into, as well as the size of the product polynomial
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) {
int degreeOfTerms;
// ensure all coefficients of product are 0
for (int i = 0; i < productSize; i++) {
product[i] = 0;
}
// calculate sum of products
for (int a = 0; a < polySize; a++) { // iterate through terms in A
for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B
// add degrees (indices) to determine which index this product belongs to in the product array block
degreeOfTerms = a + b;
// add product of terms to previous sum and mod by 103
product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy;
}
}
}
// multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy) {
int a = blockIdx.x; // all threads in the same block will access the same polyA element
int b = threadIdx.x; // but all threads will access individual polyB elements
int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product
product[myIndex] = (polyA[a] * polyB[b]) % modBy;
}
// sumProductsParallel uses prodSize threads, each thread corresponding to a degree, to sum common terms and determine the actual product of polyA and polyB
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy) {
int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step
if (responsibleFor < prodSize) { // e.g. 1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder
for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks
for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block
int degreeOfElement = blockNum + indexInBlock; // the degree related to the coefficient stored at each products[] index is equal to the block number + the relative index in the block
if (indexInBlock == 0 && degreeOfElement > responsibleFor) {
return; // this thread is done summing its common terms
}
else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated
int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[]
summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree]
}
}
}
}
}
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
fprintf(stderr, "CUDA error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23multPolynomialsParallelPiS_S_ii
.globl _Z23multPolynomialsParallelPiS_S_ii
.p2align 8
.type _Z23multPolynomialsParallelPiS_S_ii,@function
_Z23multPolynomialsParallelPiS_S_ii:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
v_lshlrev_b32_e32 v1, 2, v0
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[6:7]
s_clause 0x1
s_load_b32 s6, s[0:1], 0x2c
s_load_b32 s7, s[0:1], 0x1c
s_lshl_b64 s[0:1], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s6, 0xffff
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
s_ashr_i32 s4, s7, 31
s_load_b32 s0, s[0:1], 0x0
s_add_i32 s7, s7, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s4, s7, s4
v_cvt_f32_u32_e32 v2, s4
s_sub_i32 s1, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s1, v2
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mul_lo_u32 v1, v1, s0
v_ashrrev_i32_e32 v4, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v4
v_xor_b32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v1, v2
v_mul_lo_u32 v2, v2, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v1, v1, v2
v_subrev_nc_u32_e32 v2, s4, v1
v_cmp_le_u32_e32 vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v3, v1, v2, vcc_lo
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_subrev_nc_u32_e32 v0, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v2, 31, v1
v_cndmask_b32_e32 v0, v3, v0, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v3, v0, v4
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v2, v3, v4
v_add_co_u32 v0, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23multPolynomialsParallelPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23multPolynomialsParallelPiS_S_ii, .Lfunc_end0-_Z23multPolynomialsParallelPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19sumProductsParalleliiPiS_ii
.globl _Z19sumProductsParalleliiPiS_ii
.p2align 8
.type _Z19sumProductsParalleliiPiS_ii,@function
_Z19sumProductsParalleliiPiS_ii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x18
s_mov_b32 s6, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1]
s_cmp_gt_i32 s4, 0
s_cselect_b32 s2, -1, 0
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_13
s_clause 0x2
s_load_b32 s7, s[0:1], 0x4
s_load_b32 s9, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s7, 0
s_cselect_b32 s8, -1, 0
s_ashr_i32 s10, s9, 31
v_add_co_u32 v2, vcc_lo, s0, v2
s_add_i32 s9, s9, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_xor_b32 s9, s9, s10
s_mov_b32 s0, 0
v_cvt_f32_u32_e32 v0, s9
s_sub_i32 s10, 0, s9
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, s10, v5
v_mul_hi_u32 v6, v5, v0
v_sub_nc_u32_e32 v0, 0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_nc_u32_e32 v5, v5, v6
s_branch .LBB1_3
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s10, exec_lo, s11
s_or_b32 s6, s10, s6
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execz .LBB1_13
.LBB1_3:
s_and_not1_b32 vcc_lo, exec_lo, s8
s_mov_b32 s10, -1
s_cbranch_vccnz .LBB1_11
v_add_nc_u32_e32 v6, s1, v0
s_mov_b32 s11, 0
s_mov_b32 s12, 0
s_branch .LBB1_7
.LBB1_5:
s_or_b32 exec_lo, exec_lo, s16
s_add_i32 s12, s12, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s7, s12
s_cselect_b32 s16, -1, 0
s_and_not1_b32 s14, s14, exec_lo
s_and_b32 s16, s16, exec_lo
s_and_not1_b32 s13, s13, exec_lo
s_or_b32 s14, s14, s16
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s15
s_xor_b32 s15, s13, -1
s_and_b32 s16, exec_lo, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s11, s16, s11
s_and_not1_b32 s10, s10, exec_lo
s_and_b32 s15, s15, exec_lo
s_or_b32 s10, s10, s15
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execz .LBB1_10
.LBB1_7:
s_add_i32 s15, s1, s12
s_cmp_lg_u32 s12, 0
v_cmp_le_i32_e32 vcc_lo, s15, v1
s_cselect_b32 s15, -1, 0
s_or_b32 s13, s13, exec_lo
s_or_b32 s14, s14, exec_lo
s_or_b32 s16, s15, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s15, s16
s_cbranch_execz .LBB1_6
v_add_nc_u32_e32 v7, s12, v6
s_mov_b32 s16, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v7
s_cbranch_execz .LBB1_5
s_add_i32 s18, s0, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s19, s18, 31
s_lshl_b64 s[18:19], s[18:19], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s18, s2, s18
s_addc_u32 s19, s3, s19
global_load_b32 v7, v[2:3], off
global_load_b32 v8, v4, s[18:19]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v8, 31, v7
v_add_nc_u32_e32 v7, v7, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v7, v7, v8
v_mul_hi_u32 v9, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v9, v9, s9
v_sub_nc_u32_e32 v7, v7, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v9, s9, v7
v_cmp_le_u32_e32 vcc_lo, s9, v7
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v9, s9, v7
v_cmp_le_u32_e32 vcc_lo, s9, v7
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v7, v7, v8
v_sub_nc_u32_e32 v7, v7, v8
global_store_b32 v[2:3], v7, off
s_branch .LBB1_5
.LBB1_10:
s_or_b32 exec_lo, exec_lo, s11
.LBB1_11:
s_mov_b32 s11, -1
s_and_saveexec_b32 s12, s10
s_cbranch_execz .LBB1_2
s_add_i32 s1, s1, 1
s_add_i32 s0, s0, s5
s_cmp_eq_u32 s1, s4
s_cselect_b32 s10, -1, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s11, s10, exec_lo
s_branch .LBB1_2
.LBB1_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19sumProductsParalleliiPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z19sumProductsParalleliiPiS_ii, .Lfunc_end1-_Z19sumProductsParalleliiPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23multPolynomialsParallelPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23multPolynomialsParallelPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19sumProductsParalleliiPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z19sumProductsParalleliiPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*
CS 4402 Distributed and Parallel Systems
Assignment 2 Question 1: N thread blocks and N threads per thread block
Sarah Whelan 250778849
TO RUN: nvcc q1_swhela2.cu -o q1_swhela2
./q1_swhela2
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
int modBy = 103; // common prime num used for modding coefficient values during generation, multiplication, and addition
void genPolynomials(int *polyA, int *polyB, int size);
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize);
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy);
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy);
void checkCUDAError(const char* msg);
int main() {
srand(time(NULL));
int numTerms;
// get user desired input on length of polynomials
printf("Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: ");
scanf("%d", &numTerms);
printf("\nYou entered '%d'.\n", numTerms);
if (numTerms > 10) {
printf("Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time.");
return 1;
}
// then bitshift by input value to determine actual value of numTerms
numTerms = 1 << numTerms;
printf("Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n", numTerms, numTerms-1);
// use numTerms as the number of blocks per thread and the number of blocks
int threadsPerBlock = numTerms;
int blocks = numTerms;
// instantiate and allocate host memory blocks to store each polynomial of size numTerms
int *host_polyA, *host_polyB;
host_polyA = (int *) malloc(numTerms * sizeof(int));
host_polyB = (int *) malloc(numTerms * sizeof(int));
// generate random polynomials of size numTerms
printf("Generating polynomials...\n\n");
genPolynomials(host_polyA, host_polyB, numTerms);
printf("polyA:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyA[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\npolyB:\n");
for (int i = 0; i < numTerms; i++) {
printf("%dx^%d ", host_polyB[i], i);
if (i != numTerms-1) {
printf("+ ");
}
}
printf("\n\n");
// determine degree of product
int degreeOfProduct = (numTerms - 1) * 2; // e.g. degree(polyA, polyB) = 3 then x^3 * x^3 = x^6 and degree = numTerms - 1
// allocate blocks of memory on the host for storing the product with size degreeOfProduct + 1 (serial)
// and numTerms*numTerms for the intermediary parallel product, as well asthe final parallel product
// two different allocations in order to verify results at the end!
int *host_product_serial, *host_product_parallel, *host_final_product;
host_product_serial = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // sum of products is intrinsic
host_product_parallel = (int *) malloc(numTerms * numTerms * sizeof(int)); // because of n threads in each n thread blocks
host_final_product = (int *) malloc((degreeOfProduct+1) * sizeof(int)); // final product from parallel version once summed
// ensure all vals in host_product_parallel are 0 (this is done within the serial version so we don't need to worry about that one)
for (int i = 0; i < numTerms*numTerms; i++) {
host_product_parallel[i] = 0;
}
// ensure all vals in host_final_product are 0
for (int i = 0; i < degreeOfProduct+1; i++) {
host_final_product[i] = 0;
}
// initialize and allocate memory on the devices for storing dev_polyA, dev_polyB, and dev_product
int *dev_polyA, *dev_polyB, *dev_product;
hipMalloc( (void **) &dev_polyA, numTerms * sizeof(int));
hipMalloc( (void **) &dev_polyB, numTerms * sizeof(int));
hipMalloc( (void **) &dev_product, numTerms * numTerms * sizeof(int));
// copy polynomials: host -> device (dest, src, size, direction)
hipMemcpy(dev_polyA, host_polyA, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_polyB, host_polyB, numTerms * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// setup kernel params & launch
dim3 dimGrid(blocks);
dim3 dimBlock(threadsPerBlock);
multPolynomialsParallel<<<dimGrid, dimBlock>>>(dev_polyA, dev_polyB, dev_product, numTerms, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy dev_product back into host_product_parallel (dest, src, size, direction)
hipMemcpy(host_product_parallel, dev_product, numTerms * numTerms * sizeof(int), hipMemcpyDeviceToHost);
/* ~~~ now we need to deal with the summation of intermediary products ~~~ */
// allocate device mem for final product
int *dev_final;
hipMalloc( (void **) &dev_final, (degreeOfProduct+1) * sizeof(int));
// copy zero'd host_final_product to dev_final and host_product_parallel to dev_product
// (dest, src, size, direction)
hipMemcpy(dev_final, host_final_product, (degreeOfProduct+1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_product, host_product_parallel, numTerms * numTerms * sizeof(int), hipMemcpyHostToDevice);
// parameters are (int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy)
sumProductsParallel<<<dimGrid, dimBlock>>>(degreeOfProduct+1, threadsPerBlock, dev_final, dev_product, blocks, modBy);
hipDeviceSynchronize(); // wait for ALL threads from all blocks to complete
checkCUDAError("kernel invocation");
// copy summation of products back to host (dest, src, size, direction)
hipMemcpy(host_final_product, dev_final, (degreeOfProduct+1) * sizeof(int), hipMemcpyDeviceToHost);
// multiply polynomials in serial and write to host_product_serial for verification
multPolynomialsSerial(host_polyA, host_polyB, numTerms, host_product_serial, degreeOfProduct+1);
printf("Serial result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_product_serial[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\nParallel result:\n");
for (int i = 0; i < degreeOfProduct+1; i++) {
printf("%dx^%d ", host_final_product[i], i);
if (i != degreeOfProduct) {
printf("+ ");
}
}
printf("\n\n");
bool allRight = 1;
for (int i = 0; i < degreeOfProduct+1; i++) {
if (host_product_serial[i] == host_final_product[i]) {
continue;
} else {
printf("Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n", i, host_product_serial[i], host_final_product[i]);
allRight = 0;
}
}
if (allRight) {
printf("Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n");
} else {
printf("Looks like there were some discrepancies. Verification failed.\n\n");
}
// free host and device memory
free(host_polyA);
free(host_polyB);
free(host_product_serial);
free(host_product_parallel);
free(host_final_product);
hipFree(dev_polyA);
hipFree(dev_polyB);
hipFree(dev_product);
hipFree(dev_final);
return 0;
}
// genPolynomials takes two polynomials and their size (number of terms per polynomial),
// and generates random coefficients for each term mod p
void genPolynomials(int *polyA, int *polyB, int size) {
// coefficient generation using rand mod p where p = 103
for (int i = 0; i < size; i++) {
polyA[i] = rand() % modBy;
if (polyA[i] == 0) { // we don't want any zeros!!!
polyA[i] = 1;
}
polyB[i] = rand() % modBy;
if (polyB[i] == 0) {
polyB[i] = 1;
}
}
}
// multPolynomialsSerial takes two polynomials and their size, in addition to a memory block to place
// the sum of products into, as well as the size of the product polynomial
void multPolynomialsSerial(int *polyA, int *polyB, int polySize, int *product, int productSize) {
int degreeOfTerms;
// ensure all coefficients of product are 0
for (int i = 0; i < productSize; i++) {
product[i] = 0;
}
// calculate sum of products
for (int a = 0; a < polySize; a++) { // iterate through terms in A
for (int b = 0; b < polySize; b++) { // for each term in A, iterate through all terms in B
// add degrees (indices) to determine which index this product belongs to in the product array block
degreeOfTerms = a + b;
// add product of terms to previous sum and mod by 103
product[degreeOfTerms] = (product[degreeOfTerms] + polyA[a] * polyB[b]) % modBy;
}
}
}
// multPolynomialsParallel determines the intermediary products of the polynomial multiplication problem
__global__ void multPolynomialsParallel(int *polyA, int *polyB, int *product, int polySize, int modBy) {
int a = blockIdx.x; // all threads in the same block will access the same polyA element
int b = threadIdx.x; // but all threads will access individual polyB elements
int myIndex = blockDim.x * blockIdx.x + threadIdx.x; // where to write this thread's product
product[myIndex] = (polyA[a] * polyB[b]) % modBy;
}
// sumProductsParallel uses prodSize threads, each thread corresponding to a degree, to sum common terms and determine the actual product of polyA and polyB
__global__ void sumProductsParallel(int prodSize, int threadsPerBlock, int *summedProduct, int *products, int numBlocks, int modBy) {
int responsibleFor = blockIdx.x * blockDim.x + threadIdx.x; // used to check which threads are going to be active during this step
if (responsibleFor < prodSize) { // e.g. 1 < 7 so thread 1 is going to be in charge of summing the x^1 terms, threads >= prodSize will be inactive for remainder
for (int blockNum = 0; blockNum < numBlocks; blockNum++) { // loop through blocks
for (int indexInBlock = 0; indexInBlock < threadsPerBlock; indexInBlock++) { // loop through each index per block
int degreeOfElement = blockNum + indexInBlock; // the degree related to the coefficient stored at each products[] index is equal to the block number + the relative index in the block
if (indexInBlock == 0 && degreeOfElement > responsibleFor) {
return; // this thread is done summing its common terms
}
else if (degreeOfElement == responsibleFor) { // if this thread is responsible for the degree we just calculated
int spotInProducts = blockNum * blockDim.x + indexInBlock; // get its actual index in products[]
summedProduct[responsibleFor] = (summedProduct[responsibleFor] + products[spotInProducts]) % modBy; // and write that value into the final summedProduct[our degree]
}
}
}
}
}
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if(hipSuccess != err)
{
fprintf(stderr, "CUDA error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
} | .text
.file "q1_swhela2.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq %rsp, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl (%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl (%rsp), %ecx
cmpl $11, %ecx
jl .LBB0_2
# %bb.1:
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl $1, %eax
jmp .LBB0_50
.LBB0_2:
movl $1, %esi
# kill: def $cl killed $cl killed $ecx
shll %cl, %esi
movl %esi, (%rsp)
leal -1(%rsi), %edx
movl $.L.str.4, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
movslq (%rsp), %r13
leaq (,%r13,4), %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, %r14
movl $.Lstr, %edi
callq puts@PLT
movl (%rsp), %r15d
testl %r15d, %r15d
jle .LBB0_5
# %bb.3: # %.lr.ph.preheader.i
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_4: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%rbx,%r12,4)
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%r14,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB0_4
.LBB0_5: # %_Z14genPolynomialsPiS_i.exit
movl $.Lstr.1, %edi
callq puts@PLT
cmpl $0, (%rsp)
jle .LBB0_10
# %bb.6: # %.lr.ph.preheader
xorl %r15d, %r15d
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_9: # in Loop: Header=BB0_7 Depth=1
incq %r15
movslq (%rsp), %rax
cmpq %rax, %r15
jge .LBB0_10
.LBB0_7: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $.L.str.7, %edi
movl %r15d, %edx
xorl %eax, %eax
callq printf
movl (%rsp), %eax
decl %eax
cmpq %rax, %r15
je .LBB0_9
# %bb.8: # in Loop: Header=BB0_7 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_9
.LBB0_10: # %._crit_edge
movl $.Lstr.2, %edi
callq puts@PLT
cmpl $0, (%rsp)
jle .LBB0_15
# %bb.11: # %.lr.ph140.preheader
xorl %r15d, %r15d
jmp .LBB0_12
.p2align 4, 0x90
.LBB0_14: # in Loop: Header=BB0_12 Depth=1
incq %r15
movslq (%rsp), %rax
cmpq %rax, %r15
jge .LBB0_15
.LBB0_12: # %.lr.ph140
# =>This Inner Loop Header: Depth=1
movl (%r14,%r15,4), %esi
movl $.L.str.7, %edi
movl %r15d, %edx
xorl %eax, %eax
callq printf
movl (%rsp), %eax
decl %eax
cmpq %rax, %r15
je .LBB0_14
# %bb.13: # in Loop: Header=BB0_12 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_14
.LBB0_15: # %._crit_edge141
movq %r13, 48(%rsp) # 8-byte Spill
movl %r13d, %r12d
movl $.Lstr.6, %edi
callq puts@PLT
movslq (%rsp), %r13
leal -1(,%r13,2), %eax
movl %eax, 4(%rsp) # 4-byte Spill
movslq %eax, %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, 208(%rsp) # 8-byte Spill
movl %r13d, %ebp
imull %ebp, %ebp
leaq (,%rbp,4), %rdi
callq malloc
movq %r15, 200(%rsp) # 8-byte Spill
movq %r15, %rdi
movq %rax, %r15
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %r13, 40(%rsp) # 8-byte Spill
testq %r13, %r13
je .LBB0_17
# %bb.16: # %.lr.ph144.preheader
cmpl $1, %ebp
adcl $0, %ebp
shlq $2, %rbp
movq %r15, %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
.LBB0_17: # %.preheader
cmpl $0, 4(%rsp) # 4-byte Folded Reload
movq 40(%rsp), %rbp # 8-byte Reload
jle .LBB0_19
# %bb.18: # %.lr.ph146.preheader
leal (,%rbp,2), %eax
addl $-2, %eax
leaq 4(,%rax,4), %rdx
movq 16(%rsp), %rdi # 8-byte Reload
xorl %esi, %esi
callq memset@PLT
.LBB0_19: # %._crit_edge147
movq %rbp, %rsi
shlq $2, %rsi
leaq 64(%rsp), %rdi
callq hipMalloc
movslq (%rsp), %rsi
shlq $2, %rsi
leaq 56(%rsp), %rdi
callq hipMalloc
movl (%rsp), %esi
imull %esi, %esi
shlq $2, %rsi
leaq 24(%rsp), %rdi
callq hipMalloc
movq 64(%rsp), %rdi
movslq (%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
movslq (%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movq %r12, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 208(%rsp), %r13 # 8-byte Reload
jne .LBB0_21
# %bb.20:
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 24(%rsp), %rdx
movl (%rsp), %esi
movl modBy(%rip), %edi
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movq %rdx, 80(%rsp)
movl %esi, 12(%rsp)
movl %edi, 8(%rsp)
leaq 128(%rsp), %rax
movq %rax, 144(%rsp)
leaq 120(%rsp), %rax
movq %rax, 152(%rsp)
leaq 80(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z23multPolynomialsParallelPiS_S_ii, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_21:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
movq 200(%rsp), %rbp # 8-byte Reload
jne .LBB0_51
# %bb.22: # %_Z14checkCUDAErrorPKc.exit
movq 24(%rsp), %rsi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 16(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r12, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_24
# %bb.23:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movl modBy(%rip), %edx
movl 4(%rsp), %esi # 4-byte Reload
movl %esi, 12(%rsp)
movq 48(%rsp), %rsi # 8-byte Reload
movl %esi, 8(%rsp)
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl %esi, 140(%rsp)
movl %edx, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 140(%rsp), %rax
movq %rax, 176(%rsp)
leaq 136(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z19sumProductsParalleliiPiS_ii, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_24:
movq %r15, 48(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB0_51
# %bb.25: # %_Z14checkCUDAErrorPKc.exit125
movq 32(%rsp), %rsi
movq 16(%rsp), %rdi # 8-byte Reload
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movl (%rsp), %r15d
movl 4(%rsp), %eax # 4-byte Reload
movl %eax, %ebp
testl %eax, %eax
jle .LBB0_27
# %bb.26: # %.lr.ph.preheader.i130
leaq (,%rbp,4), %rdx
movq %r13, %rdi
xorl %esi, %esi
callq memset@PLT
.LBB0_27: # %.preheader23.i
movq 40(%rsp), %rax # 8-byte Reload
leal -2(,%rax,2), %r12d
testl %r15d, %r15d
jle .LBB0_32
# %bb.28: # %.preheader.lr.ph.i
movl modBy(%rip), %ecx
xorl %esi, %esi
movq %r13, %rdi
.p2align 4, 0x90
.LBB0_29: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_30 Depth 2
movl (%rbx,%rsi,4), %r8d
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_30: # Parent Loop BB0_29 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%r9,4), %eax
imull %r8d, %eax
addl (%rdi,%r9,4), %eax
cltd
idivl %ecx
movl %edx, (%rdi,%r9,4)
incq %r9
cmpq %r9, %r15
jne .LBB0_30
# %bb.31: # %._crit_edge.i
# in Loop: Header=BB0_29 Depth=1
incq %rsi
addq $4, %rdi
cmpq %r15, %rsi
jne .LBB0_29
.LBB0_32: # %_Z21multPolynomialsSerialPiS_iS_i.exit
movq %rbx, 40(%rsp) # 8-byte Spill
movq %r13, %rbx
movl $.Lstr.4, %edi
callq puts@PLT
movl %r12d, %r15d
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_37
# %bb.33: # %.lr.ph149.preheader
leaq 1(%r15), %r13
xorl %r12d, %r12d
jmp .LBB0_34
.p2align 4, 0x90
.LBB0_36: # in Loop: Header=BB0_34 Depth=1
incq %r12
cmpq %r12, %r13
je .LBB0_37
.LBB0_34: # %.lr.ph149
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $.L.str.7, %edi
movl %r12d, %edx
xorl %eax, %eax
callq printf
cmpq %r12, %r15
je .LBB0_36
# %bb.35: # in Loop: Header=BB0_34 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_36
.LBB0_37: # %._crit_edge150
movl $.Lstr.5, %edi
callq puts@PLT
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_42
# %bb.38: # %.lr.ph153.preheader
leaq 1(%r15), %r13
xorl %r12d, %r12d
jmp .LBB0_39
.p2align 4, 0x90
.LBB0_41: # in Loop: Header=BB0_39 Depth=1
incq %r12
cmpq %r12, %r13
je .LBB0_42
.LBB0_39: # %.lr.ph153
# =>This Inner Loop Header: Depth=1
movq 16(%rsp), %rax # 8-byte Reload
movl (%rax,%r12,4), %esi
movl $.L.str.7, %edi
movl %r12d, %edx
xorl %eax, %eax
callq printf
cmpq %r12, %r15
je .LBB0_41
# %bb.40: # in Loop: Header=BB0_39 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_41
.LBB0_42: # %._crit_edge154
movl $.Lstr.6, %edi
callq puts@PLT
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_43
# %bb.44: # %.lr.ph158.preheader
movb $1, %r15b
xorl %r12d, %r12d
movq %rbx, %r13
movq 40(%rsp), %rbx # 8-byte Reload
jmp .LBB0_45
.p2align 4, 0x90
.LBB0_47: # in Loop: Header=BB0_45 Depth=1
incq %r12
cmpq %r12, %rbp
je .LBB0_48
.LBB0_45: # %.lr.ph158
# =>This Inner Loop Header: Depth=1
movl (%r13,%r12,4), %edx
movq 16(%rsp), %rax # 8-byte Reload
movl (%rax,%r12,4), %ecx
cmpl %ecx, %edx
je .LBB0_47
# %bb.46: # in Loop: Header=BB0_45 Depth=1
xorl %r15d, %r15d
movl $.L.str.14, %edi
movl %r12d, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_47
.LBB0_48: # %._crit_edge159.loopexit
testb $1, %r15b
movl $.Lstr.7, %eax
movl $.Lstr.8, %edi
cmoveq %rax, %rdi
jmp .LBB0_49
.LBB0_43:
movl $.Lstr.8, %edi
movq %rbx, %r13
movq 40(%rsp), %rbx # 8-byte Reload
.LBB0_49: # %.critedge
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r13, %rdi
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB0_50:
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_51:
.cfi_def_cfa_offset 272
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.17, %esi
movl $.L.str.11, %edx
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z14genPolynomialsPiS_i # -- Begin function _Z14genPolynomialsPiS_i
.p2align 4, 0x90
.type _Z14genPolynomialsPiS_i,@function
_Z14genPolynomialsPiS_i: # @_Z14genPolynomialsPiS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%r14,%r12,4)
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%rbx,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB1_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z14genPolynomialsPiS_i, .Lfunc_end1-_Z14genPolynomialsPiS_i
.cfi_endproc
# -- End function
.globl _Z38__device_stub__multPolynomialsParallelPiS_S_ii # -- Begin function _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.p2align 4, 0x90
.type _Z38__device_stub__multPolynomialsParallelPiS_S_ii,@function
_Z38__device_stub__multPolynomialsParallelPiS_S_ii: # @_Z38__device_stub__multPolynomialsParallelPiS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23multPolynomialsParallelPiS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z38__device_stub__multPolynomialsParallelPiS_S_ii, .Lfunc_end2-_Z38__device_stub__multPolynomialsParallelPiS_S_ii
.cfi_endproc
# -- End function
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB3_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB3_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.17, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z14checkCUDAErrorPKc, .Lfunc_end3-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.globl _Z34__device_stub__sumProductsParalleliiPiS_ii # -- Begin function _Z34__device_stub__sumProductsParalleliiPiS_ii
.p2align 4, 0x90
.type _Z34__device_stub__sumProductsParalleliiPiS_ii,@function
_Z34__device_stub__sumProductsParalleliiPiS_ii: # @_Z34__device_stub__sumProductsParalleliiPiS_ii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19sumProductsParalleliiPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end4:
.size _Z34__device_stub__sumProductsParalleliiPiS_ii, .Lfunc_end4-_Z34__device_stub__sumProductsParalleliiPiS_ii
.cfi_endproc
# -- End function
.globl _Z21multPolynomialsSerialPiS_iS_i # -- Begin function _Z21multPolynomialsSerialPiS_iS_i
.p2align 4, 0x90
.type _Z21multPolynomialsSerialPiS_iS_i,@function
_Z21multPolynomialsSerialPiS_iS_i: # @_Z21multPolynomialsSerialPiS_iS_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %r15
testl %r8d, %r8d
jle .LBB5_2
# %bb.1: # %.lr.ph.preheader
movl %r8d, %edx
shlq $2, %rdx
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.LBB5_2: # %.preheader23
testl %ebp, %ebp
jle .LBB5_7
# %bb.3: # %.preheader.lr.ph
movl %ebp, %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_4: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_5 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB5_5: # Parent Loop BB5_4 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%rdi,4), %eax
imull (%r15,%rsi,4), %eax
addl (%rbx,%rdi,4), %eax
cltd
idivl modBy(%rip)
movl %edx, (%rbx,%rdi,4)
incq %rdi
cmpq %rdi, %rcx
jne .LBB5_5
# %bb.6: # %._crit_edge
# in Loop: Header=BB5_4 Depth=1
incq %rsi
addq $4, %rbx
cmpq %rcx, %rsi
jne .LBB5_4
.LBB5_7: # %._crit_edge28
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z21multPolynomialsSerialPiS_iS_i, .Lfunc_end5-_Z21multPolynomialsSerialPiS_iS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23multPolynomialsParallelPiS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19sumProductsParalleliiPiS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type modBy,@object # @modBy
.data
.globl modBy
.p2align 2, 0x0
modBy:
.long 103 # 0x67
.size modBy, 4
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: "
.size .L.str, 162
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nYou entered '%d'.\n"
.size .L.str.2, 20
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time."
.size .L.str.3, 108
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n"
.size .L.str.4, 84
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%dx^%d "
.size .L.str.7, 8
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "+ "
.size .L.str.8, 3
.type _Z23multPolynomialsParallelPiS_S_ii,@object # @_Z23multPolynomialsParallelPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z23multPolynomialsParallelPiS_S_ii
.p2align 3, 0x0
_Z23multPolynomialsParallelPiS_S_ii:
.quad _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.size _Z23multPolynomialsParallelPiS_S_ii, 8
.type .L.str.11,@object # @.str.11
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.11:
.asciz "kernel invocation"
.size .L.str.11, 18
.type _Z19sumProductsParalleliiPiS_ii,@object # @_Z19sumProductsParalleliiPiS_ii
.section .rodata,"a",@progbits
.globl _Z19sumProductsParalleliiPiS_ii
.p2align 3, 0x0
_Z19sumProductsParalleliiPiS_ii:
.quad _Z34__device_stub__sumProductsParalleliiPiS_ii
.size _Z19sumProductsParalleliiPiS_ii, 8
.type .L.str.14,@object # @.str.14
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.14:
.asciz "Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n"
.size .L.str.14, 73
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "CUDA error: %s: %s.\n"
.size .L.str.17, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z23multPolynomialsParallelPiS_S_ii"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z19sumProductsParalleliiPiS_ii"
.size .L__unnamed_2, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Generating polynomials...\n"
.size .Lstr, 27
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "polyA:"
.size .Lstr.1, 7
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n\npolyB:"
.size .Lstr.2, 9
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Serial result:"
.size .Lstr.4, 15
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n\nParallel result:"
.size .Lstr.5, 19
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "\n"
.size .Lstr.6, 2
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "Looks like there were some discrepancies. Verification failed.\n"
.size .Lstr.7, 64
.type .Lstr.8,@object # @str.8
.Lstr.8:
.asciz "Verification successful. The serial and parallel polynomial multiplications produced the same result!\n"
.size .Lstr.8, 103
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.addrsig_sym _Z34__device_stub__sumProductsParalleliiPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23multPolynomialsParallelPiS_S_ii
.addrsig_sym _Z19sumProductsParalleliiPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z19sumProductsParalleliiPiS_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x178] ; /* 0x00005e00ff027624 */
/* 0x000fc600078e00ff */
/*0030*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0040*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fe20003f06270 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x160], !P0 ; /* 0x0000580000007a0c */
/* 0x000fda0004706670 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff027624 */
/* 0x000fca00078e00ff */
/*0090*/ ISETP.GE.AND P0, PT, R2, 0x1, PT ; /* 0x000000010200780c */
/* 0x000fda0003f06270 */
/*00a0*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*00b0*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, RZ ; /* 0x000000ffff047224 */
/* 0x000fe200078e00ff */
/*00d0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00e0*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fc800078e0203 */
/*00f0*/ IMAD.MOV.U32 R5, RZ, RZ, RZ ; /* 0x000000ffff057224 */
/* 0x001fc800078e00ff */
/*0100*/ IMAD.IADD R7, R5.reuse, 0x1, R4 ; /* 0x0000000105077824 */
/* 0x040fe200078e0204 */
/*0110*/ ISETP.EQ.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fc80003f22270 */
/*0120*/ ISETP.GT.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fda0003f04270 */
/*0130*/ @P0 EXIT P1 ; /* 0x000000000000094d */
/* 0x001fea0000800000 */
/*0140*/ ISETP.NE.AND P0, PT, R7, R0, PT ; /* 0x000000000700720c */
/* 0x000fe20003f05270 */
/*0150*/ BSSY B0, 0x340 ; /* 0x000001e000007945 */
/* 0x000fd80003800000 */
/*0160*/ @P0 BRA 0x330 ; /* 0x000001c000000947 */
/* 0x000fea0003800000 */
/*0170*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0180*/ IMAD R6, R4, c[0x0][0x0], R5 ; /* 0x0000000004067a24 */
/* 0x000fe200078e0205 */
/*0190*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x000eb0000c1e1900 */
/*01a0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fcc00078e0207 */
/*01b0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ea2000c1e1900 */
/*01c0*/ IABS R13, c[0x0][0x17c] ; /* 0x00005f00000d7a13 */
/* 0x000fc80000000000 */
/*01d0*/ I2F.RP R10, R13 ; /* 0x0000000d000a7306 */
/* 0x000e300000209400 */
/*01e0*/ MUFU.RCP R10, R10 ; /* 0x0000000a000a7308 */
/* 0x001e240000001000 */
/*01f0*/ IADD3 R8, R10, 0xffffffe, RZ ; /* 0x0ffffffe0a087810 */
/* 0x001fcc0007ffe0ff */
/*0200*/ F2I.FTZ.U32.TRUNC.NTZ R9, R8 ; /* 0x0000000800097305 */
/* 0x000064000021f000 */
/*0210*/ MOV R8, RZ ; /* 0x000000ff00087202 */
/* 0x001fe20000000f00 */
/*0220*/ IMAD.MOV R12, RZ, RZ, -R9 ; /* 0x000000ffff0c7224 */
/* 0x002fc800078e0a09 */
/*0230*/ IMAD R15, R12, R13, RZ ; /* 0x0000000d0c0f7224 */
/* 0x000fc800078e02ff */
/*0240*/ IMAD.HI.U32 R9, R9, R15, R8 ; /* 0x0000000f09097227 */
/* 0x000fc800078e0008 */
/*0250*/ IMAD.IADD R11, R6, 0x1, R11 ; /* 0x00000001060b7824 */
/* 0x004fca00078e020b */
/*0260*/ IABS R6, R11 ; /* 0x0000000b00067213 */
/* 0x000fca0000000000 */
/*0270*/ IMAD.HI.U32 R9, R9, R6, RZ ; /* 0x0000000609097227 */
/* 0x000fc800078e00ff */
/*0280*/ IMAD.MOV R9, RZ, RZ, -R9 ; /* 0x000000ffff097224 */
/* 0x000fc800078e0a09 */
/*0290*/ IMAD R6, R13, R9, R6 ; /* 0x000000090d067224 */
/* 0x000fca00078e0206 */
/*02a0*/ ISETP.GT.U32.AND P0, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x000fe40003f04070 */
/*02b0*/ ISETP.GE.AND P2, PT, R11, RZ, PT ; /* 0x000000ff0b00720c */
/* 0x000fd60003f46270 */
/*02c0*/ @!P0 IMAD.IADD R6, R6, 0x1, -R13 ; /* 0x0000000106068824 */
/* 0x000fca00078e0a0d */
/*02d0*/ ISETP.GT.U32.AND P1, PT, R13, R6, PT ; /* 0x000000060d00720c */
/* 0x000fe40003f24070 */
/*02e0*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */
/* 0x000fd60003f05270 */
/*02f0*/ @!P1 IADD3 R6, R6, -R13, RZ ; /* 0x8000000d06069210 */
/* 0x000fca0007ffe0ff */
/*0300*/ @!P2 IMAD.MOV R6, RZ, RZ, -R6 ; /* 0x000000ffff06a224 */
/* 0x000fe200078e0a06 */
/*0310*/ @!P0 LOP3.LUT R6, RZ, c[0x0][0x17c], RZ, 0x33, !PT ; /* 0x00005f00ff068a12 */
/* 0x000fca00078e33ff */
/*0320*/ STG.E [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x0001e4000c101904 */
/*0330*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0340*/ IADD3 R5, R5, 0x1, RZ ; /* 0x0000000105057810 */
/* 0x000fc80007ffe0ff */
/*0350*/ ISETP.GE.AND P0, PT, R5, c[0x0][0x164], PT ; /* 0x0000590005007a0c */
/* 0x000fda0003f06270 */
/*0360*/ @!P0 BRA 0x100 ; /* 0xfffffd9000008947 */
/* 0x000fea000383ffff */
/*0370*/ IADD3 R4, R4, 0x1, RZ ; /* 0x0000000104047810 */
/* 0x000fc80007ffe0ff */
/*0380*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x178], PT ; /* 0x00005e0004007a0c */
/* 0x000fda0003f06270 */
/*0390*/ @!P0 BRA 0xf0 ; /* 0xfffffd5000008947 */
/* 0x000fea000383ffff */
/*03a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03b0*/ BRA 0x3b0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z23multPolynomialsParallelPiS_S_ii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R9, SR_CTAID.X ; /* 0x0000000000097919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R10, SR_TID.X ; /* 0x00000000000a7919 */
/* 0x000e6e0000002100 */
/*0050*/ IMAD.WIDE R2, R9, R11, c[0x0][0x160] ; /* 0x0000580009027625 */
/* 0x001fc800078e020b */
/*0060*/ IMAD.WIDE R4, R10, R11, c[0x0][0x168] ; /* 0x00005a000a047625 */
/* 0x002fe400078e020b */
/*0070*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x0000a8000c1e1900 */
/*0080*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0090*/ IABS R13, c[0x0][0x17c] ; /* 0x00005f00000d7a13 */
/* 0x000fc80000000000 */
/*00a0*/ I2F.RP R8, R13 ; /* 0x0000000d00087306 */
/* 0x000e700000209400 */
/*00b0*/ MUFU.RCP R8, R8 ; /* 0x0000000800087308 */
/* 0x002e640000001000 */
/*00c0*/ IADD3 R6, R8, 0xffffffe, RZ ; /* 0x0ffffffe08067810 */
/* 0x002fcc0007ffe0ff */
/*00d0*/ F2I.FTZ.U32.TRUNC.NTZ R7, R6 ; /* 0x0000000600077305 */
/* 0x0002e4000021f000 */
/*00e0*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x002fe200000001ff */
/*00f0*/ IMAD.MOV R12, RZ, RZ, -R7 ; /* 0x000000ffff0c7224 */
/* 0x008fc800078e0a07 */
/*0100*/ IMAD R3, R12, R13, RZ ; /* 0x0000000d0c037224 */
/* 0x001fca00078e02ff */
/*0110*/ IMAD.HI.U32 R7, R7, R3, R6 ; /* 0x0000000307077227 */
/* 0x000fc800078e0006 */
/*0120*/ IMAD R0, R2, R5, RZ ; /* 0x0000000502007224 */
/* 0x004fca00078e02ff */
/*0130*/ IABS R2, R0 ; /* 0x0000000000027213 */
/* 0x000fe40000000000 */
/*0140*/ ISETP.GE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fc60003f46270 */
/*0150*/ IMAD.HI.U32 R7, R7, R2, RZ ; /* 0x0000000207077227 */
/* 0x000fc800078e00ff */
/*0160*/ IMAD.MOV R7, RZ, RZ, -R7 ; /* 0x000000ffff077224 */
/* 0x000fc800078e0a07 */
/*0170*/ IMAD R2, R13, R7, R2 ; /* 0x000000070d027224 */
/* 0x000fca00078e0202 */
/*0180*/ ISETP.GT.U32.AND P0, PT, R13, R2, PT ; /* 0x000000020d00720c */
/* 0x000fda0003f04070 */
/*0190*/ @!P0 IADD3 R2, R2, -R13, RZ ; /* 0x8000000d02028210 */
/* 0x000fe40007ffe0ff */
/*01a0*/ ISETP.NE.AND P0, PT, RZ, c[0x0][0x17c], PT ; /* 0x00005f00ff007a0c */
/* 0x000fe40003f05270 */
/*01b0*/ ISETP.GT.U32.AND P1, PT, R13, R2, PT ; /* 0x000000020d00720c */
/* 0x000fda0003f24070 */
/*01c0*/ @!P1 IMAD.IADD R2, R2, 0x1, -R13 ; /* 0x0000000102029824 */
/* 0x000fca00078e0a0d */
/*01d0*/ MOV R5, R2 ; /* 0x0000000200057202 */
/* 0x000fe20000000f00 */
/*01e0*/ IMAD R2, R9, c[0x0][0x0], R10 ; /* 0x0000000009027a24 */
/* 0x000fc800078e020a */
/*01f0*/ @!P2 IMAD.MOV R5, RZ, RZ, -R5 ; /* 0x000000ffff05a224 */
/* 0x000fe200078e0a05 */
/*0200*/ @!P0 LOP3.LUT R5, RZ, c[0x0][0x17c], RZ, 0x33, !PT ; /* 0x00005f00ff058a12 */
/* 0x000fe200078e33ff */
/*0210*/ IMAD.WIDE R2, R2, R11, c[0x0][0x170] ; /* 0x00005c0002027625 */
/* 0x000fca00078e020b */
/*0220*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z23multPolynomialsParallelPiS_S_ii
.globl _Z23multPolynomialsParallelPiS_S_ii
.p2align 8
.type _Z23multPolynomialsParallelPiS_S_ii,@function
_Z23multPolynomialsParallelPiS_S_ii:
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[8:9], s[0:1], 0x10
v_lshlrev_b32_e32 v1, 2, v0
s_mov_b32 s2, s15
s_ashr_i32 s3, s15, 31
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v1, s[6:7]
s_clause 0x1
s_load_b32 s6, s[0:1], 0x2c
s_load_b32 s7, s[0:1], 0x1c
s_lshl_b64 s[0:1], s[2:3], 2
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s6, 0xffff
s_add_u32 s0, s4, s0
s_addc_u32 s1, s5, s1
s_ashr_i32 s4, s7, 31
s_load_b32 s0, s[0:1], 0x0
s_add_i32 s7, s7, s4
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s4, s7, s4
v_cvt_f32_u32_e32 v2, s4
s_sub_i32 s1, 0, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v2, v2
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v2, 0x4f7ffffe, v2
v_cvt_u32_f32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v3, s1, v2
v_mul_hi_u32 v3, v2, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v2, v2, v3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_mul_lo_u32 v1, v1, s0
v_ashrrev_i32_e32 v4, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v4
v_xor_b32_e32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v2, v1, v2
v_mul_lo_u32 v2, v2, s4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v1, v1, v2
v_subrev_nc_u32_e32 v2, s4, v1
v_cmp_le_u32_e32 vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v3, v1, v2, vcc_lo
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_subrev_nc_u32_e32 v0, s4, v3
v_cmp_le_u32_e32 vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v2, 31, v1
v_cndmask_b32_e32 v0, v3, v0, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_xor_b32_e32 v3, v0, v4
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v2, v3, v4
v_add_co_u32 v0, vcc_lo, s8, v0
s_delay_alu instid0(VALU_DEP_3)
v_add_co_ci_u32_e32 v1, vcc_lo, s9, v1, vcc_lo
global_store_b32 v[0:1], v2, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23multPolynomialsParallelPiS_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z23multPolynomialsParallelPiS_S_ii, .Lfunc_end0-_Z23multPolynomialsParallelPiS_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z19sumProductsParalleliiPiS_ii
.globl _Z19sumProductsParalleliiPiS_ii
.p2align 8
.type _Z19sumProductsParalleliiPiS_ii,@function
_Z19sumProductsParalleliiPiS_ii:
s_clause 0x2
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x0
s_load_b32 s4, s[0:1], 0x18
s_mov_b32 s6, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s5, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s5, v[0:1]
s_cmp_gt_i32 s4, 0
s_cselect_b32 s2, -1, 0
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB1_13
s_clause 0x2
s_load_b32 s7, s[0:1], 0x4
s_load_b32 s9, s[0:1], 0x1c
s_load_b128 s[0:3], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v4, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s7, 0
s_cselect_b32 s8, -1, 0
s_ashr_i32 s10, s9, 31
v_add_co_u32 v2, vcc_lo, s0, v2
s_add_i32 s9, s9, s10
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v3, vcc_lo
s_xor_b32 s9, s9, s10
s_mov_b32 s0, 0
v_cvt_f32_u32_e32 v0, s9
s_sub_i32 s10, 0, s9
s_mov_b32 s1, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v5, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, s10, v5
v_mul_hi_u32 v6, v5, v0
v_sub_nc_u32_e32 v0, 0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_nc_u32_e32 v5, v5, v6
s_branch .LBB1_3
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s10, exec_lo, s11
s_or_b32 s6, s10, s6
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execz .LBB1_13
.LBB1_3:
s_and_not1_b32 vcc_lo, exec_lo, s8
s_mov_b32 s10, -1
s_cbranch_vccnz .LBB1_11
v_add_nc_u32_e32 v6, s1, v0
s_mov_b32 s11, 0
s_mov_b32 s12, 0
s_branch .LBB1_7
.LBB1_5:
s_or_b32 exec_lo, exec_lo, s16
s_add_i32 s12, s12, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s7, s12
s_cselect_b32 s16, -1, 0
s_and_not1_b32 s14, s14, exec_lo
s_and_b32 s16, s16, exec_lo
s_and_not1_b32 s13, s13, exec_lo
s_or_b32 s14, s14, s16
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s15
s_xor_b32 s15, s13, -1
s_and_b32 s16, exec_lo, s14
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s11, s16, s11
s_and_not1_b32 s10, s10, exec_lo
s_and_b32 s15, s15, exec_lo
s_or_b32 s10, s10, s15
s_and_not1_b32 exec_lo, exec_lo, s11
s_cbranch_execz .LBB1_10
.LBB1_7:
s_add_i32 s15, s1, s12
s_cmp_lg_u32 s12, 0
v_cmp_le_i32_e32 vcc_lo, s15, v1
s_cselect_b32 s15, -1, 0
s_or_b32 s13, s13, exec_lo
s_or_b32 s14, s14, exec_lo
s_or_b32 s16, s15, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s15, s16
s_cbranch_execz .LBB1_6
v_add_nc_u32_e32 v7, s12, v6
s_mov_b32 s16, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v7
s_cbranch_execz .LBB1_5
s_add_i32 s18, s0, s12
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_ashr_i32 s19, s18, 31
s_lshl_b64 s[18:19], s[18:19], 2
s_delay_alu instid0(SALU_CYCLE_1)
s_add_u32 s18, s2, s18
s_addc_u32 s19, s3, s19
global_load_b32 v7, v[2:3], off
global_load_b32 v8, v4, s[18:19]
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v7, v8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v8, 31, v7
v_add_nc_u32_e32 v7, v7, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v7, v7, v8
v_mul_hi_u32 v9, v7, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v9, v9, s9
v_sub_nc_u32_e32 v7, v7, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v9, s9, v7
v_cmp_le_u32_e32 vcc_lo, s9, v7
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v9, s9, v7
v_cmp_le_u32_e32 vcc_lo, s9, v7
v_cndmask_b32_e32 v7, v7, v9, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v7, v7, v8
v_sub_nc_u32_e32 v7, v7, v8
global_store_b32 v[2:3], v7, off
s_branch .LBB1_5
.LBB1_10:
s_or_b32 exec_lo, exec_lo, s11
.LBB1_11:
s_mov_b32 s11, -1
s_and_saveexec_b32 s12, s10
s_cbranch_execz .LBB1_2
s_add_i32 s1, s1, 1
s_add_i32 s0, s0, s5
s_cmp_eq_u32 s1, s4
s_cselect_b32 s10, -1, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s11, s10, exec_lo
s_branch .LBB1_2
.LBB1_13:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z19sumProductsParalleliiPiS_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 10
.amdhsa_next_free_sgpr 20
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z19sumProductsParalleliiPiS_ii, .Lfunc_end1-_Z19sumProductsParalleliiPiS_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23multPolynomialsParallelPiS_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23multPolynomialsParallelPiS_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z19sumProductsParalleliiPiS_ii
.private_segment_fixed_size: 0
.sgpr_count: 22
.sgpr_spill_count: 0
.symbol: _Z19sumProductsParalleliiPiS_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 10
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000e6cb3_00000000-6_q1_swhela2.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2063:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2063:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z14genPolynomialsPiS_i
.type _Z14genPolynomialsPiS_i, @function
_Z14genPolynomialsPiS_i:
.LFB2058:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L10
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rsi, %rbx
movq %rdi, %rbp
movslq %edx, %rdx
leaq (%rsi,%rdx,4), %r13
movl $1, %r12d
.L7:
call rand@PLT
cltd
idivl modBy(%rip)
testl %edx, %edx
cmove %r12d, %edx
movl %edx, 0(%rbp)
call rand@PLT
cltd
idivl modBy(%rip)
testl %edx, %edx
cmove %r12d, %edx
movl %edx, (%rbx)
addq $4, %rbx
addq $4, %rbp
cmpq %r13, %rbx
jne .L7
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L10:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2058:
.size _Z14genPolynomialsPiS_i, .-_Z14genPolynomialsPiS_i
.globl _Z21multPolynomialsSerialPiS_iS_i
.type _Z21multPolynomialsSerialPiS_iS_i, @function
_Z21multPolynomialsSerialPiS_iS_i:
.LFB2059:
.cfi_startproc
endbr64
movq %rsi, %r10
movl %edx, %r9d
movq %rcx, %r11
testl %r8d, %r8d
jle .L14
movq %rcx, %rax
movslq %r8d, %r8
leaq (%rcx,%r8,4), %rdx
.L15:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L15
.L14:
testl %r9d, %r9d
jle .L22
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movslq %r9d, %rax
leaq (%r10,%rax,4), %r8
movl $0, %ebx
.L17:
movq %r10, %rsi
movq %r11, %rcx
.L18:
movl (%rdi), %eax
imull (%rsi), %eax
addl (%rcx), %eax
cltd
idivl modBy(%rip)
movl %edx, (%rcx)
addq $4, %rcx
addq $4, %rsi
cmpq %r8, %rsi
jne .L18
addl $1, %ebx
addq $4, %r11
addq $4, %rdi
cmpl %ebx, %r9d
jne .L17
popq %rbx
.cfi_def_cfa_offset 8
ret
.L22:
.cfi_restore 3
ret
.cfi_endproc
.LFE2059:
.size _Z21multPolynomialsSerialPiS_iS_i, .-_Z21multPolynomialsSerialPiS_iS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "CUDA error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L28
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2060:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
.type _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii, @function
_Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii:
.LFB2085:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23multPolynomialsParallelPiS_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii, .-_Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
.globl _Z23multPolynomialsParallelPiS_S_ii
.type _Z23multPolynomialsParallelPiS_S_ii, @function
_Z23multPolynomialsParallelPiS_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z23multPolynomialsParallelPiS_S_ii, .-_Z23multPolynomialsParallelPiS_S_ii
.globl _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
.type _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii, @function
_Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 4(%rsp), %rax
movq %rax, 128(%rsp)
movq %rsp, %rax
movq %rax, 136(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 184
pushq 40(%rsp)
.cfi_def_cfa_offset 192
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z19sumProductsParalleliiPiS_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii, .-_Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
.globl _Z19sumProductsParalleliiPiS_ii
.type _Z19sumProductsParalleliiPiS_ii, @function
_Z19sumProductsParalleliiPiS_ii:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z19sumProductsParalleliiPiS_ii, .-_Z19sumProductsParalleliiPiS_ii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: "
.section .rodata.str1.1
.LC2:
.string "%d"
.LC3:
.string "\nYou entered '%d'.\n"
.section .rodata.str1.8
.align 8
.LC4:
.string "Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time."
.align 8
.LC5:
.string "Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n"
.section .rodata.str1.1
.LC6:
.string "Generating polynomials...\n\n"
.LC7:
.string "polyA:\n"
.LC8:
.string "%dx^%d "
.LC9:
.string "+ "
.LC10:
.string "\n\npolyB:\n"
.LC11:
.string "\n\n"
.LC12:
.string "kernel invocation"
.LC13:
.string "Serial result:\n"
.LC14:
.string "\n\nParallel result:\n"
.section .rodata.str1.8
.align 8
.LC15:
.string "Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n"
.align 8
.LC16:
.string "Verification successful. The serial and parallel polynomial multiplications produced the same result!\n\n"
.align 8
.LC17:
.string "Looks like there were some discrepancies. Verification failed.\n\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 60(%rsp), %rsi
leaq .LC2(%rip), %rdi
movl $0, %eax
call __isoc23_scanf@PLT
movl 60(%rsp), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %ecx
cmpl $10, %ecx
jg .L79
movl $1, %edx
sall %cl, %edx
movl %edx, 60(%rsp)
leal -1(%rdx), %ecx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %r15d
movslq %r15d, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r13
movq %rbx, %rdi
call malloc@PLT
movq %rax, %r14
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %edx
movq %r14, %rsi
movq %r13, %rdi
call _Z14genPolynomialsPiS_i
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 60(%rsp)
jle .L48
movl $0, %ebx
leaq .LC8(%rip), %rbp
leaq .LC9(%rip), %r12
jmp .L50
.L79:
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L45
.L49:
addq $1, %rbx
cmpl %ebx, 60(%rsp)
jle .L48
.L50:
movl 0(%r13,%rbx,4), %edx
movl %ebx, %ecx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L49
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L49
.L48:
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 60(%rsp)
jle .L51
movl $0, %ebx
leaq .LC8(%rip), %rbp
leaq .LC9(%rip), %r12
jmp .L53
.L52:
addq $1, %rbx
cmpl %ebx, 60(%rsp)
jle .L51
.L53:
movl (%r14,%rbx,4), %edx
movl %ebx, %ecx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %eax
subl $1, %eax
cmpl %ebx, %eax
je .L52
movq %r12, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L52
.L51:
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 60(%rsp), %r12d
leal -2(%r12,%r12), %eax
movl %eax, 40(%rsp)
addl $1, %eax
movl %eax, 32(%rsp)
cltq
leaq 0(,%rax,4), %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movl %r12d, %eax
imull %r12d, %eax
movl %eax, 44(%rsp)
movslq %eax, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
movq %rbp, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
cmpl $0, 44(%rsp)
jle .L54
movq 16(%rsp), %rcx
movq %rcx, %rax
addq %rcx, %rbx
.L55:
movl $0, (%rax)
addq $4, %rax
cmpq %rbx, %rax
jne .L55
.L54:
cmpl $0, 40(%rsp)
js .L56
movq 8(%rsp), %rcx
movq %rcx, %rax
leal -1(%r12,%r12), %edx
leaq (%rcx,%rdx,4), %rdx
.L57:
movl $0, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L57
.L56:
movslq %r12d, %rsi
salq $2, %rsi
leaq 64(%rsp), %rdi
call cudaMalloc@PLT
movslq 60(%rsp), %rsi
salq $2, %rsi
leaq 72(%rsp), %rdi
call cudaMalloc@PLT
movl 60(%rsp), %esi
imull %esi, %esi
movslq %esi, %rsi
salq $2, %rsi
leaq 80(%rsp), %rdi
call cudaMalloc@PLT
movslq 60(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r13, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movslq 60(%rsp), %rdx
salq $2, %rdx
movl $1, %ecx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq 16(%rsp), %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl %r15d, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl %r15d, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 108(%rsp), %rdx
movl $1, %ecx
movq 96(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L80
.L58:
call cudaThreadSynchronize@PLT
leaq .LC12(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $2, %ecx
movq 80(%rsp), %rsi
movq 16(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 88(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq 88(%rsp), %rdi
call cudaMemcpy@PLT
movl 60(%rsp), %edx
imull %edx, %edx
movslq %edx, %rdx
salq $2, %rdx
movl $1, %ecx
movq %rbx, %rsi
movq 80(%rsp), %rdi
call cudaMemcpy@PLT
movl 116(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 108(%rsp), %rdx
movq 96(%rsp), %rdi
movl 104(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L81
.L59:
call cudaThreadSynchronize@PLT
leaq .LC12(%rip), %rdi
call _Z14checkCUDAErrorPKc
movl $2, %ecx
movq %rbp, %rdx
movq 88(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl 32(%rsp), %r8d
movq 24(%rsp), %rbp
movq %rbp, %rcx
movl 60(%rsp), %edx
movq %r14, %rsi
movq %r13, %rdi
call _Z21multPolynomialsSerialPiS_iS_i
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $0, 40(%rsp)
js .L60
leal -2(%r12,%r12), %r12d
leal 1(%r12), %r15d
movl $0, %ebx
jmp .L62
.L80:
movl modBy(%rip), %r8d
movl 60(%rsp), %ecx
movq 80(%rsp), %rdx
movq 72(%rsp), %rsi
movq 64(%rsp), %rdi
call _Z49__device_stub__Z23multPolynomialsParallelPiS_S_iiPiS_S_ii
jmp .L58
.L81:
movl modBy(%rip), %r9d
movl %r15d, %r8d
movq 80(%rsp), %rcx
movq 88(%rsp), %rdx
movl %r15d, %esi
movl 32(%rsp), %edi
call _Z45__device_stub__Z19sumProductsParalleliiPiS_iiiiPiS_ii
jmp .L59
.L82:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %r15d, %ebx
je .L61
.L62:
movl 0(%rbp), %edx
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl %r12d, %ebx
jne .L82
.L61:
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rbp
leal 1(%r12), %r15d
movl $0, %ebx
jmp .L64
.L83:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
addq $4, %rbp
cmpl %r15d, %ebx
je .L63
.L64:
movl 0(%rbp), %edx
movl %ebx, %ecx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl %ebx, %r12d
jne .L83
.L63:
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %ebx
movl $1, %eax
leaq .LC15(%rip), %rbp
movq %r13, 32(%rsp)
movl 40(%rsp), %r12d
movq 24(%rsp), %r13
movq 8(%rsp), %r15
jmp .L66
.L85:
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %eax
.L65:
addq $1, %rbx
cmpl %ebx, %r12d
jl .L84
.L66:
movl 0(%r13,%rbx,4), %ecx
movl (%r15,%rbx,4), %r8d
cmpl %r8d, %ecx
jne .L85
jmp .L65
.L84:
movq 32(%rsp), %r13
testb %al, %al
jne .L69
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L68
.L60:
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L69:
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L68:
movq %r13, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
movq 24(%rsp), %rdi
call free@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 8(%rsp), %rdi
call free@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L45:
movq 120(%rsp), %rdx
subq %fs:40, %rdx
jne .L86
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L86:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC18:
.string "_Z19sumProductsParalleliiPiS_ii"
.align 8
.LC19:
.string "_Z23multPolynomialsParallelPiS_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC18(%rip), %rdx
movq %rdx, %rcx
leaq _Z19sumProductsParalleliiPiS_ii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC19(%rip), %rdx
movq %rdx, %rcx
leaq _Z23multPolynomialsParallelPiS_S_ii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl modBy
.data
.align 4
.type modBy, @object
.size modBy, 4
modBy:
.long 103
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "q1_swhela2.hip"
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq %rsp, %rsi
movl $.L.str.1, %edi
xorl %eax, %eax
callq __isoc23_scanf
movl (%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movl (%rsp), %ecx
cmpl $11, %ecx
jl .LBB0_2
# %bb.1:
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl $1, %eax
jmp .LBB0_50
.LBB0_2:
movl $1, %esi
# kill: def $cl killed $cl killed $ecx
shll %cl, %esi
movl %esi, (%rsp)
leal -1(%rsi), %edx
movl $.L.str.4, %edi
# kill: def $esi killed $esi killed $rsi
xorl %eax, %eax
callq printf
movslq (%rsp), %r13
leaq (,%r13,4), %r14
movq %r14, %rdi
callq malloc
movq %rax, %rbx
movq %r14, %rdi
callq malloc
movq %rax, %r14
movl $.Lstr, %edi
callq puts@PLT
movl (%rsp), %r15d
testl %r15d, %r15d
jle .LBB0_5
# %bb.3: # %.lr.ph.preheader.i
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_4: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%rbx,%r12,4)
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%r14,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB0_4
.LBB0_5: # %_Z14genPolynomialsPiS_i.exit
movl $.Lstr.1, %edi
callq puts@PLT
cmpl $0, (%rsp)
jle .LBB0_10
# %bb.6: # %.lr.ph.preheader
xorl %r15d, %r15d
jmp .LBB0_7
.p2align 4, 0x90
.LBB0_9: # in Loop: Header=BB0_7 Depth=1
incq %r15
movslq (%rsp), %rax
cmpq %rax, %r15
jge .LBB0_10
.LBB0_7: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %esi
movl $.L.str.7, %edi
movl %r15d, %edx
xorl %eax, %eax
callq printf
movl (%rsp), %eax
decl %eax
cmpq %rax, %r15
je .LBB0_9
# %bb.8: # in Loop: Header=BB0_7 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_9
.LBB0_10: # %._crit_edge
movl $.Lstr.2, %edi
callq puts@PLT
cmpl $0, (%rsp)
jle .LBB0_15
# %bb.11: # %.lr.ph140.preheader
xorl %r15d, %r15d
jmp .LBB0_12
.p2align 4, 0x90
.LBB0_14: # in Loop: Header=BB0_12 Depth=1
incq %r15
movslq (%rsp), %rax
cmpq %rax, %r15
jge .LBB0_15
.LBB0_12: # %.lr.ph140
# =>This Inner Loop Header: Depth=1
movl (%r14,%r15,4), %esi
movl $.L.str.7, %edi
movl %r15d, %edx
xorl %eax, %eax
callq printf
movl (%rsp), %eax
decl %eax
cmpq %rax, %r15
je .LBB0_14
# %bb.13: # in Loop: Header=BB0_12 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_14
.LBB0_15: # %._crit_edge141
movq %r13, 48(%rsp) # 8-byte Spill
movl %r13d, %r12d
movl $.Lstr.6, %edi
callq puts@PLT
movslq (%rsp), %r13
leal -1(,%r13,2), %eax
movl %eax, 4(%rsp) # 4-byte Spill
movslq %eax, %r15
shlq $2, %r15
movq %r15, %rdi
callq malloc
movq %rax, 208(%rsp) # 8-byte Spill
movl %r13d, %ebp
imull %ebp, %ebp
leaq (,%rbp,4), %rdi
callq malloc
movq %r15, 200(%rsp) # 8-byte Spill
movq %r15, %rdi
movq %rax, %r15
callq malloc
movq %rax, 16(%rsp) # 8-byte Spill
movq %r13, 40(%rsp) # 8-byte Spill
testq %r13, %r13
je .LBB0_17
# %bb.16: # %.lr.ph144.preheader
cmpl $1, %ebp
adcl $0, %ebp
shlq $2, %rbp
movq %r15, %rdi
xorl %esi, %esi
movq %rbp, %rdx
callq memset@PLT
.LBB0_17: # %.preheader
cmpl $0, 4(%rsp) # 4-byte Folded Reload
movq 40(%rsp), %rbp # 8-byte Reload
jle .LBB0_19
# %bb.18: # %.lr.ph146.preheader
leal (,%rbp,2), %eax
addl $-2, %eax
leaq 4(,%rax,4), %rdx
movq 16(%rsp), %rdi # 8-byte Reload
xorl %esi, %esi
callq memset@PLT
.LBB0_19: # %._crit_edge147
movq %rbp, %rsi
shlq $2, %rsi
leaq 64(%rsp), %rdi
callq hipMalloc
movslq (%rsp), %rsi
shlq $2, %rsi
leaq 56(%rsp), %rdi
callq hipMalloc
movl (%rsp), %esi
imull %esi, %esi
shlq $2, %rsi
leaq 24(%rsp), %rdi
callq hipMalloc
movq 64(%rsp), %rdi
movslq (%rsp), %rdx
shlq $2, %rdx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 56(%rsp), %rdi
movslq (%rsp), %rdx
shlq $2, %rdx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rax # imm = 0x100000000
orq %rax, %r12
movq %r12, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
movq 208(%rsp), %r13 # 8-byte Reload
jne .LBB0_21
# %bb.20:
movq 64(%rsp), %rax
movq 56(%rsp), %rcx
movq 24(%rsp), %rdx
movl (%rsp), %esi
movl modBy(%rip), %edi
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movq %rdx, 80(%rsp)
movl %esi, 12(%rsp)
movl %edi, 8(%rsp)
leaq 128(%rsp), %rax
movq %rax, 144(%rsp)
leaq 120(%rsp), %rax
movq %rax, 152(%rsp)
leaq 80(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 8(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z23multPolynomialsParallelPiS_S_ii, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_21:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
movq 200(%rsp), %rbp # 8-byte Reload
jne .LBB0_51
# %bb.22: # %_Z14checkCUDAErrorPKc.exit
movq 24(%rsp), %rsi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq 32(%rsp), %rdi
movq 16(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl (%rsp), %edx
imull %edx, %edx
shlq $2, %rdx
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r12, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB0_24
# %bb.23:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movl modBy(%rip), %edx
movl 4(%rsp), %esi # 4-byte Reload
movl %esi, 12(%rsp)
movq 48(%rsp), %rsi # 8-byte Reload
movl %esi, 8(%rsp)
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl %esi, 140(%rsp)
movl %edx, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 128(%rsp), %rax
movq %rax, 160(%rsp)
leaq 120(%rsp), %rax
movq %rax, 168(%rsp)
leaq 140(%rsp), %rax
movq %rax, 176(%rsp)
leaq 136(%rsp), %rax
movq %rax, 184(%rsp)
leaq 104(%rsp), %rdi
leaq 88(%rsp), %rsi
leaq 80(%rsp), %rdx
leaq 72(%rsp), %rcx
callq __hipPopCallConfiguration
movq 104(%rsp), %rsi
movl 112(%rsp), %edx
movq 88(%rsp), %rcx
movl 96(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z19sumProductsParalleliiPiS_ii, %edi
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB0_24:
movq %r15, 48(%rsp) # 8-byte Spill
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB0_51
# %bb.25: # %_Z14checkCUDAErrorPKc.exit125
movq 32(%rsp), %rsi
movq 16(%rsp), %rdi # 8-byte Reload
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movl (%rsp), %r15d
movl 4(%rsp), %eax # 4-byte Reload
movl %eax, %ebp
testl %eax, %eax
jle .LBB0_27
# %bb.26: # %.lr.ph.preheader.i130
leaq (,%rbp,4), %rdx
movq %r13, %rdi
xorl %esi, %esi
callq memset@PLT
.LBB0_27: # %.preheader23.i
movq 40(%rsp), %rax # 8-byte Reload
leal -2(,%rax,2), %r12d
testl %r15d, %r15d
jle .LBB0_32
# %bb.28: # %.preheader.lr.ph.i
movl modBy(%rip), %ecx
xorl %esi, %esi
movq %r13, %rdi
.p2align 4, 0x90
.LBB0_29: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_30 Depth 2
movl (%rbx,%rsi,4), %r8d
xorl %r9d, %r9d
.p2align 4, 0x90
.LBB0_30: # Parent Loop BB0_29 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%r9,4), %eax
imull %r8d, %eax
addl (%rdi,%r9,4), %eax
cltd
idivl %ecx
movl %edx, (%rdi,%r9,4)
incq %r9
cmpq %r9, %r15
jne .LBB0_30
# %bb.31: # %._crit_edge.i
# in Loop: Header=BB0_29 Depth=1
incq %rsi
addq $4, %rdi
cmpq %r15, %rsi
jne .LBB0_29
.LBB0_32: # %_Z21multPolynomialsSerialPiS_iS_i.exit
movq %rbx, 40(%rsp) # 8-byte Spill
movq %r13, %rbx
movl $.Lstr.4, %edi
callq puts@PLT
movl %r12d, %r15d
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_37
# %bb.33: # %.lr.ph149.preheader
leaq 1(%r15), %r13
xorl %r12d, %r12d
jmp .LBB0_34
.p2align 4, 0x90
.LBB0_36: # in Loop: Header=BB0_34 Depth=1
incq %r12
cmpq %r12, %r13
je .LBB0_37
.LBB0_34: # %.lr.ph149
# =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $.L.str.7, %edi
movl %r12d, %edx
xorl %eax, %eax
callq printf
cmpq %r12, %r15
je .LBB0_36
# %bb.35: # in Loop: Header=BB0_34 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_36
.LBB0_37: # %._crit_edge150
movl $.Lstr.5, %edi
callq puts@PLT
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_42
# %bb.38: # %.lr.ph153.preheader
leaq 1(%r15), %r13
xorl %r12d, %r12d
jmp .LBB0_39
.p2align 4, 0x90
.LBB0_41: # in Loop: Header=BB0_39 Depth=1
incq %r12
cmpq %r12, %r13
je .LBB0_42
.LBB0_39: # %.lr.ph153
# =>This Inner Loop Header: Depth=1
movq 16(%rsp), %rax # 8-byte Reload
movl (%rax,%r12,4), %esi
movl $.L.str.7, %edi
movl %r12d, %edx
xorl %eax, %eax
callq printf
cmpq %r12, %r15
je .LBB0_41
# %bb.40: # in Loop: Header=BB0_39 Depth=1
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
jmp .LBB0_41
.LBB0_42: # %._crit_edge154
movl $.Lstr.6, %edi
callq puts@PLT
cmpl $0, 4(%rsp) # 4-byte Folded Reload
jle .LBB0_43
# %bb.44: # %.lr.ph158.preheader
movb $1, %r15b
xorl %r12d, %r12d
movq %rbx, %r13
movq 40(%rsp), %rbx # 8-byte Reload
jmp .LBB0_45
.p2align 4, 0x90
.LBB0_47: # in Loop: Header=BB0_45 Depth=1
incq %r12
cmpq %r12, %rbp
je .LBB0_48
.LBB0_45: # %.lr.ph158
# =>This Inner Loop Header: Depth=1
movl (%r13,%r12,4), %edx
movq 16(%rsp), %rax # 8-byte Reload
movl (%rax,%r12,4), %ecx
cmpl %ecx, %edx
je .LBB0_47
# %bb.46: # in Loop: Header=BB0_45 Depth=1
xorl %r15d, %r15d
movl $.L.str.14, %edi
movl %r12d, %esi
xorl %eax, %eax
callq printf
jmp .LBB0_47
.LBB0_48: # %._crit_edge159.loopexit
testb $1, %r15b
movl $.Lstr.7, %eax
movl $.Lstr.8, %edi
cmoveq %rax, %rdi
jmp .LBB0_49
.LBB0_43:
movl $.Lstr.8, %edi
movq %rbx, %r13
movq 40(%rsp), %rbx # 8-byte Reload
.LBB0_49: # %.critedge
callq puts@PLT
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r13, %rdi
callq free
movq 48(%rsp), %rdi # 8-byte Reload
callq free
movq 16(%rsp), %rdi # 8-byte Reload
callq free
movq 64(%rsp), %rdi
callq hipFree
movq 56(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
xorl %eax, %eax
.LBB0_50:
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB0_51:
.cfi_def_cfa_offset 272
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.17, %esi
movl $.L.str.11, %edx
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z14genPolynomialsPiS_i # -- Begin function _Z14genPolynomialsPiS_i
.p2align 4, 0x90
.type _Z14genPolynomialsPiS_i,@function
_Z14genPolynomialsPiS_i: # @_Z14genPolynomialsPiS_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq %rdi, %r14
movl %edx, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%r14,%r12,4)
callq rand
cltd
idivl modBy(%rip)
cmpl $1, %edx
adcl $0, %edx
movl %edx, (%rbx,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB1_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z14genPolynomialsPiS_i, .Lfunc_end1-_Z14genPolynomialsPiS_i
.cfi_endproc
# -- End function
.globl _Z38__device_stub__multPolynomialsParallelPiS_S_ii # -- Begin function _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.p2align 4, 0x90
.type _Z38__device_stub__multPolynomialsParallelPiS_S_ii,@function
_Z38__device_stub__multPolynomialsParallelPiS_S_ii: # @_Z38__device_stub__multPolynomialsParallelPiS_S_ii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23multPolynomialsParallelPiS_S_ii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end2:
.size _Z38__device_stub__multPolynomialsParallelPiS_S_ii, .Lfunc_end2-_Z38__device_stub__multPolynomialsParallelPiS_S_ii
.cfi_endproc
# -- End function
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB3_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB3_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str.17, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end3:
.size _Z14checkCUDAErrorPKc, .Lfunc_end3-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.globl _Z34__device_stub__sumProductsParalleliiPiS_ii # -- Begin function _Z34__device_stub__sumProductsParalleliiPiS_ii
.p2align 4, 0x90
.type _Z34__device_stub__sumProductsParalleliiPiS_ii,@function
_Z34__device_stub__sumProductsParalleliiPiS_ii: # @_Z34__device_stub__sumProductsParalleliiPiS_ii
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
leaq 12(%rsp), %rax
movq %rax, 80(%rsp)
leaq 8(%rsp), %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z19sumProductsParalleliiPiS_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end4:
.size _Z34__device_stub__sumProductsParalleliiPiS_ii, .Lfunc_end4-_Z34__device_stub__sumProductsParalleliiPiS_ii
.cfi_endproc
# -- End function
.globl _Z21multPolynomialsSerialPiS_iS_i # -- Begin function _Z21multPolynomialsSerialPiS_iS_i
.p2align 4, 0x90
.type _Z21multPolynomialsSerialPiS_iS_i,@function
_Z21multPolynomialsSerialPiS_iS_i: # @_Z21multPolynomialsSerialPiS_iS_i
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rcx, %rbx
movl %edx, %ebp
movq %rsi, %r14
movq %rdi, %r15
testl %r8d, %r8d
jle .LBB5_2
# %bb.1: # %.lr.ph.preheader
movl %r8d, %edx
shlq $2, %rdx
movq %rbx, %rdi
xorl %esi, %esi
callq memset@PLT
.LBB5_2: # %.preheader23
testl %ebp, %ebp
jle .LBB5_7
# %bb.3: # %.preheader.lr.ph
movl %ebp, %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_4: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB5_5 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB5_5: # Parent Loop BB5_4 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r14,%rdi,4), %eax
imull (%r15,%rsi,4), %eax
addl (%rbx,%rdi,4), %eax
cltd
idivl modBy(%rip)
movl %edx, (%rbx,%rdi,4)
incq %rdi
cmpq %rdi, %rcx
jne .LBB5_5
# %bb.6: # %._crit_edge
# in Loop: Header=BB5_4 Depth=1
incq %rsi
addq $4, %rbx
cmpq %rcx, %rsi
jne .LBB5_4
.LBB5_7: # %._crit_edge28
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end5:
.size _Z21multPolynomialsSerialPiS_iS_i, .Lfunc_end5-_Z21multPolynomialsSerialPiS_iS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23multPolynomialsParallelPiS_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z19sumProductsParalleliiPiS_ii, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type modBy,@object # @modBy
.data
.globl modBy
.p2align 2, 0x0
modBy:
.long 103 # 0x67
.size modBy, 4
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Specify the number of terms in the polynomial by specifying the exponent on base 2, UP TO 10, e.g. enter '3' if you want 2^3 terms (AKA 8 terms) per polynomial: "
.size .L.str, 162
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "%d"
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nYou entered '%d'.\n"
.size .L.str.2, 20
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Invalid entry. The maximum number of terms is 2^10. Please enter a term less than or equal to 10 next time."
.size .L.str.3, 108
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Number of terms per polynomial = %d, hence each polynomial will have degree = %d.\n\n"
.size .L.str.4, 84
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "%dx^%d "
.size .L.str.7, 8
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "+ "
.size .L.str.8, 3
.type _Z23multPolynomialsParallelPiS_S_ii,@object # @_Z23multPolynomialsParallelPiS_S_ii
.section .rodata,"a",@progbits
.globl _Z23multPolynomialsParallelPiS_S_ii
.p2align 3, 0x0
_Z23multPolynomialsParallelPiS_S_ii:
.quad _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.size _Z23multPolynomialsParallelPiS_S_ii, 8
.type .L.str.11,@object # @.str.11
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.11:
.asciz "kernel invocation"
.size .L.str.11, 18
.type _Z19sumProductsParalleliiPiS_ii,@object # @_Z19sumProductsParalleliiPiS_ii
.section .rodata,"a",@progbits
.globl _Z19sumProductsParalleliiPiS_ii
.p2align 3, 0x0
_Z19sumProductsParalleliiPiS_ii:
.quad _Z34__device_stub__sumProductsParalleliiPiS_ii
.size _Z19sumProductsParalleliiPiS_ii, 8
.type .L.str.14,@object # @.str.14
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.14:
.asciz "Coefficients at degree %d are not equivalent: serial!=parallel (%d!=%d)\n"
.size .L.str.14, 73
.type .L.str.17,@object # @.str.17
.L.str.17:
.asciz "CUDA error: %s: %s.\n"
.size .L.str.17, 21
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z23multPolynomialsParallelPiS_S_ii"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z19sumProductsParalleliiPiS_ii"
.size .L__unnamed_2, 32
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Generating polynomials...\n"
.size .Lstr, 27
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "polyA:"
.size .Lstr.1, 7
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "\n\npolyB:"
.size .Lstr.2, 9
.type .Lstr.4,@object # @str.4
.Lstr.4:
.asciz "Serial result:"
.size .Lstr.4, 15
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "\n\nParallel result:"
.size .Lstr.5, 19
.type .Lstr.6,@object # @str.6
.Lstr.6:
.asciz "\n"
.size .Lstr.6, 2
.type .Lstr.7,@object # @str.7
.Lstr.7:
.asciz "Looks like there were some discrepancies. Verification failed.\n"
.size .Lstr.7, 64
.type .Lstr.8,@object # @str.8
.Lstr.8:
.asciz "Verification successful. The serial and parallel polynomial multiplications produced the same result!\n"
.size .Lstr.8, 103
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z38__device_stub__multPolynomialsParallelPiS_S_ii
.addrsig_sym _Z34__device_stub__sumProductsParalleliiPiS_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z23multPolynomialsParallelPiS_S_ii
.addrsig_sym _Z19sumProductsParalleliiPiS_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | // includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != cudaSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
cudaGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
int index = blockIdx.x * blockDim.x + threadIdx.x;
printf("1D indexing demonstration");
printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("2D indexing demonstration");
printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,x);
printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,y);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
int main(){
kernel_1d<<<4,8>>>();
kernel_2d<<<(2,3),(3,4)>>>();
cudaCheckError(cudaDeviceSynchronize());
} | code for sm_80
Function : _Z9kernel_2dv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc600078e00ff */
/*0010*/ MOV R19, 0x0 ; /* 0x0000000000137802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x18] ; /* 0x01000600ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */
/* 0x000fe20007ffe0ff */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x1c] ; /* 0x01000700ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R8, c[0x4][R19] ; /* 0x0100000013087b82 */
/* 0x0000620000000a00 */
/*0060*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0070*/ IADD3 R18, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001127a10 */
/* 0x000fca0007f1e0ff */
/*0080*/ IMAD.X R2, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff027624 */
/* 0x000fe400000e06ff */
/*0090*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x000fcc0000000000 */
/*00a0*/ MOV R3, 0x110 ; /* 0x0000011000037802 */
/* 0x000fe40000000f00 */
/*00b0*/ MOV R20, 0x90 ; /* 0x0000009000147802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00d0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*00e0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*00f0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0100*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x003fea0003c00000 */
/*0110*/ S2R R17, SR_CTAID.Y ; /* 0x0000000000117919 */
/* 0x000e220000002600 */
/*0120*/ LDC.64 R8, c[0x4][R19] ; /* 0x0100000013087b82 */
/* 0x0002a20000000a00 */
/*0130*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x20] ; /* 0x01000800ff047624 */
/* 0x000fe400078e00ff */
/*0140*/ S2R R16, SR_CTAID.X ; /* 0x0000000000107919 */
/* 0x000e220000002500 */
/*0150*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x24] ; /* 0x01000900ff057624 */
/* 0x000fe400078e00ff */
/*0160*/ IMAD.MOV.U32 R6, RZ, RZ, R18 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0012 */
/*0170*/ IMAD.MOV.U32 R7, RZ, RZ, R2 ; /* 0x000000ffff077224 */
/* 0x000fe200078e0002 */
/*0180*/ STL.64 [R1], R16 ; /* 0x0000001001007387 */
/* 0x0013e80000100a00 */
/*0190*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x004fe40000000000 */
/*01a0*/ MOV R3, 0x210 ; /* 0x0000021000037802 */
/* 0x000fe40000000f00 */
/*01b0*/ MOV R20, 0x190 ; /* 0x0000019000147802 */
/* 0x000fc40000000f00 */
/*01c0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*01d0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*01e0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*01f0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0200*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x002fea0003c00000 */
/*0210*/ IMAD.MOV.U32 R10, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0a7624 */
/* 0x000fe200078e00ff */
/*0220*/ LDC.64 R8, c[0x4][R19] ; /* 0x0100000013087b82 */
/* 0x0000620000000a00 */
/*0230*/ IMAD.MOV.U32 R11, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0b7624 */
/* 0x000fe400078e00ff */
/*0240*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x28] ; /* 0x01000a00ff047624 */
/* 0x000fe400078e00ff */
/*0250*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x2c] ; /* 0x01000b00ff057624 */
/* 0x000fe200078e00ff */
/*0260*/ STL.64 [R1], R10 ; /* 0x0000000a01007387 */
/* 0x0001e20000100a00 */
/*0270*/ IMAD.MOV.U32 R6, RZ, RZ, R18 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0012 */
/*0280*/ IMAD.MOV.U32 R7, RZ, RZ, R2 ; /* 0x000000ffff077224 */
/* 0x000fc400078e0002 */
/*0290*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x001fca0000000000 */
/*02a0*/ MOV R3, 0x310 ; /* 0x0000031000037802 */
/* 0x000fe40000000f00 */
/*02b0*/ MOV R20, 0x290 ; /* 0x0000029000147802 */
/* 0x000fc40000000f00 */
/*02c0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*02d0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*02e0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*02f0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0300*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x002fea0003c00000 */
/*0310*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e220000002100 */
/*0320*/ IMAD.MOV.U32 R12, RZ, RZ, R16 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0010 */
/*0330*/ LDC.64 R8, c[0x4][R19] ; /* 0x0100000013087b82 */
/* 0x0002a20000000a00 */
/*0340*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0d7624 */
/* 0x000fe400078e00ff */
/*0350*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x30] ; /* 0x01000c00ff047624 */
/* 0x000fe400078e00ff */
/*0360*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x34] ; /* 0x01000d00ff057624 */
/* 0x000fe400078e00ff */
/*0370*/ IMAD.MOV.U32 R6, RZ, RZ, R18 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0012 */
/*0380*/ IMAD.MOV.U32 R7, RZ, RZ, R2 ; /* 0x000000ffff077224 */
/* 0x000fc400078e0002 */
/*0390*/ IMAD R15, R16, c[0x0][0x0], R14 ; /* 0x00000000100f7a24 */
/* 0x001fca00078e020e */
/*03a0*/ STL.128 [R1], R12 ; /* 0x0000000c01007387 */
/* 0x0003e40000100c00 */
/*03b0*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x004fe40000000000 */
/*03c0*/ MOV R3, 0x430 ; /* 0x0000043000037802 */
/* 0x000fe40000000f00 */
/*03d0*/ MOV R20, 0x3b0 ; /* 0x000003b000147802 */
/* 0x000fe40000000f00 */
/*03e0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*03f0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0400*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*0410*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0420*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x002fea0003c00000 */
/*0430*/ S2R R14, SR_TID.Y ; /* 0x00000000000e7919 */
/* 0x000e220000002200 */
/*0440*/ IMAD.MOV.U32 R12, RZ, RZ, R17 ; /* 0x000000ffff0c7224 */
/* 0x000fe200078e0011 */
/*0450*/ LDC.64 R8, c[0x4][R19] ; /* 0x0100000013087b82 */
/* 0x0002a20000000a00 */
/*0460*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x4] ; /* 0x00000100ff0d7624 */
/* 0x000fe400078e00ff */
/*0470*/ IMAD.MOV.U32 R6, RZ, RZ, R18 ; /* 0x000000ffff067224 */
/* 0x000fe400078e0012 */
/*0480*/ IMAD.MOV.U32 R7, RZ, RZ, R2 ; /* 0x000000ffff077224 */
/* 0x000fe400078e0002 */
/*0490*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x38] ; /* 0x01000e00ff047624 */
/* 0x000fe400078e00ff */
/*04a0*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x3c] ; /* 0x01000f00ff057624 */
/* 0x000fc400078e00ff */
/*04b0*/ IMAD R15, R17, c[0x0][0x4], R14 ; /* 0x00000100110f7a24 */
/* 0x001fca00078e020e */
/*04c0*/ STL.128 [R1], R12 ; /* 0x0000000c01007387 */
/* 0x0003e40000100c00 */
/*04d0*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x004fe40000000000 */
/*04e0*/ MOV R11, 0x550 ; /* 0x00000550000b7802 */
/* 0x000fe40000000f00 */
/*04f0*/ MOV R20, 0x4d0 ; /* 0x000004d000147802 */
/* 0x000fe40000000f00 */
/*0500*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0510*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0520*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e102 */
/*0530*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*0540*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x002fea0003c00000 */
/*0550*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0560*/ BRA 0x560; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0570*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0580*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0590*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*05f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z9kernel_1dv
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fc600078e00ff */
/*0010*/ MOV R17, 0x0 ; /* 0x0000000000117802 */
/* 0x000fe20000000f00 */
/*0020*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*0030*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */
/* 0x000fe20007ffe0ff */
/*0040*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe200078e00ff */
/*0050*/ LDC.64 R8, c[0x4][R17] ; /* 0x0100000011087b82 */
/* 0x0000620000000a00 */
/*0060*/ CS2R R6, SRZ ; /* 0x0000000000067805 */
/* 0x000fe2000001ff00 */
/*0070*/ IADD3 R2, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001027a10 */
/* 0x000fca0007f1e0ff */
/*0080*/ IMAD.X R16, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff107624 */
/* 0x000fe400000e06ff */
/*0090*/ LEPC R10 ; /* 0x00000000000a734e */
/* 0x000fcc0000000000 */
/*00a0*/ MOV R3, 0x110 ; /* 0x0000011000037802 */
/* 0x000fe40000000f00 */
/*00b0*/ MOV R20, 0x90 ; /* 0x0000009000147802 */
/* 0x000fe40000000f00 */
/*00c0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*00d0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*00e0*/ IADD3 R20, P0, P1, -R20, R3, R10 ; /* 0x0000000314147210 */
/* 0x000fc8000791e10a */
/*00f0*/ IADD3.X R21, ~R0, R21, R11, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e250b */
/*0100*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x003fea0003c00000 */
/*0110*/ S2R R14, SR_TID.X ; /* 0x00000000000e7919 */
/* 0x000e220000002100 */
/*0120*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0d7624 */
/* 0x000fe200078e00ff */
/*0130*/ LDC.64 R8, c[0x4][R17] ; /* 0x0100000011087b82 */
/* 0x0002a20000000a00 */
/*0140*/ IMAD.MOV.U32 R6, RZ, RZ, R2 ; /* 0x000000ffff067224 */
/* 0x000fe200078e0002 */
/*0150*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */
/* 0x000e220000002500 */
/*0160*/ IMAD.MOV.U32 R7, RZ, RZ, R16 ; /* 0x000000ffff077224 */
/* 0x000fe400078e0010 */
/*0170*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x10] ; /* 0x01000400ff047624 */
/* 0x000fe400078e00ff */
/*0180*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0x14] ; /* 0x01000500ff057624 */
/* 0x000fe400078e00ff */
/*0190*/ IMAD R15, R12, c[0x0][0x0], R14 ; /* 0x000000000c0f7a24 */
/* 0x001fca00078e020e */
/*01a0*/ STL.128 [R1], R12 ; /* 0x0000000c01007387 */
/* 0x0003e40000100c00 */
/*01b0*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x004fe40000000000 */
/*01c0*/ MOV R11, 0x230 ; /* 0x00000230000b7802 */
/* 0x000fe40000000f00 */
/*01d0*/ MOV R20, 0x1b0 ; /* 0x000001b000147802 */
/* 0x000fe40000000f00 */
/*01e0*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*01f0*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0200*/ IADD3 R20, P0, P1, -R20, R11, R2 ; /* 0x0000000b14147210 */
/* 0x000fc8000791e102 */
/*0210*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*0220*/ CALL.ABS.NOINC R8 ; /* 0x0000000008007343 */
/* 0x002fea0003c00000 */
/*0230*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0240*/ BRA 0x240; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != cudaSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
cudaGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
int index = blockIdx.x * blockDim.x + threadIdx.x;
printf("1D indexing demonstration");
printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("2D indexing demonstration");
printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,x);
printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,y);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
int main(){
kernel_1d<<<4,8>>>();
kernel_2d<<<(2,3),(3,4)>>>();
cudaCheckError(cudaDeviceSynchronize());
} | .file "tmpxft_00090d1b_00000000-6_cudaindex.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z9kernel_1dvv
.type _Z27__device_stub__Z9kernel_1dvv, @function
_Z27__device_stub__Z9kernel_1dvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z9kernel_1dv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9kernel_1dvv, .-_Z27__device_stub__Z9kernel_1dvv
.globl _Z9kernel_1dv
.type _Z9kernel_1dv, @function
_Z9kernel_1dv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9kernel_1dvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9kernel_1dv, .-_Z9kernel_1dv
.globl _Z27__device_stub__Z9kernel_2dvv
.type _Z27__device_stub__Z9kernel_2dvv, @function
_Z27__device_stub__Z9kernel_2dvv:
.LFB2084:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z9kernel_2dv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z9kernel_2dvv, .-_Z27__device_stub__Z9kernel_2dvv
.globl _Z9kernel_2dv
.type _Z9kernel_2dv, @function
_Z9kernel_2dv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9kernel_2dvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z9kernel_2dv, .-_Z9kernel_2dv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/intelav/CudaExamples/main/cudaindex.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Cuda failure %s:%d: '%s' \n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $8, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $4, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movl $4, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $3, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L26
.L22:
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
call _Z27__device_stub__Z9kernel_1dvv
jmp .L20
.L25:
call _Z27__device_stub__Z9kernel_2dvv
jmp .L21
.L26:
call cudaDeviceSynchronize@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $38, %r8d
leaq .LC0(%rip), %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L22
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z9kernel_2dv"
.LC3:
.string "_Z9kernel_1dv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kernel_2dv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kernel_1dv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != cudaSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
cudaGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
int index = blockIdx.x * blockDim.x + threadIdx.x;
printf("1D indexing demonstration");
printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("2D indexing demonstration");
printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,x);
printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,y);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
int main(){
kernel_1d<<<4,8>>>();
kernel_2d<<<(2,3),(3,4)>>>();
cudaCheckError(cudaDeviceSynchronize());
} | // includes, system
#include <hip/hip_runtime.h>
#include <stdio.h>
// includes CUDA Runtime
#include <hip/hip_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != hipSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
hipGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
int index = blockIdx.x * blockDim.x + threadIdx.x;
printf("1D indexing demonstration");
printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("2D indexing demonstration");
printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,x);
printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,y);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
int main(){
kernel_1d<<<4,8>>>();
kernel_2d<<<(2,3),(3,4)>>>();
cudaCheckError(hipDeviceSynchronize());
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | // includes, system
#include <hip/hip_runtime.h>
#include <stdio.h>
// includes CUDA Runtime
#include <hip/hip_runtime_api.h>
#define cudaCheckError(code) \
{ \
if((code) != hipSuccess) { \
fprintf(stderr, "Cuda failure %s:%d: '%s' \n",__FILE__,__LINE__ , \
hipGetErrorString(code)); \
} \
}
__global__ void kernel_1d(){
int index = blockIdx.x * blockDim.x + threadIdx.x;
printf("1D indexing demonstration");
printf("block %d,blockdim %x,thread %d,index %d\n",blockIdx.x,blockDim.x,threadIdx.x,index);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
__global__ void kernel_2d(){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
printf("2D indexing demonstration");
printf("blockidx.x %d blockidx.y %d\n ",blockIdx.x,blockIdx.y);
printf("blockdim.x %d blockdim.y %d\n ",blockDim.x,blockDim.y);
printf("block.x %d,blockdim.x %x,thread.x %d,x %d\n",blockIdx.x,blockDim.x,threadIdx.x,x);
printf("block.y %d,blockdim.y %x,thread.y %d,y %d\n",blockIdx.y,blockDim.y,threadIdx.y,y);
//printf("block %d,thread %d,index %d\n",blockIdx.x,threadIdx.x,index);
}
int main(){
kernel_1d<<<4,8>>>();
kernel_2d<<<(2,3),(3,4)>>>();
cudaCheckError(hipDeviceSynchronize());
} | .text
.file "cudaindex.hip"
.globl _Z24__device_stub__kernel_1dv # -- Begin function _Z24__device_stub__kernel_1dv
.p2align 4, 0x90
.type _Z24__device_stub__kernel_1dv,@function
_Z24__device_stub__kernel_1dv: # @_Z24__device_stub__kernel_1dv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_1dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z24__device_stub__kernel_1dv, .Lfunc_end0-_Z24__device_stub__kernel_1dv
.cfi_endproc
# -- End function
.globl _Z24__device_stub__kernel_2dv # -- Begin function _Z24__device_stub__kernel_2dv
.p2align 4, 0x90
.type _Z24__device_stub__kernel_2dv,@function
_Z24__device_stub__kernel_2dv: # @_Z24__device_stub__kernel_2dv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_2dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z24__device_stub__kernel_2dv, .Lfunc_end1-_Z24__device_stub__kernel_2dv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $64, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -16
movabsq $4294967300, %rbx # imm = 0x100000004
leaq 4(%rbx), %rdx
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_1dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
leaq -1(%rbx), %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_2dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_5
.LBB2_6:
xorl %eax, %eax
addq $64, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB2_5:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rbx
callq hipDeviceSynchronize
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %rbx, %rdi
movl $40, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
jmp .LBB2_6
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kernel_1dv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kernel_2dv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9kernel_1dv,@object # @_Z9kernel_1dv
.section .rodata,"a",@progbits
.globl _Z9kernel_1dv
.p2align 3, 0x0
_Z9kernel_1dv:
.quad _Z24__device_stub__kernel_1dv
.size _Z9kernel_1dv, 8
.type _Z9kernel_2dv,@object # @_Z9kernel_2dv
.globl _Z9kernel_2dv
.p2align 3, 0x0
_Z9kernel_2dv:
.quad _Z24__device_stub__kernel_2dv
.size _Z9kernel_2dv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Cuda failure %s:%d: '%s' \n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/intelav/CudaExamples/main/cudaindex.hip"
.size .L.str.1, 97
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9kernel_1dv"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9kernel_2dv"
.size .L__unnamed_2, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__kernel_1dv
.addrsig_sym _Z24__device_stub__kernel_2dv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9kernel_1dv
.addrsig_sym _Z9kernel_2dv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00090d1b_00000000-6_cudaindex.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z27__device_stub__Z9kernel_1dvv
.type _Z27__device_stub__Z9kernel_1dvv, @function
_Z27__device_stub__Z9kernel_1dvv:
.LFB2082:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z9kernel_1dv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z27__device_stub__Z9kernel_1dvv, .-_Z27__device_stub__Z9kernel_1dvv
.globl _Z9kernel_1dv
.type _Z9kernel_1dv, @function
_Z9kernel_1dv:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9kernel_1dvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z9kernel_1dv, .-_Z9kernel_1dv
.globl _Z27__device_stub__Z9kernel_2dvv
.type _Z27__device_stub__Z9kernel_2dvv, @function
_Z27__device_stub__Z9kernel_2dvv:
.LFB2084:
.cfi_startproc
endbr64
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 104
pushq 8(%rsp)
.cfi_def_cfa_offset 112
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z9kernel_2dv(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 96
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z27__device_stub__Z9kernel_2dvv, .-_Z27__device_stub__Z9kernel_2dvv
.globl _Z9kernel_2dv
.type _Z9kernel_2dv, @function
_Z9kernel_2dv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z27__device_stub__Z9kernel_2dvv
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z9kernel_2dv, .-_Z9kernel_2dv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "/home/ubuntu/Datasets/stackv2/train-structured/intelav/CudaExamples/main/cudaindex.cu"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Cuda failure %s:%d: '%s' \n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $40, %rsp
.cfi_def_cfa_offset 48
movl $8, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $4, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movl $4, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $3, 8(%rsp)
movl $1, 12(%rsp)
movl $1, 16(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 20(%rsp), %rdx
movl $1, %ecx
movq 8(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
call cudaDeviceSynchronize@PLT
testl %eax, %eax
jne .L26
.L22:
movl $0, %eax
addq $40, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
call _Z27__device_stub__Z9kernel_1dvv
jmp .L20
.L25:
call _Z27__device_stub__Z9kernel_2dvv
jmp .L21
.L26:
call cudaDeviceSynchronize@PLT
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r9
movl $38, %r8d
leaq .LC0(%rip), %rcx
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L22
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z9kernel_2dv"
.LC3:
.string "_Z9kernel_1dv"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kernel_2dv(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z9kernel_1dv(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaindex.hip"
.globl _Z24__device_stub__kernel_1dv # -- Begin function _Z24__device_stub__kernel_1dv
.p2align 4, 0x90
.type _Z24__device_stub__kernel_1dv,@function
_Z24__device_stub__kernel_1dv: # @_Z24__device_stub__kernel_1dv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_1dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end0:
.size _Z24__device_stub__kernel_1dv, .Lfunc_end0-_Z24__device_stub__kernel_1dv
.cfi_endproc
# -- End function
.globl _Z24__device_stub__kernel_2dv # -- Begin function _Z24__device_stub__kernel_2dv
.p2align 4, 0x90
.type _Z24__device_stub__kernel_2dv,@function
_Z24__device_stub__kernel_2dv: # @_Z24__device_stub__kernel_2dv
.cfi_startproc
# %bb.0:
subq $56, %rsp
.cfi_def_cfa_offset 64
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_2dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $72, %rsp
.cfi_adjust_cfa_offset -72
retq
.Lfunc_end1:
.size _Z24__device_stub__kernel_2dv, .Lfunc_end1-_Z24__device_stub__kernel_2dv
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $64, %rsp
.cfi_def_cfa_offset 80
.cfi_offset %rbx, -16
movabsq $4294967300, %rbx # imm = 0x100000004
leaq 4(%rbx), %rdx
movq %rbx, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_1dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
leaq -1(%rbx), %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z9kernel_2dv, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
callq hipDeviceSynchronize
testl %eax, %eax
jne .LBB2_5
.LBB2_6:
xorl %eax, %eax
addq $64, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
retq
.LBB2_5:
.cfi_def_cfa_offset 80
movq stderr(%rip), %rbx
callq hipDeviceSynchronize
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %rbx, %rdi
movl $40, %ecx
movq %rax, %r8
xorl %eax, %eax
callq fprintf
jmp .LBB2_6
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kernel_1dv, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9kernel_2dv, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9kernel_1dv,@object # @_Z9kernel_1dv
.section .rodata,"a",@progbits
.globl _Z9kernel_1dv
.p2align 3, 0x0
_Z9kernel_1dv:
.quad _Z24__device_stub__kernel_1dv
.size _Z9kernel_1dv, 8
.type _Z9kernel_2dv,@object # @_Z9kernel_2dv
.globl _Z9kernel_2dv
.p2align 3, 0x0
_Z9kernel_2dv:
.quad _Z24__device_stub__kernel_2dv
.size _Z9kernel_2dv, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Cuda failure %s:%d: '%s' \n"
.size .L.str, 27
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/intelav/CudaExamples/main/cudaindex.hip"
.size .L.str.1, 97
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9kernel_1dv"
.size .L__unnamed_1, 14
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z9kernel_2dv"
.size .L__unnamed_2, 14
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__kernel_1dv
.addrsig_sym _Z24__device_stub__kernel_2dv
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9kernel_1dv
.addrsig_sym _Z9kernel_2dv
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <stdio.h>
#include "nvToolsExt.h"
#include "cuda_profiler_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <stdio.h>
#include "nvToolsExt.h"
#include "cuda_profiler_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif | .file "tmpxft_0008c152_00000000-6_cuda_profiling.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <stdio.h>
#include "nvToolsExt.h"
#include "cuda_profiler_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
cudaGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
cudaSetDevice(device);
cudaProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "nvToolsExt.h"
#include "hip/hip_runtime_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "nvToolsExt.h"
#include "hip/hip_runtime_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "nvToolsExt.h"
#include "hip/hip_runtime_api.h"
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
#define POP_RANGE nvtxRangePop();
#else
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0;
bool cuda_profiler_started = false;
extern "C" void start_cuda_profiling_() {
printf("------------------Starting cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStart();
}
cuda_profiler_started = true;
}
extern "C" void stop_cuda_profiling_() {
printf("------------------Stopping cuda profiling----------------\n");
int number_of_devices;
hipGetDeviceCount(&number_of_devices);
for (int device = 0; device < number_of_devices; device ++) {
hipSetDevice(device);
hipProfilerStop();
}
cuda_profiler_started = false;
}
extern "C" void start_nvtx_timing_(char * title) {
if (cuda_profiler_started) {
PUSH_RANGE(title, color_number);
color_number ++;
}
}
extern "C" void stop_nvtx_timing_() {
if (cuda_profiler_started) {
POP_RANGE;
}
}
#endif | .text
.file "cuda_profiling.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0008c152_00000000-6_cuda_profiling.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_profiling.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | __global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} | code for sm_80
Function : _Z1kPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ IADD3 R2, R0.reuse, -0x1, RZ ; /* 0xffffffff00027810 */
/* 0x040fe20007ffe0ff */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0060*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe200078ec0ff */
/*0070*/ ULDC.64 UR10, c[0x0][0x118] ; /* 0x00004600000a7ab9 */
/* 0x000fe20000000a00 */
/*0080*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0090*/ @!P0 BRA 0x8f0 ; /* 0x0000085000008947 */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R6, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000067a10 */
/* 0x000fe20007ffe1ff */
/*00b0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*00d0*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*00e0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*00f0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f04270 */
/*0100*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fd800078e00ff */
/*0110*/ @!P0 BRA 0x780 ; /* 0x0000066000008947 */
/* 0x000fea0003800000 */
/*0120*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*0130*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0140*/ @!P1 BRA 0x510 ; /* 0x000003c000009947 */
/* 0x000fea0003800000 */
/*0150*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0160*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0170*/ IMAD.SHL.U32 R7, R7, 0x10, RZ ; /* 0x0000001007077824 */
/* 0x004fca00078e00ff */
/*0180*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*0190*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*01a0*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x004fca00000006ff */
/*01b0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x0003e8000c10190a */
/*01c0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea4000c1e1900 */
/*01d0*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*01e0*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x0005e8000c10190a */
/*01f0*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x000ee4000c1e1900 */
/*0200*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*0210*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x0007e8000c10190a */
/*0220*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100a02087981 */
/* 0x000e24000c1e1900 */
/*0230*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0240*/ STG.E [R4.64+0x10], R7 ; /* 0x0000100704007986 */
/* 0x0001e8000c10190a */
/*0250*/ LDG.E R8, [R2.64+0x14] ; /* 0x0000140a02087981 */
/* 0x000e64000c1e1900 */
/*0260*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0270*/ STG.E [R4.64+0x14], R9 ; /* 0x0000140904007986 */
/* 0x0003e8000c10190a */
/*0280*/ LDG.E R8, [R2.64+0x18] ; /* 0x0000180a02087981 */
/* 0x000ea4000c1e1900 */
/*0290*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*02a0*/ STG.E [R4.64+0x18], R11 ; /* 0x0000180b04007986 */
/* 0x0005e8000c10190a */
/*02b0*/ LDG.E R8, [R2.64+0x1c] ; /* 0x00001c0a02087981 */
/* 0x000ee4000c1e1900 */
/*02c0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*02d0*/ STG.E [R4.64+0x1c], R13 ; /* 0x00001c0d04007986 */
/* 0x0007e8000c10190a */
/*02e0*/ LDG.E R8, [R2.64+0x20] ; /* 0x0000200a02087981 */
/* 0x000e24000c1e1900 */
/*02f0*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0300*/ STG.E [R4.64+0x20], R7 ; /* 0x0000200704007986 */
/* 0x0001e8000c10190a */
/*0310*/ LDG.E R8, [R2.64+0x24] ; /* 0x0000240a02087981 */
/* 0x000e64000c1e1900 */
/*0320*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0330*/ STG.E [R4.64+0x24], R9 ; /* 0x0000240904007986 */
/* 0x0003e8000c10190a */
/*0340*/ LDG.E R8, [R2.64+0x28] ; /* 0x0000280a02087981 */
/* 0x000ea4000c1e1900 */
/*0350*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0360*/ STG.E [R4.64+0x28], R11 ; /* 0x0000280b04007986 */
/* 0x0005e8000c10190a */
/*0370*/ LDG.E R8, [R2.64+0x2c] ; /* 0x00002c0a02087981 */
/* 0x000ee4000c1e1900 */
/*0380*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*0390*/ STG.E [R4.64+0x2c], R13 ; /* 0x00002c0d04007986 */
/* 0x000fe8000c10190a */
/*03a0*/ LDG.E R8, [R2.64+0x30] ; /* 0x0000300a02087981 */
/* 0x000e24000c1e1900 */
/*03b0*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*03c0*/ STG.E [R4.64+0x30], R7 ; /* 0x0000300704007986 */
/* 0x0001e8000c10190a */
/*03d0*/ LDG.E R8, [R2.64+0x34] ; /* 0x0000340a02087981 */
/* 0x000e64000c1e1900 */
/*03e0*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*03f0*/ STG.E [R4.64+0x34], R9 ; /* 0x0000340904007986 */
/* 0x000fe8000c10190a */
/*0400*/ LDG.E R8, [R2.64+0x38] ; /* 0x0000380a02087981 */
/* 0x000ea2000c1e1900 */
/*0410*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fe20007ffe0ff */
/*0420*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0430*/ STG.E [R4.64+0x38], R11 ; /* 0x0000380b04007986 */
/* 0x000fe8000c10190a */
/*0440*/ LDG.E R8, [R2.64+0x3c] ; /* 0x00003c0a02087981 */
/* 0x0002a2000c1e1900 */
/*0450*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*0460*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */
/* 0x000fe2000fffe03f */
/*0470*/ IADD3 R7, P3, R4, 0x40, RZ ; /* 0x0000004004077810 */
/* 0x001fe40007f7e0ff */
/*0480*/ IADD3 R10, P2, R2, 0x40, RZ ; /* 0x00000040020a7810 */
/* 0x000fca0007f5e0ff */
/*0490*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x002fe400010e0603 */
/*04a0*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x000fe400078e000a */
/*04b0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x004fe200078e00ff */
/*04c0*/ IADD3.X R8, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff087210 */
/* 0x000fc80001ffe4ff */
/*04d0*/ STG.E [R4.64+0x3c], R13 ; /* 0x00003c0d04007986 */
/* 0x0001e4000c10190a */
/*04e0*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe200078e0007 */
/*04f0*/ MOV R5, R8 ; /* 0x0000000800057202 */
/* 0x000fe20000000f00 */
/*0500*/ @P1 BRA 0x160 ; /* 0xfffffc5000001947 */
/* 0x000fea000383ffff */
/*0510*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0520*/ @!P1 BRA 0x760 ; /* 0x0000023000009947 */
/* 0x000fea0003800000 */
/*0530*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0540*/ IMAD.SHL.U32 R7, R7, 0x10, RZ ; /* 0x0000001007077824 */
/* 0x004fca00078e00ff */
/*0550*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*0560*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*0570*/ IMAD.SHL.U32 R9, R8, 0x10, RZ ; /* 0x0000001008097824 */
/* 0x004fca00078e00ff */
/*0580*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x0003e8000c10190a */
/*0590*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea4000c1e1900 */
/*05a0*/ SHF.L.U32 R11, R8, 0x4, RZ ; /* 0x00000004080b7819 */
/* 0x004fca00000006ff */
/*05b0*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x0005e8000c10190a */
/*05c0*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x000ee4000c1e1900 */
/*05d0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*05e0*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x000fe8000c10190a */
/*05f0*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100a02087981 */
/* 0x000e24000c1e1900 */
/*0600*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0610*/ STG.E [R4.64+0x10], R7 ; /* 0x0000100704007986 */
/* 0x0001e8000c10190a */
/*0620*/ LDG.E R8, [R2.64+0x14] ; /* 0x0000140a02087981 */
/* 0x000e64000c1e1900 */
/*0630*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0640*/ STG.E [R4.64+0x14], R9 ; /* 0x0000140904007986 */
/* 0x0003e8000c10190a */
/*0650*/ LDG.E R8, [R2.64+0x18] ; /* 0x0000180a02087981 */
/* 0x000ea4000c1e1900 */
/*0660*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0670*/ STG.E [R4.64+0x18], R11 ; /* 0x0000180b04007986 */
/* 0x000fe8000c10190a */
/*0680*/ LDG.E R8, [R2.64+0x1c] ; /* 0x00001c0a02087981 */
/* 0x0004e2000c1e1900 */
/*0690*/ IADD3 R7, P2, R4, 0x20, RZ ; /* 0x0000002004077810 */
/* 0x001fe20007f5e0ff */
/*06a0*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */
/* 0x000fe2000fffe03f */
/*06b0*/ IADD3 R10, P1, R2, 0x20, RZ ; /* 0x00000020020a7810 */
/* 0x000fe40007f3e0ff */
/*06c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*06d0*/ IADD3.X R9, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff097210 */
/* 0x002fc40000ffe4ff */
/*06e0*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*06f0*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x004fe200078e000a */
/*0700*/ MOV R3, R9 ; /* 0x0000000900037202 */
/* 0x000fe20000000f00 */
/*0710*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fe400078e00ff */
/*0720*/ IMAD.X R8, RZ, RZ, R5, P2 ; /* 0x000000ffff087224 */
/* 0x000fc600010e0605 */
/*0730*/ STG.E [R4.64+0x1c], R13 ; /* 0x00001c0d04007986 */
/* 0x0001e4000c10190a */
/*0740*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0007 */
/*0750*/ IMAD.MOV.U32 R5, RZ, RZ, R8 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0008 */
/*0760*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0770*/ @!P0 BRA 0x8f0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0780*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0790*/ SHF.L.U32 R7, R7, 0x4, RZ ; /* 0x0000000407077819 */
/* 0x004fca00000006ff */
/*07a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*07b0*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*07c0*/ IMAD.SHL.U32 R9, R8, 0x10, RZ ; /* 0x0000001008097824 */
/* 0x004fca00078e00ff */
/*07d0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x000fe8000c10190a */
/*07e0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea2000c1e1900 */
/*07f0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fe20007ffe0ff */
/*0800*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0810*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x000fe8000c10190a */
/*0820*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x0002a2000c1e1900 */
/*0830*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0840*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*0850*/ IADD3 R7, P2, R4, 0x10, RZ ; /* 0x0000001004077810 */
/* 0x001fe40007f5e0ff */
/*0860*/ IADD3 R10, P1, R2, 0x10, RZ ; /* 0x00000010020a7810 */
/* 0x000fca0007f3e0ff */
/*0870*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */
/* 0x002fe200008e0603 */
/*0880*/ MOV R2, R10 ; /* 0x0000000a00027202 */
/* 0x000fe40000000f00 */
/*0890*/ SHF.L.U32 R13, R8, 0x4, RZ ; /* 0x00000004080d7819 */
/* 0x004fe200000006ff */
/*08a0*/ IMAD.X R8, RZ, RZ, R5, P2 ; /* 0x000000ffff087224 */
/* 0x000fc800010e0605 */
/*08b0*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x0001e4000c10190a */
/*08c0*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0007 */
/*08d0*/ IMAD.MOV.U32 R5, RZ, RZ, R8 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0008 */
/*08e0*/ @P0 BRA 0x780 ; /* 0xfffffe9000000947 */
/* 0x000fea000383ffff */
/*08f0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0900*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0910*/ UMOV UR5, 0x4 ; /* 0x0000000400057882 */
/* 0x000fe40000000000 */
/*0920*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0930*/ ULDC.64 UR8, c[0x0][0x160] ; /* 0x0000580000087ab9 */
/* 0x000fe40000000a00 */
/*0940*/ UIMAD.WIDE UR6, UR4, UR5, UR6 ; /* 0x00000005040672a5 */
/* 0x000fe4000f8e0206 */
/*0950*/ UIMAD.WIDE UR4, UR4, UR5, UR8 ; /* 0x00000005040472a5 */
/* 0x000fcc000f8e0208 */
/*0960*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.U32 R3, RZ, RZ, UR5 ; /* 0x00000005ff037e24 */
/* 0x000fca000f8e00ff */
/*0980*/ LDG.E R2, [R2.64] ; /* 0x0000000a02027981 */
/* 0x000ea2000c1e1900 */
/*0990*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*09a0*/ IMAD.U32 R4, RZ, RZ, UR6 ; /* 0x00000006ff047e24 */
/* 0x001fe2000f8e00ff */
/*09b0*/ MOV R5, UR7 ; /* 0x0000000700057c02 */
/* 0x000fe20008000f00 */
/*09c0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fe2000ff1e03f */
/*09d0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*09e0*/ UIADD3 UR4, UP1, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe4000ff3e03f */
/*09f0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe400087fe43f */
/*0a00*/ UIADD3.X UR5, URZ, UR5, URZ, UP1, !UPT ; /* 0x000000053f057290 */
/* 0x000fe20008ffe43f */
/*0a10*/ IMAD.SHL.U32 R7, R2, 0x10, RZ ; /* 0x0000001002077824 */
/* 0x004fca00078e00ff */
/*0a20*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e4000c10190a */
/*0a30*/ @P0 BRA 0x960 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0a40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a50*/ BRA 0xa50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} | .file "tmpxft_0019f31f_00000000-6_1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z1kPiS_iPiS_i
.type _Z23__device_stub__Z1kPiS_iPiS_i, @function
_Z23__device_stub__Z1kPiS_iPiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z1kPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z23__device_stub__Z1kPiS_iPiS_i, .-_Z23__device_stub__Z1kPiS_iPiS_i
.globl _Z1kPiS_i
.type _Z1kPiS_i, @function
_Z1kPiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z1kPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z1kPiS_i, .-_Z1kPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z1kPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z1kPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} | #include <hip/hip_runtime.h>
__global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
__global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z1kPiS_i
.globl _Z1kPiS_i
.p2align 8
.type _Z1kPiS_i,@function
_Z1kPiS_i:
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_3
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v0, 0
.LBB0_2:
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_add_i32 s4, s4, -1
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_waitcnt vmcnt(0)
v_lshlrev_b32_e32 v1, 4, v1
global_store_b32 v0, v1, s[2:3]
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s4, 0
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z1kPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 5
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z1kPiS_i, .Lfunc_end0-_Z1kPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z1kPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z1kPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
__global__ void k(int *input, int *output, int count)
{
for(int i =0; i<count; i++)
output[i]=input[i]*16;
} | .text
.file "1.hip"
.globl _Z16__device_stub__kPiS_i # -- Begin function _Z16__device_stub__kPiS_i
.p2align 4, 0x90
.type _Z16__device_stub__kPiS_i,@function
_Z16__device_stub__kPiS_i: # @_Z16__device_stub__kPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z1kPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z16__device_stub__kPiS_i, .Lfunc_end0-_Z16__device_stub__kPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z1kPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z1kPiS_i,@object # @_Z1kPiS_i
.section .rodata,"a",@progbits
.globl _Z1kPiS_i
.p2align 3, 0x0
_Z1kPiS_i:
.quad _Z16__device_stub__kPiS_i
.size _Z1kPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z1kPiS_i"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z16__device_stub__kPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z1kPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z1kPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R0, RZ, RZ, c[0x0][0x170] ; /* 0x00005c00ff007624 */
/* 0x000fca00078e00ff */
/*0020*/ ISETP.GE.AND P0, PT, R0, 0x1, PT ; /* 0x000000010000780c */
/* 0x000fda0003f06270 */
/*0030*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0040*/ IADD3 R2, R0.reuse, -0x1, RZ ; /* 0xffffffff00027810 */
/* 0x040fe20007ffe0ff */
/*0050*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*0060*/ LOP3.LUT R0, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300007812 */
/* 0x000fe200078ec0ff */
/*0070*/ ULDC.64 UR10, c[0x0][0x118] ; /* 0x00004600000a7ab9 */
/* 0x000fe20000000a00 */
/*0080*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fda0003f06070 */
/*0090*/ @!P0 BRA 0x8f0 ; /* 0x0000085000008947 */
/* 0x000fea0003800000 */
/*00a0*/ IADD3 R6, -R0, c[0x0][0x170], RZ ; /* 0x00005c0000067a10 */
/* 0x000fe20007ffe1ff */
/*00b0*/ UMOV UR4, URZ ; /* 0x0000003f00047c82 */
/* 0x000fe20008000000 */
/*00c0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*00d0*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*00e0*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff027624 */
/* 0x000fe200078e00ff */
/*00f0*/ ISETP.GT.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f04270 */
/*0100*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff037624 */
/* 0x000fd800078e00ff */
/*0110*/ @!P0 BRA 0x780 ; /* 0x0000066000008947 */
/* 0x000fea0003800000 */
/*0120*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe40003f24270 */
/*0130*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*0140*/ @!P1 BRA 0x510 ; /* 0x000003c000009947 */
/* 0x000fea0003800000 */
/*0150*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*0160*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0170*/ IMAD.SHL.U32 R7, R7, 0x10, RZ ; /* 0x0000001007077824 */
/* 0x004fca00078e00ff */
/*0180*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*0190*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*01a0*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x004fca00000006ff */
/*01b0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x0003e8000c10190a */
/*01c0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea4000c1e1900 */
/*01d0*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*01e0*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x0005e8000c10190a */
/*01f0*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x000ee4000c1e1900 */
/*0200*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*0210*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x0007e8000c10190a */
/*0220*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100a02087981 */
/* 0x000e24000c1e1900 */
/*0230*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0240*/ STG.E [R4.64+0x10], R7 ; /* 0x0000100704007986 */
/* 0x0001e8000c10190a */
/*0250*/ LDG.E R8, [R2.64+0x14] ; /* 0x0000140a02087981 */
/* 0x000e64000c1e1900 */
/*0260*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0270*/ STG.E [R4.64+0x14], R9 ; /* 0x0000140904007986 */
/* 0x0003e8000c10190a */
/*0280*/ LDG.E R8, [R2.64+0x18] ; /* 0x0000180a02087981 */
/* 0x000ea4000c1e1900 */
/*0290*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*02a0*/ STG.E [R4.64+0x18], R11 ; /* 0x0000180b04007986 */
/* 0x0005e8000c10190a */
/*02b0*/ LDG.E R8, [R2.64+0x1c] ; /* 0x00001c0a02087981 */
/* 0x000ee4000c1e1900 */
/*02c0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*02d0*/ STG.E [R4.64+0x1c], R13 ; /* 0x00001c0d04007986 */
/* 0x0007e8000c10190a */
/*02e0*/ LDG.E R8, [R2.64+0x20] ; /* 0x0000200a02087981 */
/* 0x000e24000c1e1900 */
/*02f0*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0300*/ STG.E [R4.64+0x20], R7 ; /* 0x0000200704007986 */
/* 0x0001e8000c10190a */
/*0310*/ LDG.E R8, [R2.64+0x24] ; /* 0x0000240a02087981 */
/* 0x000e64000c1e1900 */
/*0320*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0330*/ STG.E [R4.64+0x24], R9 ; /* 0x0000240904007986 */
/* 0x0003e8000c10190a */
/*0340*/ LDG.E R8, [R2.64+0x28] ; /* 0x0000280a02087981 */
/* 0x000ea4000c1e1900 */
/*0350*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0360*/ STG.E [R4.64+0x28], R11 ; /* 0x0000280b04007986 */
/* 0x0005e8000c10190a */
/*0370*/ LDG.E R8, [R2.64+0x2c] ; /* 0x00002c0a02087981 */
/* 0x000ee4000c1e1900 */
/*0380*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*0390*/ STG.E [R4.64+0x2c], R13 ; /* 0x00002c0d04007986 */
/* 0x000fe8000c10190a */
/*03a0*/ LDG.E R8, [R2.64+0x30] ; /* 0x0000300a02087981 */
/* 0x000e24000c1e1900 */
/*03b0*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*03c0*/ STG.E [R4.64+0x30], R7 ; /* 0x0000300704007986 */
/* 0x0001e8000c10190a */
/*03d0*/ LDG.E R8, [R2.64+0x34] ; /* 0x0000340a02087981 */
/* 0x000e64000c1e1900 */
/*03e0*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*03f0*/ STG.E [R4.64+0x34], R9 ; /* 0x0000340904007986 */
/* 0x000fe8000c10190a */
/*0400*/ LDG.E R8, [R2.64+0x38] ; /* 0x0000380a02087981 */
/* 0x000ea2000c1e1900 */
/*0410*/ IADD3 R6, R6, -0x10, RZ ; /* 0xfffffff006067810 */
/* 0x000fe20007ffe0ff */
/*0420*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0430*/ STG.E [R4.64+0x38], R11 ; /* 0x0000380b04007986 */
/* 0x000fe8000c10190a */
/*0440*/ LDG.E R8, [R2.64+0x3c] ; /* 0x00003c0a02087981 */
/* 0x0002a2000c1e1900 */
/*0450*/ ISETP.GT.AND P1, PT, R6, 0xc, PT ; /* 0x0000000c0600780c */
/* 0x000fe20003f24270 */
/*0460*/ UIADD3 UR4, UR4, 0x10, URZ ; /* 0x0000001004047890 */
/* 0x000fe2000fffe03f */
/*0470*/ IADD3 R7, P3, R4, 0x40, RZ ; /* 0x0000004004077810 */
/* 0x001fe40007f7e0ff */
/*0480*/ IADD3 R10, P2, R2, 0x40, RZ ; /* 0x00000040020a7810 */
/* 0x000fca0007f5e0ff */
/*0490*/ IMAD.X R3, RZ, RZ, R3, P2 ; /* 0x000000ffff037224 */
/* 0x002fe400010e0603 */
/*04a0*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x000fe400078e000a */
/*04b0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x004fe200078e00ff */
/*04c0*/ IADD3.X R8, RZ, R5, RZ, P3, !PT ; /* 0x00000005ff087210 */
/* 0x000fc80001ffe4ff */
/*04d0*/ STG.E [R4.64+0x3c], R13 ; /* 0x00003c0d04007986 */
/* 0x0001e4000c10190a */
/*04e0*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe200078e0007 */
/*04f0*/ MOV R5, R8 ; /* 0x0000000800057202 */
/* 0x000fe20000000f00 */
/*0500*/ @P1 BRA 0x160 ; /* 0xfffffc5000001947 */
/* 0x000fea000383ffff */
/*0510*/ ISETP.GT.AND P1, PT, R6, 0x4, PT ; /* 0x000000040600780c */
/* 0x000fda0003f24270 */
/*0520*/ @!P1 BRA 0x760 ; /* 0x0000023000009947 */
/* 0x000fea0003800000 */
/*0530*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0540*/ IMAD.SHL.U32 R7, R7, 0x10, RZ ; /* 0x0000001007077824 */
/* 0x004fca00078e00ff */
/*0550*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*0560*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*0570*/ IMAD.SHL.U32 R9, R8, 0x10, RZ ; /* 0x0000001008097824 */
/* 0x004fca00078e00ff */
/*0580*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x0003e8000c10190a */
/*0590*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea4000c1e1900 */
/*05a0*/ SHF.L.U32 R11, R8, 0x4, RZ ; /* 0x00000004080b7819 */
/* 0x004fca00000006ff */
/*05b0*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x0005e8000c10190a */
/*05c0*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x000ee4000c1e1900 */
/*05d0*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fca00078e00ff */
/*05e0*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x000fe8000c10190a */
/*05f0*/ LDG.E R8, [R2.64+0x10] ; /* 0x0000100a02087981 */
/* 0x000e24000c1e1900 */
/*0600*/ IMAD.SHL.U32 R7, R8, 0x10, RZ ; /* 0x0000001008077824 */
/* 0x001fca00078e00ff */
/*0610*/ STG.E [R4.64+0x10], R7 ; /* 0x0000100704007986 */
/* 0x0001e8000c10190a */
/*0620*/ LDG.E R8, [R2.64+0x14] ; /* 0x0000140a02087981 */
/* 0x000e64000c1e1900 */
/*0630*/ SHF.L.U32 R9, R8, 0x4, RZ ; /* 0x0000000408097819 */
/* 0x002fca00000006ff */
/*0640*/ STG.E [R4.64+0x14], R9 ; /* 0x0000140904007986 */
/* 0x0003e8000c10190a */
/*0650*/ LDG.E R8, [R2.64+0x18] ; /* 0x0000180a02087981 */
/* 0x000ea4000c1e1900 */
/*0660*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0670*/ STG.E [R4.64+0x18], R11 ; /* 0x0000180b04007986 */
/* 0x000fe8000c10190a */
/*0680*/ LDG.E R8, [R2.64+0x1c] ; /* 0x00001c0a02087981 */
/* 0x0004e2000c1e1900 */
/*0690*/ IADD3 R7, P2, R4, 0x20, RZ ; /* 0x0000002004077810 */
/* 0x001fe20007f5e0ff */
/*06a0*/ UIADD3 UR4, UR4, 0x8, URZ ; /* 0x0000000804047890 */
/* 0x000fe2000fffe03f */
/*06b0*/ IADD3 R10, P1, R2, 0x20, RZ ; /* 0x00000020020a7810 */
/* 0x000fe40007f3e0ff */
/*06c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*06d0*/ IADD3.X R9, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff097210 */
/* 0x002fc40000ffe4ff */
/*06e0*/ IADD3 R6, R6, -0x8, RZ ; /* 0xfffffff806067810 */
/* 0x000fe20007ffe0ff */
/*06f0*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x004fe200078e000a */
/*0700*/ MOV R3, R9 ; /* 0x0000000900037202 */
/* 0x000fe20000000f00 */
/*0710*/ IMAD.SHL.U32 R13, R8, 0x10, RZ ; /* 0x00000010080d7824 */
/* 0x008fe400078e00ff */
/*0720*/ IMAD.X R8, RZ, RZ, R5, P2 ; /* 0x000000ffff087224 */
/* 0x000fc600010e0605 */
/*0730*/ STG.E [R4.64+0x1c], R13 ; /* 0x00001c0d04007986 */
/* 0x0001e4000c10190a */
/*0740*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0007 */
/*0750*/ IMAD.MOV.U32 R5, RZ, RZ, R8 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0008 */
/*0760*/ ISETP.NE.OR P0, PT, R6, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x000fda0000705670 */
/*0770*/ @!P0 BRA 0x8f0 ; /* 0x0000017000008947 */
/* 0x000fea0003800000 */
/*0780*/ LDG.E R7, [R2.64] ; /* 0x0000000a02077981 */
/* 0x000ea4000c1e1900 */
/*0790*/ SHF.L.U32 R7, R7, 0x4, RZ ; /* 0x0000000407077819 */
/* 0x004fca00000006ff */
/*07a0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e8000c10190a */
/*07b0*/ LDG.E R8, [R2.64+0x4] ; /* 0x0000040a02087981 */
/* 0x000ea4000c1e1900 */
/*07c0*/ IMAD.SHL.U32 R9, R8, 0x10, RZ ; /* 0x0000001008097824 */
/* 0x004fca00078e00ff */
/*07d0*/ STG.E [R4.64+0x4], R9 ; /* 0x0000040904007986 */
/* 0x000fe8000c10190a */
/*07e0*/ LDG.E R8, [R2.64+0x8] ; /* 0x0000080a02087981 */
/* 0x000ea2000c1e1900 */
/*07f0*/ IADD3 R6, R6, -0x4, RZ ; /* 0xfffffffc06067810 */
/* 0x000fe20007ffe0ff */
/*0800*/ IMAD.SHL.U32 R11, R8, 0x10, RZ ; /* 0x00000010080b7824 */
/* 0x004fca00078e00ff */
/*0810*/ STG.E [R4.64+0x8], R11 ; /* 0x0000080b04007986 */
/* 0x000fe8000c10190a */
/*0820*/ LDG.E R8, [R2.64+0xc] ; /* 0x00000c0a02087981 */
/* 0x0002a2000c1e1900 */
/*0830*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe20003f05270 */
/*0840*/ UIADD3 UR4, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000fffe03f */
/*0850*/ IADD3 R7, P2, R4, 0x10, RZ ; /* 0x0000001004077810 */
/* 0x001fe40007f5e0ff */
/*0860*/ IADD3 R10, P1, R2, 0x10, RZ ; /* 0x00000010020a7810 */
/* 0x000fca0007f3e0ff */
/*0870*/ IMAD.X R3, RZ, RZ, R3, P1 ; /* 0x000000ffff037224 */
/* 0x002fe200008e0603 */
/*0880*/ MOV R2, R10 ; /* 0x0000000a00027202 */
/* 0x000fe40000000f00 */
/*0890*/ SHF.L.U32 R13, R8, 0x4, RZ ; /* 0x00000004080d7819 */
/* 0x004fe200000006ff */
/*08a0*/ IMAD.X R8, RZ, RZ, R5, P2 ; /* 0x000000ffff087224 */
/* 0x000fc800010e0605 */
/*08b0*/ STG.E [R4.64+0xc], R13 ; /* 0x00000c0d04007986 */
/* 0x0001e4000c10190a */
/*08c0*/ IMAD.MOV.U32 R4, RZ, RZ, R7 ; /* 0x000000ffff047224 */
/* 0x001fe400078e0007 */
/*08d0*/ IMAD.MOV.U32 R5, RZ, RZ, R8 ; /* 0x000000ffff057224 */
/* 0x000fe200078e0008 */
/*08e0*/ @P0 BRA 0x780 ; /* 0xfffffe9000000947 */
/* 0x000fea000383ffff */
/*08f0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fda0003f05270 */
/*0900*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0910*/ UMOV UR5, 0x4 ; /* 0x0000000400057882 */
/* 0x000fe40000000000 */
/*0920*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe40000000a00 */
/*0930*/ ULDC.64 UR8, c[0x0][0x160] ; /* 0x0000580000087ab9 */
/* 0x000fe40000000a00 */
/*0940*/ UIMAD.WIDE UR6, UR4, UR5, UR6 ; /* 0x00000005040672a5 */
/* 0x000fe4000f8e0206 */
/*0950*/ UIMAD.WIDE UR4, UR4, UR5, UR8 ; /* 0x00000005040472a5 */
/* 0x000fcc000f8e0208 */
/*0960*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x000fe20008000f00 */
/*0970*/ IMAD.U32 R3, RZ, RZ, UR5 ; /* 0x00000005ff037e24 */
/* 0x000fca000f8e00ff */
/*0980*/ LDG.E R2, [R2.64] ; /* 0x0000000a02027981 */
/* 0x000ea2000c1e1900 */
/*0990*/ IADD3 R0, R0, -0x1, RZ ; /* 0xffffffff00007810 */
/* 0x000fe20007ffe0ff */
/*09a0*/ IMAD.U32 R4, RZ, RZ, UR6 ; /* 0x00000006ff047e24 */
/* 0x001fe2000f8e00ff */
/*09b0*/ MOV R5, UR7 ; /* 0x0000000700057c02 */
/* 0x000fe20008000f00 */
/*09c0*/ UIADD3 UR6, UP0, UR6, 0x4, URZ ; /* 0x0000000406067890 */
/* 0x000fe2000ff1e03f */
/*09d0*/ ISETP.NE.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f05270 */
/*09e0*/ UIADD3 UR4, UP1, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe4000ff3e03f */
/*09f0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe400087fe43f */
/*0a00*/ UIADD3.X UR5, URZ, UR5, URZ, UP1, !UPT ; /* 0x000000053f057290 */
/* 0x000fe20008ffe43f */
/*0a10*/ IMAD.SHL.U32 R7, R2, 0x10, RZ ; /* 0x0000001002077824 */
/* 0x004fca00078e00ff */
/*0a20*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x0001e4000c10190a */
/*0a30*/ @P0 BRA 0x960 ; /* 0xffffff2000000947 */
/* 0x000fea000383ffff */
/*0a40*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0a50*/ BRA 0xa50; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0a60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0a90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0aa0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ab0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ac0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ad0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ae0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0af0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z1kPiS_i
.globl _Z1kPiS_i
.p2align 8
.type _Z1kPiS_i,@function
_Z1kPiS_i:
s_load_b32 s4, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s4, 1
s_cbranch_scc1 .LBB0_3
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v0, 0
.LBB0_2:
s_waitcnt lgkmcnt(0)
global_load_b32 v1, v0, s[0:1]
s_add_i32 s4, s4, -1
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_waitcnt vmcnt(0)
v_lshlrev_b32_e32 v1, 4, v1
global_store_b32 v0, v1, s[2:3]
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_eq_u32 s4, 0
s_cbranch_scc0 .LBB0_2
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z1kPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 20
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 2
.amdhsa_next_free_sgpr 5
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z1kPiS_i, .Lfunc_end0-_Z1kPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 20
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z1kPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 5
.sgpr_spill_count: 0
.symbol: _Z1kPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 2
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019f31f_00000000-6_1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z23__device_stub__Z1kPiS_iPiS_i
.type _Z23__device_stub__Z1kPiS_iPiS_i, @function
_Z23__device_stub__Z1kPiS_iPiS_i:
.LFB2051:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z1kPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z23__device_stub__Z1kPiS_iPiS_i, .-_Z23__device_stub__Z1kPiS_iPiS_i
.globl _Z1kPiS_i
.type _Z1kPiS_i, @function
_Z1kPiS_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z23__device_stub__Z1kPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z1kPiS_i, .-_Z1kPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z1kPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z1kPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "1.hip"
.globl _Z16__device_stub__kPiS_i # -- Begin function _Z16__device_stub__kPiS_i
.p2align 4, 0x90
.type _Z16__device_stub__kPiS_i,@function
_Z16__device_stub__kPiS_i: # @_Z16__device_stub__kPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z1kPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z16__device_stub__kPiS_i, .Lfunc_end0-_Z16__device_stub__kPiS_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z1kPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z1kPiS_i,@object # @_Z1kPiS_i
.section .rodata,"a",@progbits
.globl _Z1kPiS_i
.p2align 3, 0x0
_Z1kPiS_i:
.quad _Z16__device_stub__kPiS_i
.size _Z1kPiS_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z1kPiS_i"
.size .L__unnamed_1, 10
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z16__device_stub__kPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z1kPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
cudaError_t addWithCuda(int *c, const int *a);
__global__ void addKernel(int *c, const int *a)
{
int i = blockIdx.x;
*c = *a + i;
printf("thread %d:a=%d,c=%d\n",i,*a,*c);
}
int main()
{
int a = 1;
int c = 0;
cudaError_t cudaStatus;
int num = 0;
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
cudaGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(&c, &a);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("a=%d,c=%d\n",a,c);
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(int *c, const int *a)
{
int *dev_a = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_c, c,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("addKernel<<<7,1>>>(%d,%d)\n",*a,*c);
addKernel<<<7,1>>>(dev_c,dev_a);
// cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
printf("addKernel<<<1,7>>>(%d,%d)\n",*a,*c);
addKernel<<<1,7>>>(dev_c,dev_a);
cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
} | code for sm_80
Function : _Z9addKernelPiPKi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fe200078e00ff */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ IMAD.MOV.U32 R3, RZ, RZ, c[0x0][0x16c] ; /* 0x00005b00ff037624 */
/* 0x000fe200078e00ff */
/*0040*/ IADD3 R1, R1, -0x10, RZ ; /* 0xfffffff001017810 */
/* 0x000fc80007ffe0ff */
/*0050*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*0060*/ IMAD.MOV.U32 R8, RZ, RZ, c[0x0][0x160] ; /* 0x00005800ff087624 */
/* 0x000fe400078e00ff */
/*0070*/ IMAD.MOV.U32 R9, RZ, RZ, c[0x0][0x164] ; /* 0x00005900ff097624 */
/* 0x000fe200078e00ff */
/*0080*/ S2R R12, SR_CTAID.X ; /* 0x00000000000c7919 */
/* 0x000ea40000002500 */
/*0090*/ IMAD.IADD R0, R5, 0x1, R12 ; /* 0x0000000105007824 */
/* 0x004fca00078e020c */
/*00a0*/ STG.E [R8.64], R0 ; /* 0x0000000008007986 */
/* 0x0001e8000c101904 */
/*00b0*/ LDG.E R13, [R2.64] ; /* 0x00000004020d7981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ MOV R10, 0x0 ; /* 0x00000000000a7802 */
/* 0x000fe20000000f00 */
/*00d0*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x4][0x8] ; /* 0x01000200ff047624 */
/* 0x000fe200078e00ff */
/*00e0*/ IADD3 R6, P0, R1, c[0x0][0x20], RZ ; /* 0x0000080001067a10 */
/* 0x000fe20007f1e0ff */
/*00f0*/ STL [R1+0x8], R0 ; /* 0x0000080001007387 */
/* 0x0001e20000100800 */
/*0100*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x4][0xc] ; /* 0x01000300ff057624 */
/* 0x000fe400078e00ff */
/*0110*/ LDC.64 R10, c[0x4][R10] ; /* 0x010000000a0a7b82 */
/* 0x000e620000000a00 */
/*0120*/ IMAD.X R7, RZ, RZ, c[0x0][0x24], P0 ; /* 0x00000900ff077624 */
/* 0x000fe200000e06ff */
/*0130*/ STL.64 [R1], R12 ; /* 0x0000000c01007387 */
/* 0x0041ec0000100a00 */
/*0140*/ LEPC R2 ; /* 0x000000000002734e */
/* 0x002fe40000000000 */
/*0150*/ MOV R9, 0x1c0 ; /* 0x000001c000097802 */
/* 0x001fe40000000f00 */
/*0160*/ MOV R20, 0x140 ; /* 0x0000014000147802 */
/* 0x000fc40000000f00 */
/*0170*/ MOV R21, 0x0 ; /* 0x0000000000157802 */
/* 0x000fe40000000f00 */
/*0180*/ MOV R0, 0x0 ; /* 0x0000000000007802 */
/* 0x000fe40000000f00 */
/*0190*/ IADD3 R20, P0, P1, -R20, R9, R2 ; /* 0x0000000914147210 */
/* 0x000fc8000791e102 */
/*01a0*/ IADD3.X R21, ~R0, R21, R3, P0, P1 ; /* 0x0000001500157210 */
/* 0x000fc800007e2503 */
/*01b0*/ CALL.ABS.NOINC R10 ; /* 0x000000000a007343 */
/* 0x000fea0003c00000 */
/*01c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*01d0*/ BRA 0x1d0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0200*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
cudaError_t addWithCuda(int *c, const int *a);
__global__ void addKernel(int *c, const int *a)
{
int i = blockIdx.x;
*c = *a + i;
printf("thread %d:a=%d,c=%d\n",i,*a,*c);
}
int main()
{
int a = 1;
int c = 0;
cudaError_t cudaStatus;
int num = 0;
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
cudaGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(&c, &a);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("a=%d,c=%d\n",a,c);
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(int *c, const int *a)
{
int *dev_a = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_c, c,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("addKernel<<<7,1>>>(%d,%d)\n",*a,*c);
addKernel<<<7,1>>>(dev_c,dev_a);
// cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
printf("addKernel<<<1,7>>>(%d,%d)\n",*a,*c);
addKernel<<<1,7>>>(dev_c,dev_a);
cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
} | .file "tmpxft_000e19b7_00000000-6_05_0304.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9addKernelPiPKiPiPKi
.type _Z31__device_stub__Z9addKernelPiPKiPiPKi, @function
_Z31__device_stub__Z9addKernelPiPKiPiPKi:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9addKernelPiPKi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z31__device_stub__Z9addKernelPiPKiPiPKi, .-_Z31__device_stub__Z9addKernelPiPKiPiPKi
.globl _Z9addKernelPiPKi
.type _Z9addKernelPiPKi, @function
_Z9addKernelPiPKi:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z9addKernelPiPKi, .-_Z9addKernelPiPKi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "cudaMalloc failed!"
.LC2:
.string "cudaMemcpy failed!"
.LC3:
.string "addKernel<<<7,1>>>(%d,%d)\n"
.LC4:
.string "The anwser:%d\n"
.LC5:
.string "addKernel<<<1,7>>>(%d,%d)\n"
.text
.globl _Z11addWithCudaPiPKi
.type _Z11addWithCudaPiPKi, @function
_Z11addWithCudaPiPKi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, (%rsp)
movq $0, 8(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L24
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L25
movq %rsp, %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L26
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L27
movl $1, %ecx
movl $4, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L28
movl 0(%rbp), %ecx
movl (%r12), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $7, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L18:
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L30
movl 0(%rbp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 0(%rbp), %ecx
movl (%r12), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $7, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L20:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L32
movl 0(%rbp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L13
.L24:
movl %eax, %ebx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl %ebx, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L26:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L27:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L28:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L29:
movq (%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
jmp .L18
.L30:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L31:
movq (%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
jmp .L20
.L32:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z11addWithCudaPiPKi, .-_Z11addWithCudaPiPKi
.section .rodata.str1.1
.LC6:
.string "addWithCuda failed!"
.LC7:
.string "a=%d,c=%d\n"
.LC8:
.string "cudaThreadExit failed!"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $1064, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $1, 4(%rsp)
movl $0, 8(%rsp)
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L35
movl $0, %ebx
leaq 16(%rsp), %rbp
.L36:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L36
.L35:
leaq 4(%rsp), %rsi
leaq 8(%rsp), %rdi
call _Z11addWithCudaPiPKi
testl %eax, %eax
jne .L43
movl 8(%rsp), %ecx
movl 4(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaThreadExit@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L44
.L34:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L45
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L34
.L44:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L34
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z9addKernelPiPKi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiPKi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>
cudaError_t addWithCuda(int *c, const int *a);
__global__ void addKernel(int *c, const int *a)
{
int i = blockIdx.x;
*c = *a + i;
printf("thread %d:a=%d,c=%d\n",i,*a,*c);
}
int main()
{
int a = 1;
int c = 0;
cudaError_t cudaStatus;
int num = 0;
cudaDeviceProp prop;
cudaStatus = cudaGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
cudaGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(&c, &a);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("a=%d,c=%d\n",a,c);
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
cudaError_t addWithCuda(int *c, const int *a)
{
int *dev_a = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_c, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, sizeof(int));
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_c, c,sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("addKernel<<<7,1>>>(%d,%d)\n",*a,*c);
addKernel<<<7,1>>>(dev_c,dev_a);
// cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
printf("addKernel<<<1,7>>>(%d,%d)\n",*a,*c);
addKernel<<<1,7>>>(dev_c,dev_a);
cudaStatus = cudaThreadSynchronize();
cudaStatus = cudaMemcpy(c, dev_c,sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
} | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
hipError_t addWithCuda(int *c, const int *a);
__global__ void addKernel(int *c, const int *a)
{
int i = blockIdx.x;
*c = *a + i;
printf("thread %d:a=%d,c=%d\n",i,*a,*c);
}
int main()
{
int a = 1;
int c = 0;
hipError_t cudaStatus;
int num = 0;
hipDeviceProp_t prop;
cudaStatus = hipGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
hipGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(&c, &a);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("a=%d,c=%d\n",a,c);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t addWithCuda(int *c, const int *a)
{
int *dev_a = 0;
int *dev_c = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_c, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_a, a,sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_c, c,sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("addKernel<<<7,1>>>(%d,%d)\n",*a,*c);
addKernel<<<7,1>>>(dev_c,dev_a);
// cudaStatus = cudaThreadSynchronize();
cudaStatus = hipMemcpy(c, dev_c,sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
printf("addKernel<<<1,7>>>(%d,%d)\n",*a,*c);
addKernel<<<1,7>>>(dev_c,dev_a);
cudaStatus = hipDeviceSynchronize();
cudaStatus = hipMemcpy(c, dev_c,sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
Error:
hipFree(dev_c);
hipFree(dev_a);
return cudaStatus;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /*************************************************************************
> File Name: 05_0304.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年03月30日 星期三 13时37分15秒
************************************************************************/
#include <stdio.h>
#include <hip/hip_runtime.h>
hipError_t addWithCuda(int *c, const int *a);
__global__ void addKernel(int *c, const int *a)
{
int i = blockIdx.x;
*c = *a + i;
printf("thread %d:a=%d,c=%d\n",i,*a,*c);
}
int main()
{
int a = 1;
int c = 0;
hipError_t cudaStatus;
int num = 0;
hipDeviceProp_t prop;
cudaStatus = hipGetDeviceCount(&num);
for(int i = 0;i<num;i++)
{
hipGetDeviceProperties(&prop,i);
}
cudaStatus = addWithCuda(&c, &a);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("a=%d,c=%d\n",a,c);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t addWithCuda(int *c, const int *a)
{
int *dev_a = 0;
int *dev_c = 0;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_c, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, sizeof(int));
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_a, a,sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_c, c,sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("addKernel<<<7,1>>>(%d,%d)\n",*a,*c);
addKernel<<<7,1>>>(dev_c,dev_a);
// cudaStatus = cudaThreadSynchronize();
cudaStatus = hipMemcpy(c, dev_c,sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
printf("addKernel<<<1,7>>>(%d,%d)\n",*a,*c);
addKernel<<<1,7>>>(dev_c,dev_a);
cudaStatus = hipDeviceSynchronize();
cudaStatus = hipMemcpy(c, dev_c,sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("The anwser:%d\n",*c);
Error:
hipFree(dev_c);
hipFree(dev_a);
return cudaStatus;
} | .text
.file "05_0304.hip"
.globl _Z24__device_stub__addKernelPiPKi # -- Begin function _Z24__device_stub__addKernelPiPKi
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiPKi,@function
_Z24__device_stub__addKernelPiPKi: # @_Z24__device_stub__addKernelPiPKi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__addKernelPiPKi, .Lfunc_end0-_Z24__device_stub__addKernelPiPKi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1496, %rsp # imm = 0x5D8
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $1, 20(%rsp)
movl $0, 16(%rsp)
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 12(%rsp)
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %ebx, %ebx
leaq 24(%rsp), %r14
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %ebx, %esi
callq hipGetDevicePropertiesR0600
incl %ebx
cmpl 12(%rsp), %ebx
jl .LBB1_2
.LBB1_3: # %._crit_edge
leaq 16(%rsp), %rdi
leaq 20(%rsp), %rsi
callq _Z11addWithCudaPiPKi
testl %eax, %eax
jne .LBB1_4
# %bb.5:
movl 20(%rsp), %esi
movl 16(%rsp), %edx
xorl %ebx, %ebx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_6
.LBB1_8:
movl %ebx, %eax
addq $1496, %rsp # imm = 0x5D8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 1520
movq stderr(%rip), %rcx
movl $.L.str, %edi
movl $19, %esi
jmp .LBB1_7
.LBB1_6:
movq stderr(%rip), %rcx
movl $.L.str.2, %edi
movl $22, %esi
.LBB1_7:
movl $1, %edx
callq fwrite@PLT
movl $1, %ebx
jmp .LBB1_8
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z11addWithCudaPiPKi # -- Begin function _Z11addWithCudaPiPKi
.p2align 4, 0x90
.type _Z11addWithCudaPiPKi,@function
_Z11addWithCudaPiPKi: # @_Z11addWithCudaPiPKi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $96, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %r14
movq %rdi, %rbx
movq $0, 8(%rsp)
movq $0, (%rsp)
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB2_1
# %bb.2:
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB2_3
# %bb.6:
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB2_3
# %bb.7:
movq 8(%rsp), %rdi
movl $4, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.9:
movq (%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.10:
movabsq $4294967297, %r15 # imm = 0x100000001
movl (%r14), %esi
movl (%rbx), %edx
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
leaq 6(%r15), %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
movq (%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.13:
movl (%rbx), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl (%r14), %esi
movl (%rbx), %edx
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
leaq 6(%r15), %rdx
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_15
# %bb.14:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_15:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.16:
movl (%rbx), %esi
xorl %ebx, %ebx
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
jmp .LBB2_17
.LBB2_8:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.5, %edi
jmp .LBB2_4
.LBB2_3:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.4, %edi
.LBB2_4:
movl $17, %esi
.LBB2_5:
movl $1, %edx
callq fwrite@PLT
.LBB2_17:
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl %ebx, %eax
addq $96, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_1:
.cfi_def_cfa_offset 128
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.3, %edi
movl $63, %esi
jmp .LBB2_5
.Lfunc_end2:
.size _Z11addWithCudaPiPKi, .Lfunc_end2-_Z11addWithCudaPiPKi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiPKi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiPKi,@object # @_Z9addKernelPiPKi
.section .rodata,"a",@progbits
.globl _Z9addKernelPiPKi
.p2align 3, 0x0
_Z9addKernelPiPKi:
.quad _Z24__device_stub__addKernelPiPKi
.size _Z9addKernelPiPKi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "addWithCuda failed!"
.size .L.str, 20
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "a=%d,c=%d\n"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "hipDeviceReset failed!"
.size .L.str.2, 23
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"
.size .L.str.3, 64
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "hipMalloc failed!"
.size .L.str.4, 18
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipMemcpy failed!"
.size .L.str.5, 18
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "addKernel<<<7,1>>>(%d,%d)\n"
.size .L.str.6, 27
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "The anwser:%d\n"
.size .L.str.7, 15
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "addKernel<<<1,7>>>(%d,%d)\n"
.size .L.str.8, 27
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9addKernelPiPKi"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiPKi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiPKi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000e19b7_00000000-6_05_0304.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z9addKernelPiPKiPiPKi
.type _Z31__device_stub__Z9addKernelPiPKiPiPKi, @function
_Z31__device_stub__Z9addKernelPiPKiPiPKi:
.LFB2083:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z9addKernelPiPKi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z31__device_stub__Z9addKernelPiPKiPiPKi, .-_Z31__device_stub__Z9addKernelPiPKiPiPKi
.globl _Z9addKernelPiPKi
.type _Z9addKernelPiPKi, @function
_Z9addKernelPiPKi:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z9addKernelPiPKi, .-_Z9addKernelPiPKi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "cudaMalloc failed!"
.LC2:
.string "cudaMemcpy failed!"
.LC3:
.string "addKernel<<<7,1>>>(%d,%d)\n"
.LC4:
.string "The anwser:%d\n"
.LC5:
.string "addKernel<<<1,7>>>(%d,%d)\n"
.text
.globl _Z11addWithCudaPiPKi
.type _Z11addWithCudaPiPKi, @function
_Z11addWithCudaPiPKi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, (%rsp)
movq $0, 8(%rsp)
movl $0, %edi
call cudaSetDevice@PLT
testl %eax, %eax
jne .L24
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L25
movq %rsp, %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L26
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L27
movl $1, %ecx
movl $4, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L28
movl 0(%rbp), %ecx
movl (%r12), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $7, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L18:
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L30
movl 0(%rbp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 0(%rbp), %ecx
movl (%r12), %edx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $7, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L20:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movl $4, %edx
movq 8(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %ebx
testl %eax, %eax
jne .L32
movl 0(%rbp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L13
.L24:
movl %eax, %ebx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L33
movl %ebx, %eax
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L26:
leaq .LC1(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L27:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L28:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L29:
movq (%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
jmp .L18
.L30:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L31:
movq (%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z9addKernelPiPKiPiPKi
jmp .L20
.L32:
leaq .LC2(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
jmp .L13
.L33:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z11addWithCudaPiPKi, .-_Z11addWithCudaPiPKi
.section .rodata.str1.1
.LC6:
.string "addWithCuda failed!"
.LC7:
.string "a=%d,c=%d\n"
.LC8:
.string "cudaThreadExit failed!"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $1064, %rsp
.cfi_def_cfa_offset 1088
movq %fs:40, %rax
movq %rax, 1048(%rsp)
xorl %eax, %eax
movl $1, 4(%rsp)
movl $0, 8(%rsp)
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 12(%rsp)
jle .L35
movl $0, %ebx
leaq 16(%rsp), %rbp
.L36:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
addl $1, %ebx
cmpl %ebx, 12(%rsp)
jg .L36
.L35:
leaq 4(%rsp), %rsi
leaq 8(%rsp), %rdi
call _Z11addWithCudaPiPKi
testl %eax, %eax
jne .L43
movl 8(%rsp), %ecx
movl 4(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call cudaThreadExit@PLT
movl %eax, %edx
movl $0, %eax
testl %edx, %edx
jne .L44
.L34:
movq 1048(%rsp), %rdx
subq %fs:40, %rdx
jne .L45
addq $1064, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
leaq .LC6(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %eax
jmp .L34
.L44:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
call __fprintf_chk@PLT
movl $1, %eax
jmp .L34
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC9:
.string "_Z9addKernelPiPKi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _Z9addKernelPiPKi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "05_0304.hip"
.globl _Z24__device_stub__addKernelPiPKi # -- Begin function _Z24__device_stub__addKernelPiPKi
.p2align 4, 0x90
.type _Z24__device_stub__addKernelPiPKi,@function
_Z24__device_stub__addKernelPiPKi: # @_Z24__device_stub__addKernelPiPKi
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z24__device_stub__addKernelPiPKi, .Lfunc_end0-_Z24__device_stub__addKernelPiPKi
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $1496, %rsp # imm = 0x5D8
.cfi_def_cfa_offset 1520
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movl $1, 20(%rsp)
movl $0, 16(%rsp)
movl $0, 12(%rsp)
leaq 12(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 12(%rsp)
jle .LBB1_3
# %bb.1: # %.lr.ph.preheader
xorl %ebx, %ebx
leaq 24(%rsp), %r14
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %ebx, %esi
callq hipGetDevicePropertiesR0600
incl %ebx
cmpl 12(%rsp), %ebx
jl .LBB1_2
.LBB1_3: # %._crit_edge
leaq 16(%rsp), %rdi
leaq 20(%rsp), %rsi
callq _Z11addWithCudaPiPKi
testl %eax, %eax
jne .LBB1_4
# %bb.5:
movl 20(%rsp), %esi
movl 16(%rsp), %edx
xorl %ebx, %ebx
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
callq hipDeviceReset
testl %eax, %eax
jne .LBB1_6
.LBB1_8:
movl %ebx, %eax
addq $1496, %rsp # imm = 0x5D8
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB1_4:
.cfi_def_cfa_offset 1520
movq stderr(%rip), %rcx
movl $.L.str, %edi
movl $19, %esi
jmp .LBB1_7
.LBB1_6:
movq stderr(%rip), %rcx
movl $.L.str.2, %edi
movl $22, %esi
.LBB1_7:
movl $1, %edx
callq fwrite@PLT
movl $1, %ebx
jmp .LBB1_8
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.globl _Z11addWithCudaPiPKi # -- Begin function _Z11addWithCudaPiPKi
.p2align 4, 0x90
.type _Z11addWithCudaPiPKi,@function
_Z11addWithCudaPiPKi: # @_Z11addWithCudaPiPKi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $96, %rsp
.cfi_def_cfa_offset 128
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %r14
movq %rdi, %rbx
movq $0, 8(%rsp)
movq $0, (%rsp)
xorl %edi, %edi
callq hipSetDevice
testl %eax, %eax
jne .LBB2_1
# %bb.2:
movq %rsp, %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB2_3
# %bb.6:
leaq 8(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB2_3
# %bb.7:
movq 8(%rsp), %rdi
movl $4, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.9:
movq (%rsp), %rdi
movl $4, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.10:
movabsq $4294967297, %r15 # imm = 0x100000001
movl (%r14), %esi
movl (%rbx), %edx
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
leaq 6(%r15), %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
movq (%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.13:
movl (%rbx), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movl (%r14), %esi
movl (%rbx), %edx
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
leaq 6(%r15), %rdx
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_15
# %bb.14:
movq (%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z9addKernelPiPKi, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_15:
callq hipDeviceSynchronize
movq (%rsp), %rsi
movl $4, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB2_8
# %bb.16:
movl (%rbx), %esi
xorl %ebx, %ebx
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
jmp .LBB2_17
.LBB2_8:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.5, %edi
jmp .LBB2_4
.LBB2_3:
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.4, %edi
.LBB2_4:
movl $17, %esi
.LBB2_5:
movl $1, %edx
callq fwrite@PLT
.LBB2_17:
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movl %ebx, %eax
addq $96, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB2_1:
.cfi_def_cfa_offset 128
movl %eax, %ebx
movq stderr(%rip), %rcx
movl $.L.str.3, %edi
movl $63, %esi
jmp .LBB2_5
.Lfunc_end2:
.size _Z11addWithCudaPiPKi, .Lfunc_end2-_Z11addWithCudaPiPKi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z9addKernelPiPKi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z9addKernelPiPKi,@object # @_Z9addKernelPiPKi
.section .rodata,"a",@progbits
.globl _Z9addKernelPiPKi
.p2align 3, 0x0
_Z9addKernelPiPKi:
.quad _Z24__device_stub__addKernelPiPKi
.size _Z9addKernelPiPKi, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "addWithCuda failed!"
.size .L.str, 20
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "a=%d,c=%d\n"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "hipDeviceReset failed!"
.size .L.str.2, 23
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"
.size .L.str.3, 64
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "hipMalloc failed!"
.size .L.str.4, 18
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "hipMemcpy failed!"
.size .L.str.5, 18
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "addKernel<<<7,1>>>(%d,%d)\n"
.size .L.str.6, 27
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "The anwser:%d\n"
.size .L.str.7, 15
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "addKernel<<<1,7>>>(%d,%d)\n"
.size .L.str.8, 27
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z9addKernelPiPKi"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z24__device_stub__addKernelPiPKi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z9addKernelPiPKi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <algorithm>
#include <iostream>
#include <vector>
#include <time.h>
typedef unsigned long long data_t;
static inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
const int MAX_CONSTANT = 16*1024;
__constant__ unsigned int const_sum[MAX_CONSTANT];
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
// get the 0 bit of each number by bit_shift
// example: number : 10001, bit_shit: 1, One: 1,
//
// it means check if the second bit is 1 or not.
__global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
data_t bit = 0;
data_t one=1;
data_t shift=one<<bit_shift;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
bit=d_in[i]&shift;
bit = (bit > 0) ? 1 : 0;
d_out[i] = (One ? bit : 1 - bit);
}
}
__global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n,
unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for (unsigned int i=start; i<end && i<n; i++){
d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre;
if(d_index[i]>=n){
printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]);
}
// if(d_mask[i]==1){
// d_index[i]=total_pre+d_sum[i];
// }
}
}
__global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
d_out[d_index[i]]=d_in[i];
}
}
// // pay attention that blockDim.x must be power of 2
// __global__ void blellochScan(unsigned int *out, unsigned int *in,
// unsigned int *sum, unsigned int inputSize) {
// __shared__ unsigned int temp[2 * 256];
// unsigned int start = blockIdx.x * blockDim.x << 1;
// unsigned int tx = threadIdx.x;
// unsigned int index = 0;
// temp[tx] = (start + tx < inputSize)? in[start+tx]:0;
// temp[tx+blockDim.x] = (start + tx + blockDim.x < inputSize)? in[start + tx + blockDim.x] : 0;
// // Blelloch Scan
// __syncthreads();
// // reduction step
// unsigned int stride = 1;
// while (stride <= blockDim.x) {
// index = (tx + 1) * (stride << 1) - 1;
// if (index < (blockDim.x << 1)) {
// temp[index] += temp[index - stride];
// }
// stride <<= 1;
// __syncthreads();
// }
// // first store the reduction sum in sum array
// // make it zero since it is exclusive scan
// if (tx == 0) {
// // sum array contains the prefix sum of each
// // 2*blockDim blocks of element.
// if (sum != NULL) {
// sum[blockIdx.x] = temp[(blockDim.x << 1) - 1];
// }
// temp[(blockDim.x << 1) - 1] = 0;
// }
// // wait for thread zero to write
// __syncthreads();
// // post scan step
// stride = blockDim.x;
// index = 0;
// unsigned int var = 0;
// while (stride > 0) {
// index = ((stride << 1) * (tx + 1)) - 1;
// if (index < (blockDim.x << 1)) {
// var = temp[index];
// temp[index] += temp[index - stride];
// temp[index - stride] = var;
// }
// stride >>= 1;
// __syncthreads();
// }
// // now write the temp array to output
// if (start + tx < inputSize) {
// out[start + tx] = temp[tx];
// }
// if (start + tx + blockDim.x < inputSize) {
// out[start + tx + blockDim.x] = temp[tx + blockDim.x];
// }
// }
// /*
// sum out the blocks' accumulated sums to each element
// */
// __global__ void mergeScanBlocks(unsigned int *sum, unsigned int *output,
// unsigned int opSize) {
// unsigned int index = (blockDim.x * blockIdx.x << 1) + threadIdx.x;
// if (index < opSize) {
// // output[index] += sum[blockIdx.x];
// output[index] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x];
// // output[index] += tex1Dfetch(tex_sum, blockIdx.x);
// }
// if (index + blockDim.x < opSize) {
// // output[index + blockDim.x] += sum[blockIdx.x];
// output[index + blockDim.x] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x];
// // output[index + blockDim.x] += tex1Dfetch(tex_sum, blockIdx.x);
// }
// }
// /*
// api for exclusiveScan
// */
// void exclusiveScan(unsigned int *out, unsigned int *in, unsigned int in_size, unsigned int block_size) {
// unsigned int numBlocks1 = in_size / block_size;
// if (in_size % block_size) numBlocks1++;
// unsigned int numBlocks2 = numBlocks1 / 2;
// if (numBlocks1 % 2) numBlocks2++;
// dim3 dimThreadBlock;
// dimThreadBlock.x = block_size;
// dimThreadBlock.y = 1;
// dimThreadBlock.z = 1;
// dim3 dimGrid;
// dimGrid.x = numBlocks2;
// dimGrid.y = 1;
// dimGrid.z = 1;
// unsigned int *d_sumArr = NULL;
// if (in_size > (2 * block_size)) {
// // we need the sum auxilarry array only if nuFmblocks2 > 1
// CHECK(cudaMalloc((void **)&d_sumArr, numBlocks2 * sizeof(unsigned int)));
// }
// blellochScan<<<dimGrid, dimThreadBlock>>>(out, in, d_sumArr, in_size);
// if (in_size <= (2 * block_size)) {
// // out has proper exclusive scan. just return
// CHECK(cudaDeviceSynchronize());
// return;
// } else {
// // now we need to perform exclusive scan on the auxilliary sum array
// unsigned int *d_sumArr_scan;
// CHECK(cudaMalloc((void **)&d_sumArr_scan, numBlocks2 * sizeof(unsigned int)));
// exclusiveScan(d_sumArr_scan, d_sumArr, numBlocks2, block_size);
// // d_sumArr_scan now contains the exclusive scan op of individual blocks
// // now just do a one-one addition of blocks
// // cudaBindTexture(0, tex_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int));
// if(numBlocks2 <= MAX_CONSTANT) {
// CHECK(cudaMemcpyToSymbol(const_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice));
// }
// mergeScanBlocks<<<dimGrid, dimThreadBlock>>>(d_sumArr_scan, out, in_size);
// // cudaUnbindTexture(tex_sum);
// cudaFree(d_sumArr);
// cudaFree(d_sumArr_scan);
// }
// }
#define MAX_BLOCK_SZ 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
__global__
void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
}
__global__
void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory size
// must be a 2 * blockDim.x + blockDim.x/num_banks
s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(unsigned int* const d_out,
const unsigned int* const d_in,
const size_t numElems)
{
// Zero out d_out
CHECK(cudaMemset(d_out, 0, numElems * sizeof(unsigned int)));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
CHECK(cudaMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
CHECK(cudaMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
CHECK(cudaMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
CHECK(cudaMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int)));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
CHECK(cudaFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
CHECK(cudaMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
CHECK(cudaMemcpy(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, cudaMemcpyDeviceToDevice));
sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz);
CHECK(cudaFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems);
CHECK(cudaFree(d_block_sums));
}
// idea to do exclusive prefix is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs
// I will use y,z,s to specify which step I am in.
// in particular, I split the whole array into multiple smaller array. each small array has [len] numbers
// Thread level y: each thread will do addition sequentially. threads are working independently, dealing with [len] numbers.
// Thread level z: each threads in the same block will do sequentially. threads are working independently, dealing with one block.
// Thread level s: each thread will add the result from its previous thread. threads are working independently, dealing with [len] numbers.
// Block level y: this will get prefix sum in block level.
// Block level z: only one block and one thread are used here, do addition sequentially.
// Block level s: each threads will add the result from its previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
// int step=len*block_size;//each block has step number
// int start=2*step;
// for(unsigned int i=start; i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
__global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
//only one block and one thread
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
// __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// if (index==0) return; //the first block is not needed to merge
// int step=len*blockDim.x;
// int start=index*step+1; //exclusive
// int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[start-1];//last element at last block
// for(int i=start; i<end && i<n; i++){
// sum[i]+=base;
// }
// }
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
if (blockIdx.x==0) return;//the first block is not needed to merge
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int base_index=blockIdx.x*step*blockDim.x;
unsigned int base=sum[base_index];
int start=index*step; //only the first thread in block should excluded the first element
int end=start+step;
start=(start==base_index)?start+1:start;
// int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
void psort(int n, data_t *data) {
if(n<=0) return;
// FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;//64 threads per block;
const int len=2000; // add 1000 prefix sum per thread;
data_t *d_temp;
data_t *d_in=NULL;
CHECK(cudaMalloc((void**)&d_in,n*sizeof(data_t)));
data_t *d_out_long=NULL;
CHECK(cudaMalloc((void**)&d_out_long,n*sizeof(data_t)));
unsigned int *d_out=NULL;
CHECK(cudaMalloc((void**)&d_out,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(cudaMalloc((void**)&d_sum,n*sizeof(unsigned int)));
unsigned int *d_index=NULL;
CHECK(cudaMalloc((void**)&d_index,n*sizeof(unsigned int)));
// std::vector<unsigned int> inter_sum(n);
// unsigned int inter_sum[n];
cuda_memcpy(d_in,data,n,cudaMemcpyHostToDevice);
data_t bits=sizeof(data_t)*8;
// unsigned int out[n];
// unsigned int sum[n];
unsigned int total_zeros, mask_last;
//one pass here
clock_t test = clock();
for(data_t i=0; i<bits; i++){
CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0);
CHECK(cudaGetLastError());
// CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"out "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<out[j]<<" ";
// }
// std::cout<<std::endl;
// //inclusive prefix sum
// prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
// CHECK(cudaGetLastError());
// // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// // serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// // CHECK(cudaGetLastError());
// mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
clock_t start = clock();
sum_scan_blelloch(d_sum, d_out, n);
std::cout<<"time: "<<double(clock()-start)/CLOCKS_PER_SEC<<std::endl;
// exclusiveScan(d_sum, d_out, n, block_size);
// CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"sum "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(cudaMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
CHECK(cudaMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), cudaMemcpyDeviceToHost));
total_zeros+=(mask_last==1)?1:0;
getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// std::cout<<"index "<<std::endl;
// CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(cudaGetLastError());
// // get mask for 1 and store in d_out
// getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 1);
// CHECK(cudaGetLastError());
// //inclusive prefix sum
// CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
// prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// // CHECK(cudaMemcpy(inter_sum.data() , d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// // serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
// CHECK(cudaGetLastError());
// mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// CHECK(cudaGetLastError());
scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n);
CHECK(cudaGetLastError());
//must swap pointers
d_temp = d_in;
d_in = d_out_long;
d_out_long = d_temp;
}
std::cout<<"total: "<<double(clock()-test)/CLOCKS_PER_SEC<<std::endl;
cuda_memcpy(data, d_in, n, cudaMemcpyDeviceToHost);
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out_long));
CHECK(cudaFree(d_out));
CHECK(cudaFree(d_sum));
CHECK(cudaFree(d_index));
// std::sort(data, data + n);
} | #include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <vector>
#include <time.h>
typedef unsigned long long data_t;
static inline void check(hipError_t err, const char* context) {
if (err != hipSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< hipGetErrorString(err) << std::endl;
std::exit(EXIT_FAILURE);
}
}
#define CHECK(x) check(x, #x)
const int MAX_CONSTANT = 16*1024;
__constant__ unsigned int const_sum[MAX_CONSTANT];
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, hipMemcpyKind direction) {
CHECK(hipMemcpy(target, source, num * sizeof(T), direction));
}
static inline int divup(int a, int b) {
return (a + b - 1)/b;
}
// get the 0 bit of each number by bit_shift
// example: number : 10001, bit_shit: 1, One: 1,
//
// it means check if the second bit is 1 or not.
__global__ void getMask(data_t *d_in, unsigned int *d_out, const int len, const unsigned int n, data_t bit_shift, unsigned int One) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
data_t bit = 0;
data_t one=1;
data_t shift=one<<bit_shift;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
bit=d_in[i]&shift;
bit = (bit > 0) ? 1 : 0;
d_out[i] = (One ? bit : 1 - bit);
}
}
__global__ void getIndex(unsigned int *d_index, unsigned int *d_sum, unsigned int* d_mask, const int len, const unsigned int n,
unsigned int total_pre) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for (unsigned int i=start; i<end && i<n; i++){
d_index[i]=d_mask[i]?d_sum[i]:i-d_sum[i]+total_pre;
if(d_index[i]>=n){
printf(" d_sum[i] : %d, total_pre : %d, d_mask[i] : %d \n", d_sum[i], total_pre, d_mask[i]);
}
// if(d_mask[i]==1){
// d_index[i]=total_pre+d_sum[i];
// }
}
}
__global__ void scatter(data_t *d_in, unsigned int *d_index, data_t *d_out, const int len, const unsigned int n) {
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int start=index*len;
if (start>=n) return;
unsigned int end=start+len;
for(unsigned int i=start;i<end && i<n; i++ ){
d_out[d_index[i]]=d_in[i];
}
}
// // pay attention that blockDim.x must be power of 2
// __global__ void blellochScan(unsigned int *out, unsigned int *in,
// unsigned int *sum, unsigned int inputSize) {
// __shared__ unsigned int temp[2 * 256];
// unsigned int start = blockIdx.x * blockDim.x << 1;
// unsigned int tx = threadIdx.x;
// unsigned int index = 0;
// temp[tx] = (start + tx < inputSize)? in[start+tx]:0;
// temp[tx+blockDim.x] = (start + tx + blockDim.x < inputSize)? in[start + tx + blockDim.x] : 0;
// // Blelloch Scan
// __syncthreads();
// // reduction step
// unsigned int stride = 1;
// while (stride <= blockDim.x) {
// index = (tx + 1) * (stride << 1) - 1;
// if (index < (blockDim.x << 1)) {
// temp[index] += temp[index - stride];
// }
// stride <<= 1;
// __syncthreads();
// }
// // first store the reduction sum in sum array
// // make it zero since it is exclusive scan
// if (tx == 0) {
// // sum array contains the prefix sum of each
// // 2*blockDim blocks of element.
// if (sum != NULL) {
// sum[blockIdx.x] = temp[(blockDim.x << 1) - 1];
// }
// temp[(blockDim.x << 1) - 1] = 0;
// }
// // wait for thread zero to write
// __syncthreads();
// // post scan step
// stride = blockDim.x;
// index = 0;
// unsigned int var = 0;
// while (stride > 0) {
// index = ((stride << 1) * (tx + 1)) - 1;
// if (index < (blockDim.x << 1)) {
// var = temp[index];
// temp[index] += temp[index - stride];
// temp[index - stride] = var;
// }
// stride >>= 1;
// __syncthreads();
// }
// // now write the temp array to output
// if (start + tx < inputSize) {
// out[start + tx] = temp[tx];
// }
// if (start + tx + blockDim.x < inputSize) {
// out[start + tx + blockDim.x] = temp[tx + blockDim.x];
// }
// }
// /*
// sum out the blocks' accumulated sums to each element
// */
// __global__ void mergeScanBlocks(unsigned int *sum, unsigned int *output,
// unsigned int opSize) {
// unsigned int index = (blockDim.x * blockIdx.x << 1) + threadIdx.x;
// if (index < opSize) {
// // output[index] += sum[blockIdx.x];
// output[index] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x];
// // output[index] += tex1Dfetch(tex_sum, blockIdx.x);
// }
// if (index + blockDim.x < opSize) {
// // output[index + blockDim.x] += sum[blockIdx.x];
// output[index + blockDim.x] += (opSize > MAX_CONSTANT)? sum[blockIdx.x]:const_sum[blockIdx.x];
// // output[index + blockDim.x] += tex1Dfetch(tex_sum, blockIdx.x);
// }
// }
// /*
// api for exclusiveScan
// */
// void exclusiveScan(unsigned int *out, unsigned int *in, unsigned int in_size, unsigned int block_size) {
// unsigned int numBlocks1 = in_size / block_size;
// if (in_size % block_size) numBlocks1++;
// unsigned int numBlocks2 = numBlocks1 / 2;
// if (numBlocks1 % 2) numBlocks2++;
// dim3 dimThreadBlock;
// dimThreadBlock.x = block_size;
// dimThreadBlock.y = 1;
// dimThreadBlock.z = 1;
// dim3 dimGrid;
// dimGrid.x = numBlocks2;
// dimGrid.y = 1;
// dimGrid.z = 1;
// unsigned int *d_sumArr = NULL;
// if (in_size > (2 * block_size)) {
// // we need the sum auxilarry array only if nuFmblocks2 > 1
// CHECK(cudaMalloc((void **)&d_sumArr, numBlocks2 * sizeof(unsigned int)));
// }
// blellochScan<<<dimGrid, dimThreadBlock>>>(out, in, d_sumArr, in_size);
// if (in_size <= (2 * block_size)) {
// // out has proper exclusive scan. just return
// CHECK(cudaDeviceSynchronize());
// return;
// } else {
// // now we need to perform exclusive scan on the auxilliary sum array
// unsigned int *d_sumArr_scan;
// CHECK(cudaMalloc((void **)&d_sumArr_scan, numBlocks2 * sizeof(unsigned int)));
// exclusiveScan(d_sumArr_scan, d_sumArr, numBlocks2, block_size);
// // d_sumArr_scan now contains the exclusive scan op of individual blocks
// // now just do a one-one addition of blocks
// // cudaBindTexture(0, tex_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int));
// if(numBlocks2 <= MAX_CONSTANT) {
// CHECK(cudaMemcpyToSymbol(const_sum, d_sumArr_scan, numBlocks2 * sizeof(unsigned int), 0, cudaMemcpyDeviceToDevice));
// }
// mergeScanBlocks<<<dimGrid, dimThreadBlock>>>(d_sumArr_scan, out, in_size);
// // cudaUnbindTexture(tex_sum);
// cudaFree(d_sumArr);
// cudaFree(d_sumArr_scan);
// }
// }
#define MAX_BLOCK_SZ 128
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
//#define ZERO_BANK_CONFLICTS
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif
__global__
void gpu_add_block_sums(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const size_t numElems)
{
unsigned int d_block_sum_val = d_block_sums[blockIdx.x];
// Simple implementation's performance is not significantly (if at all)
// better than previous verbose implementation
unsigned int cpy_idx = 2 * blockIdx.x * blockDim.x + threadIdx.x;
if (cpy_idx < numElems)
{
d_out[cpy_idx] = d_in[cpy_idx] + d_block_sum_val;
if (cpy_idx + blockDim.x < numElems)
d_out[cpy_idx + blockDim.x] = d_in[cpy_idx + blockDim.x] + d_block_sum_val;
}
}
__global__
void gpu_prescan(unsigned int* const d_out,
const unsigned int* const d_in,
unsigned int* const d_block_sums,
const unsigned int len,
const unsigned int shmem_sz,
const unsigned int max_elems_per_block)
{
// Allocated on invocation
extern __shared__ unsigned int s_out[];
int thid = threadIdx.x;
int ai = thid;
int bi = thid + blockDim.x;
// Zero out the shared memory
// Helpful especially when input size is not power of two
s_out[thid] = 0;
s_out[thid + blockDim.x] = 0;
// If CONFLICT_FREE_OFFSET is used, shared memory size
// must be a 2 * blockDim.x + blockDim.x/num_banks
s_out[thid + blockDim.x + (blockDim.x >> LOG_NUM_BANKS)] = 0;
__syncthreads();
// Copy d_in to shared memory
// Note that d_in's elements are scattered into shared memory
// in light of avoiding bank conflicts
unsigned int cpy_idx = max_elems_per_block * blockIdx.x + threadIdx.x;
if (cpy_idx < len)
{
s_out[ai + CONFLICT_FREE_OFFSET(ai)] = d_in[cpy_idx];
if (cpy_idx + blockDim.x < len)
s_out[bi + CONFLICT_FREE_OFFSET(bi)] = d_in[cpy_idx + blockDim.x];
}
// For both upsweep and downsweep:
// Sequential indices with conflict free padding
// Amount of padding = target index / num banks
// This "shifts" the target indices by one every multiple
// of the num banks
// offset controls the stride and starting index of
// target elems at every iteration
// d just controls which threads are active
// Sweeps are pivoted on the last element of shared memory
// Upsweep/Reduce step
int offset = 1;
for (int d = max_elems_per_block >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
s_out[bi] += s_out[ai];
}
offset <<= 1;
}
// Save the total sum on the global block sums array
// Then clear the last element on the shared memory
if (thid == 0)
{
d_block_sums[blockIdx.x] = s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)];
s_out[max_elems_per_block - 1
+ CONFLICT_FREE_OFFSET(max_elems_per_block - 1)] = 0;
}
// Downsweep step
for (int d = 1; d < max_elems_per_block; d <<= 1)
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset * ((thid << 1) + 1) - 1;
int bi = offset * ((thid << 1) + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int temp = s_out[ai];
s_out[ai] = s_out[bi];
s_out[bi] += temp;
}
}
__syncthreads();
// Copy contents of shared memory to global memory
if (cpy_idx < len)
{
d_out[cpy_idx] = s_out[ai + CONFLICT_FREE_OFFSET(ai)];
if (cpy_idx + blockDim.x < len)
d_out[cpy_idx + blockDim.x] = s_out[bi + CONFLICT_FREE_OFFSET(bi)];
}
}
void sum_scan_blelloch(unsigned int* const d_out,
const unsigned int* const d_in,
const size_t numElems)
{
// Zero out d_out
CHECK(hipMemset(d_out, 0, numElems * sizeof(unsigned int)));
// Set up number of threads and blocks
unsigned int block_sz = MAX_BLOCK_SZ / 2;
unsigned int max_elems_per_block = 2 * block_sz; // due to binary tree nature of algorithm
// If input size is not power of two, the remainder will still need a whole block
// Thus, number of blocks must be the ceiling of input size / max elems that a block can handle
//unsigned int grid_sz = (unsigned int) std::ceil((double) numElems / (double) max_elems_per_block);
// UPDATE: Instead of using ceiling and risking miscalculation due to precision, just automatically
// add 1 to the grid size when the input size cannot be divided cleanly by the block's capacity
unsigned int grid_sz = numElems / max_elems_per_block;
// Take advantage of the fact that integer division drops the decimals
if (numElems % max_elems_per_block != 0)
grid_sz += 1;
// Conflict free padding requires that shared memory be more than 2 * block_sz
unsigned int shmem_sz = max_elems_per_block + ((max_elems_per_block) >> LOG_NUM_BANKS);
// Allocate memory for array of total sums produced by each block
// Array length must be the same as number of blocks
unsigned int* d_block_sums;
CHECK(hipMalloc(&d_block_sums, sizeof(unsigned int) * grid_sz));
CHECK(hipMemset(d_block_sums, 0, sizeof(unsigned int) * grid_sz));
// Sum scan data allocated to each block
//gpu_sum_scan_blelloch<<<grid_sz, block_sz, sizeof(unsigned int) * max_elems_per_block >>>(d_out, d_in, d_block_sums, numElems);
gpu_prescan<<<grid_sz, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_out,
d_in,
d_block_sums,
numElems,
shmem_sz,
max_elems_per_block);
// Sum scan total sums produced by each block
// Use basic implementation if number of total sums is <= 2 * block_sz
// (This requires only one block to do the scan)
if (grid_sz <= max_elems_per_block)
{
unsigned int* d_dummy_blocks_sums;
CHECK(hipMalloc(&d_dummy_blocks_sums, sizeof(unsigned int)));
CHECK(hipMemset(d_dummy_blocks_sums, 0, sizeof(unsigned int)));
//gpu_sum_scan_blelloch<<<1, block_sz, sizeof(unsigned int) * max_elems_per_block>>>(d_block_sums, d_block_sums, d_dummy_blocks_sums, grid_sz);
gpu_prescan<<<1, block_sz, sizeof(unsigned int) * shmem_sz>>>(d_block_sums,
d_block_sums,
d_dummy_blocks_sums,
grid_sz,
shmem_sz,
max_elems_per_block);
CHECK(hipFree(d_dummy_blocks_sums));
}
// Else, recurse on this same function as you'll need the full-blown scan
// for the block sums
else
{
unsigned int* d_in_block_sums;
CHECK(hipMalloc(&d_in_block_sums, sizeof(unsigned int) * grid_sz));
CHECK(hipMemcpy(d_in_block_sums, d_block_sums, sizeof(unsigned int) * grid_sz, hipMemcpyDeviceToDevice));
sum_scan_blelloch(d_block_sums, d_in_block_sums, grid_sz);
CHECK(hipFree(d_in_block_sums));
}
// Add each block's total sum to its scan output
// in order to get the final, global scanned array
gpu_add_block_sums<<<grid_sz, block_sz>>>(d_out, d_out, d_block_sums, numElems);
CHECK(hipFree(d_block_sums));
}
// idea to do exclusive prefix is similar to my ppc course https://www.youtube.com/watch?v=HVhCtl96gUs
// I will use y,z,s to specify which step I am in.
// in particular, I split the whole array into multiple smaller array. each small array has [len] numbers
// Thread level y: each thread will do addition sequentially. threads are working independently, dealing with [len] numbers.
// Thread level z: each threads in the same block will do sequentially. threads are working independently, dealing with one block.
// Thread level s: each thread will add the result from its previous thread. threads are working independently, dealing with [len] numbers.
// Block level y: this will get prefix sum in block level.
// Block level z: only one block and one thread are used here, do addition sequentially.
// Block level s: each threads will add the result from its previous block.
__global__ void prefixsum(unsigned int* mask, unsigned int* output,const int len, const unsigned int n ){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int start=index*len+1;//exclusive
if (start>n) return; //exclusive, could equal to n
int end=start+step;
output[start]=mask[start-1];
for(unsigned int i=start+1;i<end&&i<n;i++){
output[i]+=output[i-1]+mask[i-1];//exclusive, therefore mask[i-1]
}
}
__global__ void serialsum_accrossthread(unsigned int* sum,const int len, const unsigned int n){
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
int offset=2*step;
unsigned int start=step*blockDim.x*index+offset;
unsigned int end=step*blockDim.x*(index+1)+1;
for(unsigned int i=start;i<end && i<n; i+=step){
sum[i]+=sum[i-step];
}
}
__global__ void mergethread(unsigned int* sum,const int len, const unsigned int n){
if (threadIdx.x==0) return;
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int start=index*step+1;//exclusive
unsigned int end=start+step-1; // -1 is important, this position has been added in serial sum
unsigned int base=sum[start-1];
for(unsigned int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
// void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
// int step=len*block_size;//each block has step number
// int start=2*step;
// for(unsigned int i=start; i<n; i+=step){
// sum[i]+=sum[i-step];
// }
// }
__global__ void serialsum_accrossblock(unsigned int* sum,const int len, const unsigned int n, const int block_size){
//only one block and one thread
int step=len*block_size;//each block has step number
int start=2*step;
for(unsigned int i=start; i<n; i+=step){
sum[i]+=sum[i-step];
}
}
// __global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
// unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
// if (index==0) return; //the first block is not needed to merge
// int step=len*blockDim.x;
// int start=index*step+1; //exclusive
// int end=start+step-1;// -1 is important, this position has been added in serial sum
// int base=sum[start-1];//last element at last block
// for(int i=start; i<end && i<n; i++){
// sum[i]+=base;
// }
// }
__global__ void mergeblock(unsigned int* sum,const int len, const unsigned int n){
if (blockIdx.x==0) return;//the first block is not needed to merge
unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
int step=len;
unsigned int base_index=blockIdx.x*step*blockDim.x;
unsigned int base=sum[base_index];
int start=index*step; //only the first thread in block should excluded the first element
int end=start+step;
start=(start==base_index)?start+1:start;
// int base=sum[start-1];//last element at last block
for(int i=start; i<end && i<n; i++){
sum[i]+=base;
}
}
void psort(int n, data_t *data) {
if(n<=0) return;
// FIXME: Implement a more efficient parallel sorting algorithm for the GPU.
const int block_size=256;//64 threads per block;
const int len=2000; // add 1000 prefix sum per thread;
data_t *d_temp;
data_t *d_in=NULL;
CHECK(hipMalloc((void**)&d_in,n*sizeof(data_t)));
data_t *d_out_long=NULL;
CHECK(hipMalloc((void**)&d_out_long,n*sizeof(data_t)));
unsigned int *d_out=NULL;
CHECK(hipMalloc((void**)&d_out,n*sizeof(unsigned int)));
unsigned int *d_sum=NULL;
CHECK(hipMalloc((void**)&d_sum,n*sizeof(unsigned int)));
unsigned int *d_index=NULL;
CHECK(hipMalloc((void**)&d_index,n*sizeof(unsigned int)));
// std::vector<unsigned int> inter_sum(n);
// unsigned int inter_sum[n];
cuda_memcpy(d_in,data,n,hipMemcpyHostToDevice);
data_t bits=sizeof(data_t)*8;
// unsigned int out[n];
// unsigned int sum[n];
unsigned int total_zeros, mask_last;
//one pass here
clock_t test = clock();
for(data_t i=0; i<bits; i++){
CHECK(hipMemset(d_sum,0,n*sizeof(unsigned int)));
getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 0);
CHECK(hipGetLastError());
// CHECK(cudaMemcpy(out, d_out, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"out "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<out[j]<<" ";
// }
// std::cout<<std::endl;
// //inclusive prefix sum
// prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
// CHECK(cudaGetLastError());
// // CHECK(cudaMemcpy(inter_sum.data(), d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// // serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// // CHECK(cudaGetLastError());
// mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
clock_t start = clock();
sum_scan_blelloch(d_sum, d_out, n);
std::cout<<"time: "<<double(clock()-start)/CLOCKS_PER_SEC<<std::endl;
// exclusiveScan(d_sum, d_out, n, block_size);
// CHECK(cudaMemcpy(sum, d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// std::cout<<"sum "<<std::endl;
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(hipMemcpy(&total_zeros, d_sum+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost));
CHECK(hipMemcpy(&mask_last, d_out+n-1, sizeof(unsigned int), hipMemcpyDeviceToHost));
total_zeros+=(mask_last==1)?1:0;
getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// std::cout<<"index "<<std::endl;
// CHECK(cudaMemcpy(sum, d_index, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// for(int j=0;j<n;j++){
// std::cout<<sum[j]<<" ";
// }
// std::cout<<std::endl;
CHECK(hipGetLastError());
// // get mask for 1 and store in d_out
// getMask<<<divup(n,block_size*len),block_size>>>(d_in, d_out, len, n, i, 1);
// CHECK(cudaGetLastError());
// //inclusive prefix sum
// CHECK(cudaMemset(d_sum,0,n*sizeof(unsigned int)));
// prefixsum<<<divup(n,block_size*len),block_size>>>(d_out,d_sum,len,n);
// CHECK(cudaGetLastError());
// serialsum_accrossthread<<<divup(n,block_size*len*block_size),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// mergethread<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// // CHECK(cudaMemcpy(inter_sum.data() , d_sum, n * sizeof(unsigned int), cudaMemcpyDeviceToHost));
// // serialsum_accrossblock(inter_sum.data(), len, n, block_size);
// // CHECK(cudaMemcpy(d_sum, inter_sum.data(),n * sizeof(unsigned int), cudaMemcpyHostToDevice));
// serialsum_accrossblock<<<1,1>>>(d_sum, len, n, block_size);
// CHECK(cudaGetLastError());
// mergeblock<<<divup(n,block_size*len),block_size>>>(d_sum,len,n);
// CHECK(cudaGetLastError());
// getIndex<<<divup(n,block_size*len),block_size>>>(d_index, d_sum, d_out, len, n, total_zeros);
// CHECK(cudaGetLastError());
scatter<<<divup(n,block_size*len),block_size>>>(d_in, d_index, d_out_long, len, n);
CHECK(hipGetLastError());
//must swap pointers
d_temp = d_in;
d_in = d_out_long;
d_out_long = d_temp;
}
std::cout<<"total: "<<double(clock()-test)/CLOCKS_PER_SEC<<std::endl;
cuda_memcpy(data, d_in, n, hipMemcpyDeviceToHost);
CHECK(hipFree(d_in));
CHECK(hipFree(d_out_long));
CHECK(hipFree(d_out));
CHECK(hipFree(d_sum));
CHECK(hipFree(d_index));
// std::sort(data, data + n);
} |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //pass
//--blockDim=32 --gridDim=2
#include "../common.h"
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int threadN = blockDim.x * gridDim.x;
int i,path;
float L[NN], L2[L2_SIZE], z[NN];
float *L_b = L;
/* Monte Carlo LIBOR path calculation*/
for(path = tid; path < NPATH; path += threadN){
// initialise the data for current thread
for (i=0; i<N; i++) {
// for real application, z should be randomly generated
z[i] = 0.3;
L[i] = 0.05;
}
path_calc_b1(L, z, L2);
d_v[path] = portfolio_b(L,L_b);
path_calc_b2(L_b, z, L2);
d_Lb[path] = L_b[NN-1];
}
} | .file "tmpxft_0004685c_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2034:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2034:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
.type _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_, @function
_Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_:
.LFB2056:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z28Pathcalc_Portfolio_KernelGPUPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_, .-_Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
.globl _Z28Pathcalc_Portfolio_KernelGPUPfS_
.type _Z28Pathcalc_Portfolio_KernelGPUPfS_, @function
_Z28Pathcalc_Portfolio_KernelGPUPfS_:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z28Pathcalc_Portfolio_KernelGPUPfS_, .-_Z28Pathcalc_Portfolio_KernelGPUPfS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z28Pathcalc_Portfolio_KernelGPUPfS_"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "N"
.LC2:
.string "Nmat"
.LC3:
.string "Nopt"
.LC4:
.string "maturities"
.LC5:
.string "delta"
.LC6:
.string "swaprates"
.LC7:
.string "lambda"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z28Pathcalc_Portfolio_KernelGPUPfS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL1N(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4Nmat(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4Nopt(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $60, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10maturities(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5delta(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $60, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9swaprates(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $320, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL6lambda(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL6lambda
.comm _ZL6lambda,320,32
.local _ZL9swaprates
.comm _ZL9swaprates,60,32
.local _ZL5delta
.comm _ZL5delta,4,4
.local _ZL10maturities
.comm _ZL10maturities,60,32
.local _ZL4Nopt
.comm _ZL4Nopt,4,4
.local _ZL4Nmat
.comm _ZL4Nmat,4,4
.local _ZL1N
.comm _ZL1N,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //pass
//--blockDim=32 --gridDim=2
#include "../common.h"
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int threadN = blockDim.x * gridDim.x;
int i,path;
float L[NN], L2[L2_SIZE], z[NN];
float *L_b = L;
/* Monte Carlo LIBOR path calculation*/
for(path = tid; path < NPATH; path += threadN){
// initialise the data for current thread
for (i=0; i<N; i++) {
// for real application, z should be randomly generated
z[i] = 0.3;
L[i] = 0.05;
}
path_calc_b1(L, z, L2);
d_v[path] = portfolio_b(L,L_b);
path_calc_b2(L_b, z, L2);
d_Lb[path] = L_b[NN-1];
}
} | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
#include "../common.h"
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int threadN = blockDim.x * gridDim.x;
int i,path;
float L[NN], L2[L2_SIZE], z[NN];
float *L_b = L;
/* Monte Carlo LIBOR path calculation*/
for(path = tid; path < NPATH; path += threadN){
// initialise the data for current thread
for (i=0; i<N; i++) {
// for real application, z should be randomly generated
z[i] = 0.3;
L[i] = 0.05;
}
path_calc_b1(L, z, L2);
d_v[path] = portfolio_b(L,L_b);
path_calc_b2(L_b, z, L2);
d_Lb[path] = L_b[NN-1];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
#include "../common.h"
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int threadN = blockDim.x * gridDim.x;
int i,path;
float L[NN], L2[L2_SIZE], z[NN];
float *L_b = L;
/* Monte Carlo LIBOR path calculation*/
for(path = tid; path < NPATH; path += threadN){
// initialise the data for current thread
for (i=0; i<N; i++) {
// for real application, z should be randomly generated
z[i] = 0.3;
L[i] = 0.05;
}
path_calc_b1(L, z, L2);
d_v[path] = portfolio_b(L,L_b);
path_calc_b2(L_b, z, L2);
d_Lb[path] = L_b[NN-1];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z28Pathcalc_Portfolio_KernelGPUPfS_
.globl _Z28Pathcalc_Portfolio_KernelGPUPfS_
.p2align 8
.type _Z28Pathcalc_Portfolio_KernelGPUPfS_,@function
_Z28Pathcalc_Portfolio_KernelGPUPfS_:
s_load_b32 s4, s[0:1], 0x1c
s_add_u32 s2, s0, 16
s_addc_u32 s3, s1, 0
s_waitcnt lgkmcnt(0)
s_and_b32 s17, s4, 0xffff
s_mov_b32 s4, exec_lo
v_mad_u64_u32 v[1:2], null, s15, s17, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e32 0x1000, v1
s_cbranch_execz .LBB0_45
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, N@rel32@lo+4
s_addc_u32 s5, s5, N@rel32@hi+12
v_mov_b32_e32 v2, 0x290
s_load_b32 s8, s[4:5], 0x0
v_or_b32_e64 v0, 0x290, 4
v_dual_mov_b32 v5, 0x3d4ccccd :: v_dual_mov_b32 v6, 0
s_delay_alu instid0(VALU_DEP_3)
v_readfirstlane_b32 s23, v2
s_mov_b32 s16, 0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s8, 0
s_cselect_b32 s9, -1, 0
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, Nmat@rel32@lo+4
s_addc_u32 s5, s5, Nmat@rel32@hi+12
s_load_b32 s10, s[4:5], 0x0
s_waitcnt lgkmcnt(0)
s_cmp_gt_i32 s10, 0
s_cselect_b32 s11, -1, 0
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, delta@rel32@lo+4
s_addc_u32 s5, s5, delta@rel32@hi+12
s_sub_i32 s13, s8, s10
s_load_b32 s12, s[4:5], 0x0
s_cmp_gt_i32 s13, 0
s_cselect_b32 s14, -1, 0
s_getpc_b64 s[4:5]
s_add_u32 s4, s4, Nopt@rel32@lo+4
s_addc_u32 s5, s5, Nopt@rel32@hi+12
s_load_b32 s2, s[2:3], 0x0
s_load_b32 s15, s[4:5], 0x0
s_load_b128 s[4:7], s[0:1], 0x0
s_lshl_b32 s1, s10, 2
s_delay_alu instid0(SALU_CYCLE_1)
v_add_nc_u32_e64 v7, 0x290, s1
s_waitcnt lgkmcnt(0)
v_mul_f32_e64 v3, 0x4f800000, s12
v_cmp_gt_f32_e64 vcc_lo, 0xf800000, s12
s_mul_i32 s17, s2, s17
s_cmp_gt_i32 s15, 0
s_delay_alu instid0(VALU_DEP_2)
v_cndmask_b32_e32 v3, s12, v3, vcc_lo
s_cselect_b32 s18, -1, 0
s_not_b32 s19, s10
s_mul_i32 s2, s10, s8
s_add_i32 s0, s8, s19
v_sqrt_f32_e32 v2, v3
v_mov_b32_e32 v4, 0x3e99999a
s_cmp_gt_i32 s0, -1
s_cselect_b32 s20, -1, 0
s_cmp_gt_i32 s8, s10
s_cselect_b32 s21, -1, 0
s_lshl_b32 s22, s8, 2
s_xor_b32 s3, s1, -4
s_waitcnt_depctr 0xfff
v_add_nc_u32_e32 v9, 1, v2
v_add_nc_u32_e32 v8, -1, v2
s_sub_i32 s26, 0x1f0, s1
s_add_i32 s1, s1, 4
s_add_i32 s24, s22, 4
v_fma_f32 v11, -v9, v2, v3
v_fma_f32 v10, -v8, v2, v3
s_add_i32 s23, s23, -4
s_add_i32 s25, s3, 0x150
s_add_i32 s26, s26, -8
s_sub_i32 s28, 0, s22
v_cmp_ge_f32_e64 s0, 0, v10
s_add_i32 s30, s23, s22
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v2, v2, v8, s0
v_cmp_lt_f32_e64 s0, 0, v11
v_add_nc_u32_e64 v8, s3, 16
v_cndmask_b32_e64 v2, v2, v9, s0
s_lshl_b32 s0, s2, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
s_add_i32 s27, s0, 0x3d0
s_mul_i32 s0, s8, s1
v_mul_f32_e32 v9, 0x37800000, v2
s_add_i32 s29, s0, 0x3d0
s_add_i32 s27, s27, -4
s_add_i32 s29, s29, -4
s_getpc_b64 s[38:39]
s_add_u32 s38, s38, lambda@rel32@lo+4
s_addc_u32 s39, s39, lambda@rel32@hi+12
v_cndmask_b32_e32 v2, v2, v9, vcc_lo
v_cmp_class_f32_e64 vcc_lo, v3, 0x260
v_add_nc_u32_e64 v9, 0x3d0, s24
s_delay_alu instid0(VALU_DEP_3)
v_cndmask_b32_e32 v10, v2, v3, vcc_lo
s_branch .LBB0_3
.LBB0_2:
scratch_load_b32 v11, off, off offset:972
v_add_nc_u32_e32 v1, s17, v1
v_add_co_u32 v2, s0, s6, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v3, s0, s7, v3, s0
v_cmp_lt_i32_e32 vcc_lo, 0xfff, v1
s_or_b32 s16, vcc_lo, s16
s_waitcnt vmcnt(0)
global_store_b32 v[2:3], v11, off
s_and_not1_b32 exec_lo, exec_lo, s16
s_cbranch_execz .LBB0_45
.LBB0_3:
s_and_not1_b32 vcc_lo, exec_lo, s9
s_cbranch_vccnz .LBB0_6
v_mov_b32_e32 v2, 0x3710
v_mov_b32_e32 v3, 0x290
s_mov_b32 s0, s8
.LBB0_5:
scratch_store_b32 v2, v4, off
scratch_store_b32 v3, v5, off
v_add_nc_u32_e32 v2, 4, v2
v_add_nc_u32_e32 v3, 4, v3
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s0, 0
s_cbranch_scc1 .LBB0_5
.LBB0_6:
s_and_not1_b32 vcc_lo, exec_lo, s9
s_cbranch_vccnz .LBB0_9
v_mov_b32_e32 v2, 0x3d0
v_mov_b32_e32 v3, 0x290
s_mov_b32 s0, s8
.LBB0_8:
scratch_load_b32 v11, v3, off
v_add_nc_u32_e32 v3, 4, v3
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_eq_u32 s0, 0
s_waitcnt vmcnt(0)
scratch_store_b32 v2, v11, off
v_add_nc_u32_e32 v2, 4, v2
s_cbranch_scc0 .LBB0_8
.LBB0_9:
s_and_not1_b32 vcc_lo, exec_lo, s11
s_cbranch_vccnz .LBB0_15
v_dual_mov_b32 v11, v0 :: v_dual_mov_b32 v12, v9
s_mov_b32 s2, 0
s_branch .LBB0_12
.LBB0_11:
v_add_nc_u32_e32 v12, s24, v12
v_add_nc_u32_e32 v11, 4, v11
s_cmp_eq_u32 s2, s10
s_cbranch_scc1 .LBB0_15
.LBB0_12:
s_mov_b32 s0, s2
s_add_i32 s2, s2, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s2, s8
s_cbranch_scc1 .LBB0_11
s_lshl_b32 s0, s0, 2
v_dual_mov_b32 v14, v11 :: v_dual_mov_b32 v15, v12
s_addk_i32 s0, 0x3710
s_mov_b32 s3, s2
scratch_load_b32 v2, off, s0
s_getpc_b64 s[0:1]
s_add_u32 s0, s0, lambda@rel32@lo+4
s_addc_u32 s1, s1, lambda@rel32@hi+12
s_waitcnt vmcnt(0)
v_dual_mov_b32 v13, 0 :: v_dual_mul_f32 v2, v10, v2
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f64_f32_e32 v[2:3], v2
.LBB0_14:
scratch_load_b32 v22, v14, off
s_load_b32 s31, s[0:1], 0x0
s_add_i32 s3, s3, 1
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_cmp_ge_i32 s3, s8
s_waitcnt lgkmcnt(0)
v_mul_f32_e64 v18, s12, s31
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_f32_e32 v19, v18, v22
v_fma_f32 v20, s12, v22, 1.0
v_div_scale_f32 v21, null, v20, v20, v19
v_div_scale_f32 v24, vcc_lo, v19, v20, v19
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v23, v21
s_waitcnt_depctr 0xfff
v_fma_f32 v16, -v21, v23, 1.0
v_fmac_f32_e32 v23, v16, v23
v_cvt_f64_f32_e32 v[16:17], v18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v25, v24, v23
v_fma_f32 v26, -v21, v25, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v25, v26, v23
v_fma_f32 v21, -v21, v25, v24
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v21, v21, v23, v25
v_div_fixup_f32 v19, v21, v20, v19
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f32_e32 v13, v13, v19
v_fma_f64 v[16:17], v[16:17], -0.5, v[2:3]
v_mul_f32_e32 v20, v18, v13
v_cvt_f64_f32_e32 v[18:19], s31
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f64_f32_e32 v[20:21], v20
v_fma_f64 v[16:17], v[16:17], v[18:19], v[20:21]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f32_f64_e32 v16, v[16:17]
v_mul_f32_e32 v16, 0x3fb8aa3b, v16
s_delay_alu instid0(VALU_DEP_1)
v_exp_f32_e32 v16, v16
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v16, v22, v16
scratch_store_b32 v14, v16, off
scratch_store_b32 v15, v16, off
v_add_nc_u32_e32 v14, 4, v14
v_add_nc_u32_e32 v15, 4, v15
s_cbranch_scc0 .LBB0_14
s_branch .LBB0_11
.LBB0_15:
s_and_not1_b32 vcc_lo, exec_lo, s14
s_cbranch_vccnz .LBB0_21
v_dual_mov_b32 v2, 0xb0 :: v_dual_mov_b32 v3, 16
v_dual_mov_b32 v11, 0 :: v_dual_mov_b32 v12, 1.0
v_mov_b32_e32 v13, v7
s_mov_b32 s0, 0
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_17:
scratch_load_b32 v14, v13, off
s_add_i32 s0, s0, 1
v_add_nc_u32_e32 v13, 4, v13
s_cmp_ge_i32 s0, s13
s_waitcnt vmcnt(0)
v_fma_f32 v14, s12, v14, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v15, null, v14, v14, v12
v_div_scale_f32 v18, vcc_lo, v12, v14, v12
v_rcp_f32_e32 v16, v15
s_waitcnt_depctr 0xfff
v_fma_f32 v17, -v15, v16, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v16, v17, v16
v_mul_f32_e32 v17, v18, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v19, -v15, v17, v18
v_fmac_f32_e32 v17, v19, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v15, -v15, v17, v18
v_div_fmas_f32 v15, v15, v16, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f32 v12, v15, v14, v12
v_fmac_f32_e32 v11, s12, v12
scratch_store_b32 v3, v12, off
v_add_nc_u32_e32 v3, 4, v3
scratch_store_b32 v2, v11, off
v_add_nc_u32_e32 v2, 4, v2
s_cbranch_scc0 .LBB0_17
s_set_inst_prefetch_distance 0x2
s_and_not1_b32 vcc_lo, exec_lo, s14
s_cbranch_vccnz .LBB0_21
v_mov_b32_e32 v2, 0x1f0
v_mov_b32_e32 v3, 0x150
s_mov_b32 s0, 0
.LBB0_20:
scratch_store_b32 v3, v6, off
scratch_store_b32 v2, v6, off
v_add_nc_u32_e32 v2, 4, v2
v_add_nc_u32_e32 v3, 4, v3
s_add_i32 s0, s0, 1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_ge_i32 s0, s13
s_cbranch_scc0 .LBB0_20
.LBB0_21:
s_and_not1_b32 vcc_lo, exec_lo, s18
s_cbranch_vccnz .LBB0_26
v_mov_b32_e32 v2, 0
s_mov_b32 s31, s15
s_getpc_b64 s[0:1]
s_add_u32 s0, s0, swaprates@rel32@lo+4
s_addc_u32 s1, s1, swaprates@rel32@hi+12
s_getpc_b64 s[2:3]
s_add_u32 s2, s2, maturities@rel32@lo+4
s_addc_u32 s3, s3, maturities@rel32@hi+12
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_24
.p2align 6
.LBB0_23:
s_or_b32 exec_lo, exec_lo, s34
s_add_i32 s31, s31, -1
s_add_u32 s0, s0, 4
s_addc_u32 s1, s1, 0
s_add_u32 s2, s2, 4
s_addc_u32 s3, s3, 0
s_cmp_lg_u32 s31, 0
s_cbranch_scc0 .LBB0_27
.LBB0_24:
s_load_b32 s33, s[2:3], 0x0
s_waitcnt lgkmcnt(0)
s_add_i32 s33, s33, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b32 s33, s33, 2
s_add_i32 s34, s33, 16
s_add_i32 s35, s33, 0xb0
scratch_load_b32 v3, off, s34
scratch_load_b32 v11, off, s35
s_load_b32 s35, s[0:1], 0x0
s_mov_b32 s34, exec_lo
s_waitcnt vmcnt(0) lgkmcnt(0)
v_fmac_f32_e32 v3, s35, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v3, -1.0, v3
v_cmpx_gt_f32_e32 0, v3
s_cbranch_execz .LBB0_23
s_add_i32 s36, s33, 0x1f0
s_addk_i32 s33, 0x150
scratch_load_b32 v11, off, s36
scratch_load_b32 v12, off, s33
v_fmac_f32_e32 v2, 0xc2c80000, v3
s_waitcnt vmcnt(1)
v_fmac_f32_e64 v11, 0xc2c80000, s35
s_waitcnt vmcnt(0)
v_add_f32_e32 v12, 0xc2c80000, v12
scratch_store_b32 off, v11, s36
scratch_store_b32 off, v12, s33
s_branch .LBB0_23
.LBB0_26:
v_mov_b32_e32 v2, 0
.LBB0_27:
s_set_inst_prefetch_distance 0x2
v_mov_b32_e32 v3, v8
s_mov_b32 s0, s26
s_mov_b32 s1, s25
s_and_not1_b32 vcc_lo, exec_lo, s20
s_mov_b32 s2, s13
s_mov_b32 s3, s23
s_cbranch_vccz .LBB0_32
.LBB0_28:
s_and_not1_b32 vcc_lo, exec_lo, s11
s_cbranch_vccnz .LBB0_34
v_mov_b32_e32 v3, 0x290
v_mov_b32_e32 v11, 1.0
s_mov_b32 s0, s10
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_30:
scratch_load_b32 v12, v3, off
v_cvt_f64_f32_e32 v[14:15], v11
s_add_i32 s0, s0, -1
v_add_nc_u32_e32 v3, 4, v3
s_cmp_lg_u32 s0, 0
s_waitcnt vmcnt(0)
v_mul_f32_e32 v12, s12, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_f64_f32_e32 v[12:13], v12
v_add_f64 v[11:12], v[12:13], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f64 v[16:17], null, v[11:12], v[11:12], v[14:15]
v_div_scale_f64 v[22:23], vcc_lo, v[14:15], v[11:12], v[14:15]
v_rcp_f64_e32 v[18:19], v[16:17]
s_waitcnt_depctr 0xfff
v_fma_f64 v[20:21], -v[16:17], v[18:19], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[18:19], v[18:19], v[20:21], v[18:19]
v_fma_f64 v[20:21], -v[16:17], v[18:19], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[18:19], v[18:19], v[20:21], v[18:19]
v_mul_f64 v[20:21], v[22:23], v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[16:17], -v[16:17], v[20:21], v[22:23]
v_div_fmas_f64 v[16:17], v[16:17], v[18:19], v[20:21]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[11:12], v[16:17], v[11:12], v[14:15]
v_cvt_f32_f64_e32 v11, v[11:12]
s_cbranch_scc1 .LBB0_30
s_branch .LBB0_35
.LBB0_31:
v_add_nc_u32_e32 v3, -4, v3
s_add_i32 s2, s2, -1
s_add_i32 s3, s3, -4
s_add_i32 s1, s1, -4
s_add_i32 s0, s0, -4
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_28
.LBB0_32:
s_add_i32 s34, s3, s22
s_add_i32 s31, s0, s22
scratch_load_b32 v13, off, s34
s_add_i32 s33, s1, s22
s_add_i32 s35, s31, 4
scratch_load_b32 v12, off, s33
scratch_load_b32 v11, off, s35
v_add_nc_u32_e32 v14, s22, v3
s_cmp_eq_u32 s2, 1
scratch_load_b32 v14, v14, off
s_waitcnt vmcnt(3)
v_fma_f32 v13, s12, v13, 1.0
s_waitcnt vmcnt(1)
v_fmac_f32_e32 v12, s12, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v15, null, v13, v13, s12
s_waitcnt vmcnt(0)
v_mul_f32_e64 v14, v14, -v12
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v16, v15
s_waitcnt_depctr 0xfff
v_fma_f32 v17, -v15, v16, 1.0
v_fmac_f32_e32 v16, v17, v16
v_div_scale_f32 v17, vcc_lo, s12, v13, s12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v18, v17, v16
v_fma_f32 v19, -v15, v18, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v18, v19, v16
v_fma_f32 v15, -v15, v18, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f32 v15, v15, v16, v18
v_div_fixup_f32 v13, v15, v13, s12
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v13, v14, v13
scratch_store_b32 off, v12, s33
scratch_store_b32 off, v13, s34
s_cbranch_scc1 .LBB0_31
scratch_load_b32 v14, off, s31
scratch_load_b32 v15, off, s33 offset:-4
v_fma_f32 v13, s12, v13, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f32 v16, null, v13, v13, v12
v_div_scale_f32 v19, vcc_lo, v12, v13, v12
v_rcp_f32_e32 v17, v16
s_waitcnt_depctr 0xfff
v_fma_f32 v18, -v16, v17, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v17, v18, v17
v_mul_f32_e32 v18, v19, v17
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_fma_f32 v20, -v16, v18, v19
s_waitcnt vmcnt(1)
v_dual_fmac_f32 v18, v20, v17 :: v_dual_add_f32 v11, v11, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f32 v16, -v16, v18, v19
v_div_fmas_f32 v16, v16, v17, v18
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_div_fixup_f32 v12, v16, v13, v12
s_waitcnt vmcnt(0)
v_add_f32_e32 v12, v12, v15
scratch_store_b32 off, v11, s31
scratch_store_b32 off, v12, s33 offset:-4
s_branch .LBB0_31
.LBB0_34:
v_mov_b32_e32 v11, 1.0
.LBB0_35:
s_set_inst_prefetch_distance 0x2
s_delay_alu instid0(VALU_DEP_1)
v_mul_f32_e32 v12, v2, v11
s_and_not1_b32 vcc_lo, exec_lo, s11
s_cbranch_vccnz .LBB0_38
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_mul_f32_e64 v2, s12, -v12
v_mov_b32_e32 v13, 0x290
s_mov_b32 s0, s10
v_cvt_f64_f32_e32 v[2:3], v2
s_set_inst_prefetch_distance 0x1
.p2align 6
.LBB0_37:
scratch_load_b32 v14, v13, off
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s0, 0
s_waitcnt vmcnt(0)
v_mul_f32_e32 v14, s12, v14
v_cvt_f64_f32_e32 v[14:15], v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[14:15], v[14:15], 1.0
v_div_scale_f64 v[16:17], null, v[14:15], v[14:15], v[2:3]
v_div_scale_f64 v[22:23], vcc_lo, v[2:3], v[14:15], v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[18:19], v[16:17]
s_waitcnt_depctr 0xfff
v_fma_f64 v[20:21], -v[16:17], v[18:19], 1.0
v_fma_f64 v[18:19], v[18:19], v[20:21], v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[20:21], -v[16:17], v[18:19], 1.0
v_fma_f64 v[18:19], v[18:19], v[20:21], v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[20:21], v[22:23], v[18:19]
v_fma_f64 v[16:17], -v[16:17], v[20:21], v[22:23]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[16:17], v[16:17], v[18:19], v[20:21]
v_div_fixup_f64 v[14:15], v[16:17], v[14:15], v[2:3]
s_delay_alu instid0(VALU_DEP_1)
v_cvt_f32_f64_e32 v14, v[14:15]
scratch_store_b32 v13, v14, off
v_add_nc_u32_e32 v13, 4, v13
s_cbranch_scc0 .LBB0_37
.LBB0_38:
s_set_inst_prefetch_distance 0x2
v_mov_b32_e32 v2, v7
s_and_not1_b32 vcc_lo, exec_lo, s21
s_mov_b32 s0, s13
s_cbranch_vccnz .LBB0_40
.LBB0_39:
scratch_load_b32 v3, v2, off
s_add_i32 s0, s0, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s0, 0
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v11, v3
scratch_store_b32 v2, v3, off
v_add_nc_u32_e32 v2, 4, v2
s_cbranch_scc1 .LBB0_39
.LBB0_40:
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s1, s29
s_mov_b32 s2, s27
s_mov_b32 s3, s19
s_mov_b32 s31, s10
v_lshlrev_b64 v[2:3], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v13, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v14, vcc_lo, s5, v3, vcc_lo
s_and_not1_b32 vcc_lo, exec_lo, s11
global_store_b32 v[13:14], v12, off
s_cbranch_vccz .LBB0_42
s_branch .LBB0_2
.LBB0_41:
s_add_i32 s0, s31, -1
s_add_i32 s3, s3, 1
s_add_i32 s2, s2, s28
s_add_i32 s1, s1, s28
s_cmp_lt_i32 s31, 2
s_mov_b32 s31, s0
s_cbranch_scc1 .LBB0_2
.LBB0_42:
s_cmp_le_i32 s8, s31
s_cbranch_scc1 .LBB0_41
v_mov_b32_e32 v11, 0
s_mov_b32 s33, s30
s_mov_b32 s34, s1
s_mov_b32 s35, s2
s_mov_b32 s0, s8
.LBB0_44:
scratch_load_b32 v12, off, s35
scratch_load_b32 v13, off, s34
scratch_load_b32 v14, off, s33
s_add_i32 s36, s3, s0
s_add_i32 s40, s0, -1
s_ashr_i32 s37, s36, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_lshl_b64 s[36:37], s[36:37], 2
s_add_u32 s36, s36, s38
s_addc_u32 s37, s37, s39
s_add_i32 s35, s35, -4
s_load_b32 s36, s[36:37], 0x0
s_add_i32 s34, s34, -4
s_waitcnt vmcnt(2)
v_fma_f32 v15, s12, v12, 1.0
s_waitcnt vmcnt(1)
v_div_scale_f32 v18, null, v12, v12, v13
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v19, s36, v13
v_div_scale_f32 v20, s0, v13, v12, v13
v_div_scale_f32 v16, null, v15, v15, s12
v_rcp_f32_e32 v21, v18
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v17, v16
s_waitcnt_depctr 0xfff
v_fma_f32 v22, -v16, v17, 1.0
v_fmac_f32_e32 v17, v22, v17
v_fma_f32 v22, -v18, v21, 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_fmac_f32_e32 v21, v22, v21
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v11, v19, v14
v_div_scale_f32 v19, vcc_lo, s12, v15, s12
v_mul_f32_e32 v24, v20, v21
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v23, v19, v17
v_fma_f32 v22, -v16, v23, v19
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_fmac_f32_e32 v23, v22, v17
v_fma_f32 v22, -v18, v24, v20
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f32 v16, -v16, v23, v19
v_fmac_f32_e32 v24, v22, v21
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_div_fmas_f32 v16, v16, v17, v23
v_mul_f32_e32 v17, s36, v11
v_fma_f32 v18, -v18, v24, v20
s_mov_b32 vcc_lo, s0
s_mov_b32 s0, s40
v_div_fixup_f32 v15, v16, v15, s12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_div_fmas_f32 v16, v18, v21, v24
v_mul_f32_e32 v17, v17, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_div_fixup_f32 v12, v16, v12, v13
v_mul_f32_e32 v13, v15, v17
s_delay_alu instid0(VALU_DEP_1)
v_fmac_f32_e32 v13, v14, v12
scratch_store_b32 off, v13, s33
s_add_i32 s33, s33, -4
s_cmp_le_i32 s40, s31
s_cbranch_scc0 .LBB0_44
s_branch .LBB0_41
.LBB0_45:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z28Pathcalc_Portfolio_KernelGPUPfS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 14416
.amdhsa_kernarg_size 272
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 1
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 27
.amdhsa_next_free_sgpr 41
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z28Pathcalc_Portfolio_KernelGPUPfS_, .Lfunc_end0-_Z28Pathcalc_Portfolio_KernelGPUPfS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.protected N
.type N,@object
.section .bss,"aw",@nobits
.globl N
.p2align 2, 0x0
N:
.long 0
.size N, 4
.protected Nmat
.type Nmat,@object
.globl Nmat
.p2align 2, 0x0
Nmat:
.long 0
.size Nmat, 4
.protected Nopt
.type Nopt,@object
.globl Nopt
.p2align 2, 0x0
Nopt:
.long 0
.size Nopt, 4
.protected maturities
.type maturities,@object
.globl maturities
.p2align 4, 0x0
maturities:
.zero 60
.size maturities, 60
.protected delta
.type delta,@object
.globl delta
.p2align 2, 0x0
delta:
.long 0x00000000
.size delta, 4
.protected swaprates
.type swaprates,@object
.globl swaprates
.p2align 4, 0x0
swaprates:
.zero 60
.size swaprates, 60
.protected lambda
.type lambda,@object
.globl lambda
.p2align 4, 0x0
lambda:
.zero 320
.size lambda, 320
.type __hip_cuid_,@object
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym N
.addrsig_sym Nmat
.addrsig_sym Nopt
.addrsig_sym maturities
.addrsig_sym delta
.addrsig_sym swaprates
.addrsig_sym lambda
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: hidden_block_count_x
- .offset: 20
.size: 4
.value_kind: hidden_block_count_y
- .offset: 24
.size: 4
.value_kind: hidden_block_count_z
- .offset: 28
.size: 2
.value_kind: hidden_group_size_x
- .offset: 30
.size: 2
.value_kind: hidden_group_size_y
- .offset: 32
.size: 2
.value_kind: hidden_group_size_z
- .offset: 34
.size: 2
.value_kind: hidden_remainder_x
- .offset: 36
.size: 2
.value_kind: hidden_remainder_y
- .offset: 38
.size: 2
.value_kind: hidden_remainder_z
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 80
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 272
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z28Pathcalc_Portfolio_KernelGPUPfS_
.private_segment_fixed_size: 14416
.sgpr_count: 43
.sgpr_spill_count: 0
.symbol: _Z28Pathcalc_Portfolio_KernelGPUPfS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 27
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //pass
//--blockDim=32 --gridDim=2
#include <hip/hip_runtime.h>
#include "../common.h"
__global__ void Pathcalc_Portfolio_KernelGPU(float *d_v, float *d_Lb)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int threadN = blockDim.x * gridDim.x;
int i,path;
float L[NN], L2[L2_SIZE], z[NN];
float *L_b = L;
/* Monte Carlo LIBOR path calculation*/
for(path = tid; path < NPATH; path += threadN){
// initialise the data for current thread
for (i=0; i<N; i++) {
// for real application, z should be randomly generated
z[i] = 0.3;
L[i] = 0.05;
}
path_calc_b1(L, z, L2);
d_v[path] = portfolio_b(L,L_b);
path_calc_b2(L_b, z, L2);
d_Lb[path] = L_b[NN-1];
}
} | .text
.file "kernel.hip"
.globl _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_ # -- Begin function _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.p2align 4, 0x90
.type _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_,@function
_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_: # @_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z28Pathcalc_Portfolio_KernelGPUPfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_, .Lfunc_end0-_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28Pathcalc_Portfolio_KernelGPUPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $N, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $Nmat, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $Nopt, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $maturities, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movl $60, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $delta, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $swaprates, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movl $60, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $lambda, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movl $320, %r9d # imm = 0x140
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type N,@object # @N
.local N
.comm N,4,4
.type Nmat,@object # @Nmat
.local Nmat
.comm Nmat,4,4
.type Nopt,@object # @Nopt
.local Nopt
.comm Nopt,4,4
.type maturities,@object # @maturities
.local maturities
.comm maturities,60,16
.type delta,@object # @delta
.local delta
.comm delta,4,4
.type swaprates,@object # @swaprates
.local swaprates
.comm swaprates,60,16
.type lambda,@object # @lambda
.local lambda
.comm lambda,320,16
.type _Z28Pathcalc_Portfolio_KernelGPUPfS_,@object # @_Z28Pathcalc_Portfolio_KernelGPUPfS_
.section .rodata,"a",@progbits
.globl _Z28Pathcalc_Portfolio_KernelGPUPfS_
.p2align 3, 0x0
_Z28Pathcalc_Portfolio_KernelGPUPfS_:
.quad _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.size _Z28Pathcalc_Portfolio_KernelGPUPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28Pathcalc_Portfolio_KernelGPUPfS_"
.size .L__unnamed_1, 37
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "N"
.size .L__unnamed_2, 2
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "Nmat"
.size .L__unnamed_3, 5
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "Nopt"
.size .L__unnamed_4, 5
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "maturities"
.size .L__unnamed_5, 11
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "delta"
.size .L__unnamed_6, 6
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "swaprates"
.size .L__unnamed_7, 10
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "lambda"
.size .L__unnamed_8, 7
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym N
.addrsig_sym Nmat
.addrsig_sym Nopt
.addrsig_sym maturities
.addrsig_sym delta
.addrsig_sym swaprates
.addrsig_sym lambda
.addrsig_sym _Z28Pathcalc_Portfolio_KernelGPUPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0004685c_00000000-6_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2034:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2034:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
.type _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_, @function
_Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_:
.LFB2056:
.cfi_startproc
endbr64
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 8(%rsp)
movq %rsi, (%rsp)
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 136
pushq 24(%rsp)
.cfi_def_cfa_offset 144
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z28Pathcalc_Portfolio_KernelGPUPfS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 128
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2056:
.size _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_, .-_Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
.globl _Z28Pathcalc_Portfolio_KernelGPUPfS_
.type _Z28Pathcalc_Portfolio_KernelGPUPfS_, @function
_Z28Pathcalc_Portfolio_KernelGPUPfS_:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z28Pathcalc_Portfolio_KernelGPUPfS_PfS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z28Pathcalc_Portfolio_KernelGPUPfS_, .-_Z28Pathcalc_Portfolio_KernelGPUPfS_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z28Pathcalc_Portfolio_KernelGPUPfS_"
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "N"
.LC2:
.string "Nmat"
.LC3:
.string "Nopt"
.LC4:
.string "maturities"
.LC5:
.string "delta"
.LC6:
.string "swaprates"
.LC7:
.string "lambda"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2059:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z28Pathcalc_Portfolio_KernelGPUPfS_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _ZL1N(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4Nmat(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4Nopt(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $60, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10maturities(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5delta(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $60, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9swaprates(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $1
.cfi_def_cfa_offset 32
movl $320, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL6lambda(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL6lambda
.comm _ZL6lambda,320,32
.local _ZL9swaprates
.comm _ZL9swaprates,60,32
.local _ZL5delta
.comm _ZL5delta,4,4
.local _ZL10maturities
.comm _ZL10maturities,60,32
.local _ZL4Nopt
.comm _ZL4Nopt,4,4
.local _ZL4Nmat
.comm _ZL4Nmat,4,4
.local _ZL1N
.comm _ZL1N,4,4
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernel.hip"
.globl _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_ # -- Begin function _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.p2align 4, 0x90
.type _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_,@function
_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_: # @_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.cfi_startproc
# %bb.0:
subq $88, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 56(%rsp)
movq %rsi, 48(%rsp)
leaq 56(%rsp), %rax
movq %rax, 64(%rsp)
leaq 48(%rsp), %rax
movq %rax, 72(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 64(%rsp), %r9
movl $_Z28Pathcalc_Portfolio_KernelGPUPfS_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $104, %rsp
.cfi_adjust_cfa_offset -104
retq
.Lfunc_end0:
.size _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_, .Lfunc_end0-_Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z28Pathcalc_Portfolio_KernelGPUPfS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $N, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $Nmat, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $Nopt, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $maturities, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movl $60, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $delta, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $swaprates, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movl $60, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $lambda, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movl $320, %r9d # imm = 0x140
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $1
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type N,@object # @N
.local N
.comm N,4,4
.type Nmat,@object # @Nmat
.local Nmat
.comm Nmat,4,4
.type Nopt,@object # @Nopt
.local Nopt
.comm Nopt,4,4
.type maturities,@object # @maturities
.local maturities
.comm maturities,60,16
.type delta,@object # @delta
.local delta
.comm delta,4,4
.type swaprates,@object # @swaprates
.local swaprates
.comm swaprates,60,16
.type lambda,@object # @lambda
.local lambda
.comm lambda,320,16
.type _Z28Pathcalc_Portfolio_KernelGPUPfS_,@object # @_Z28Pathcalc_Portfolio_KernelGPUPfS_
.section .rodata,"a",@progbits
.globl _Z28Pathcalc_Portfolio_KernelGPUPfS_
.p2align 3, 0x0
_Z28Pathcalc_Portfolio_KernelGPUPfS_:
.quad _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.size _Z28Pathcalc_Portfolio_KernelGPUPfS_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z28Pathcalc_Portfolio_KernelGPUPfS_"
.size .L__unnamed_1, 37
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "N"
.size .L__unnamed_2, 2
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "Nmat"
.size .L__unnamed_3, 5
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "Nopt"
.size .L__unnamed_4, 5
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "maturities"
.size .L__unnamed_5, 11
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "delta"
.size .L__unnamed_6, 6
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "swaprates"
.size .L__unnamed_7, 10
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "lambda"
.size .L__unnamed_8, 7
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z43__device_stub__Pathcalc_Portfolio_KernelGPUPfS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym N
.addrsig_sym Nmat
.addrsig_sym Nopt
.addrsig_sym maturities
.addrsig_sym delta
.addrsig_sym swaprates
.addrsig_sym lambda
.addrsig_sym _Z28Pathcalc_Portfolio_KernelGPUPfS_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | //kernelPBO.cu (Rob Farber)
#include <stdio.h>
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("kernel failed!");
} | code for sm_80
Function : _Z6kernelP6uchar4jjf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ I2F.U32.RP R4, c[0x0][0x168] ; /* 0x00005a0000047b06 */
/* 0x000e220000209000 */
/*0090*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe20003f45070 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fcc0000000a00 */
/*00b0*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*00c0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fcc0007ffe0ff */
/*00d0*/ F2I.TRUNC.NTZ R4, c[0x0][0x170] ; /* 0x00005c0000047b05 */
/* 0x000ff0000020f100 */
/*00e0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fc400078e00ff */
/*0100*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x002fc800078e0a03 */
/*0110*/ IMAD R5, R5, c[0x0][0x168], RZ ; /* 0x00005a0005057a24 */
/* 0x000fc800078e02ff */
/*0120*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*0130*/ IMAD.HI.U32 R3, R3, R0, RZ ; /* 0x0000000003037227 */
/* 0x000fca00078e00ff */
/*0140*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*0150*/ IMAD R5, R5, c[0x0][0x168], R0 ; /* 0x00005a0005057a24 */
/* 0x000fca00078e0200 */
/*0160*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fda0003f06070 */
/*0170*/ @P0 IADD3 R5, R5, -c[0x0][0x168], RZ ; /* 0x80005a0005050a10 */
/* 0x000fe40007ffe0ff */
/*0180*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*0190*/ ISETP.GE.U32.AND P1, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fda0003f26070 */
/*01a0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01b0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff03aa12 */
/* 0x000fca00078e33ff */
/*01c0*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0a03 */
/*01d0*/ IMAD.IADD R3, R3, 0x1, R4 ; /* 0x0000000103037824 */
/* 0x000fe400078e0204 */
/*01e0*/ IMAD R2, R5, c[0x0][0x168], R0 ; /* 0x00005a0005027a24 */
/* 0x000fe200078e0200 */
/*01f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fc800000001ff */
/*0200*/ IADD3 R4, R2.reuse, R4, RZ ; /* 0x0000000402047210 */
/* 0x040fe20007ffe0ff */
/*0210*/ IMAD.IADD R2, R2, 0x1, R3 ; /* 0x0000000102027824 */
/* 0x000fc600078e0203 */
/*0220*/ PRMT R3, R3, 0x7604, R4 ; /* 0x0000760403037816 */
/* 0x000fc80000000004 */
/*0230*/ PRMT R4, R2, 0x7054, R3 ; /* 0x0000705402047816 */
/* 0x000fe20000000003 */
/*0240*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc600078e0205 */
/*0250*/ PRMT R5, RZ, 0x654, R4 ; /* 0x00000654ff057816 */
/* 0x000fca0000000004 */
/*0260*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0270*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0280*/ BRA 0x280; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | //kernelPBO.cu (Rob Farber)
#include <stdio.h>
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("kernel failed!");
} | .file "tmpxft_0009dda1_00000000-6_kernelPBO.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Cuda error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
.type _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf, @function
_Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelP6uchar4jjf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf, .-_Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
.globl _Z6kernelP6uchar4jjf
.type _Z6kernelP6uchar4jjf, @function
_Z6kernelP6uchar4jjf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6kernelP6uchar4jjf, .-_Z6kernelP6uchar4jjf
.section .rodata.str1.1
.LC1:
.string "kernel failed!"
.text
.globl launch_kernel
.type launch_kernel, @function
launch_kernel:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movl %esi, %ebx
movl %edx, %ebp
movss %xmm0, 12(%rsp)
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %edx
testl %eax, %eax
cmovns %eax, %edx
sarl $8, %edx
movl $256, 36(%rsp)
movl $1, 40(%rsp)
movl %eax, %ecx
sarl $31, %ecx
shrl $24, %ecx
addl %ecx, %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %al
movzbl %al, %eax
addl %edx, %eax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaThreadSynchronize@PLT
leaq .LC1(%rip), %rdi
call _Z14checkCUDAErrorPKc
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movss 12(%rsp), %xmm0
movl %ebp, %edx
movl %ebx, %esi
movq %r12, %rdi
call _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
jmp .L16
.cfi_endproc
.LFE2058:
.size launch_kernel, .-launch_kernel
.section .rodata.str1.1
.LC2:
.string "_Z6kernelP6uchar4jjf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelP6uchar4jjf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | //kernelPBO.cu (Rob Farber)
#include <stdio.h>
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
cudaThreadSynchronize();
checkCUDAError("kernel failed!");
} | //kernelPBO.cu (Rob Farber)
#include <hip/hip_runtime.h>
#include <stdio.h>
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("kernel failed!");
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | //kernelPBO.cu (Rob Farber)
#include <hip/hip_runtime.h>
#include <stdio.h>
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("kernel failed!");
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.globl _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 8
.type _Z6kernelP15HIP_vector_typeIhLj4EEjjf,@function
_Z6kernelP15HIP_vector_typeIhLj4EEjjf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mul_i32 s3, s3, s2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_2
v_cvt_f32_u32_e32 v0, s2
s_sub_i32 s3, 0, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s3, v0
s_clause 0x1
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[0:1], s[0:1], 0x0
v_mul_hi_u32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v0, v0, v2
s_waitcnt lgkmcnt(0)
v_cvt_i32_f32_e32 v5, s3
v_mul_hi_u32 v0, v1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v2, v0, s2
v_add_nc_u32_e32 v3, 1, v0
v_sub_nc_u32_e32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_subrev_nc_u32_e32 v4, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v0, v0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_add_nc_u32_e32 v3, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_ashrrev_i32_e32 v2, 31, v1
v_cndmask_b32_e32 v0, v0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_mul_lo_u32 v4, v0, s2
v_add_nc_u32_e32 v6, v0, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v2
v_sub_nc_u32_e32 v4, v1, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2)
v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, v4, v5
v_add_nc_u32_e32 v4, v6, v4
s_clause 0x3
global_store_b8 v[0:1], v2, off
global_store_b8 v[0:1], v6, off offset:1
global_store_b8 v[0:1], v3, off offset:3
global_store_b8 v[0:1], v4, off offset:2
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelP15HIP_vector_typeIhLj4EEjjf, .Lfunc_end0-_Z6kernelP15HIP_vector_typeIhLj4EEjjf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelP15HIP_vector_typeIhLj4EEjjf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | //kernelPBO.cu (Rob Farber)
#include <hip/hip_runtime.h>
#include <stdio.h>
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Simple kernel writes changing colors to a uchar4 array
__global__ void kernel(uchar4* pos, unsigned int width, unsigned int height,
float time)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int x = index%width;
unsigned int y = index/width;
if(index < width*height) {
unsigned char r = (x + (int) time)&0xff;
unsigned char g = (y + (int) time)&0xff;
unsigned char b = ((x+y) + (int) time)&0xff;
// Each thread writes one pixel location in the texture (textel)
pos[index].w = 0;
pos[index].x = r;
pos[index].y = g;
pos[index].z = b;
}
}
// Wrapper for the __global__ call that sets up the kernel call
extern "C" void launch_kernel(uchar4* pos, unsigned int image_width,
unsigned int image_height, float time)
{
// execute the kernel
int nThreads=256;
int totalThreads = image_height * image_width;
int nBlocks = totalThreads/nThreads;
nBlocks += ((totalThreads%nThreads)>0)?1:0;
kernel<<< nBlocks, nThreads>>>(pos, image_width, image_height, time);
// make certain the kernel has completed
hipDeviceSynchronize();
checkCUDAError("kernel failed!");
} | .text
.file "kernelPBO.hip"
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB0_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z14checkCUDAErrorPKc, .Lfunc_end0-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.globl _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf # -- Begin function _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 4, 0x90
.type _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf,@function
_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf: # @_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf, .Lfunc_end1-_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.cfi_endproc
# -- End function
.globl launch_kernel # -- Begin function launch_kernel
.p2align 4, 0x90
.type launch_kernel,@function
launch_kernel: # @launch_kernel
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movss %xmm0, 8(%rsp) # 4-byte Spill
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, %r14
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edi, %edi
testl $-2147483393, %eax # imm = 0x800000FF
setg %dil
addl %ecx, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq %r14, 72(%rsp)
movl %ebp, 20(%rsp)
movl %ebx, 16(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB2_4
# %bb.3: # %_Z14checkCUDAErrorPKc.exit
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_4:
.cfi_def_cfa_offset 144
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end2:
.size launch_kernel, .Lfunc_end2-launch_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Cuda error: %s: %s.\n"
.size .L.str, 21
.type _Z6kernelP15HIP_vector_typeIhLj4EEjjf,@object # @_Z6kernelP15HIP_vector_typeIhLj4EEjjf
.section .rodata,"a",@progbits
.globl _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 3, 0x0
_Z6kernelP15HIP_vector_typeIhLj4EEjjf:
.quad _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.size _Z6kernelP15HIP_vector_typeIhLj4EEjjf, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "kernel failed!"
.size .L.str.1, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelP15HIP_vector_typeIhLj4EEjjf"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelP6uchar4jjf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x168] ; /* 0x00005a0000047ab9 */
/* 0x000fe40000000a00 */
/*0030*/ UIMAD UR4, UR5, UR4, URZ ; /* 0x00000004050472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fca00078e0203 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06070 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ I2F.U32.RP R4, c[0x0][0x168] ; /* 0x00005a0000047b06 */
/* 0x000e220000209000 */
/*0090*/ ISETP.NE.U32.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe20003f45070 */
/*00a0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fcc0000000a00 */
/*00b0*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*00c0*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fcc0007ffe0ff */
/*00d0*/ F2I.TRUNC.NTZ R4, c[0x0][0x170] ; /* 0x00005c0000047b05 */
/* 0x000ff0000020f100 */
/*00e0*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*00f0*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fc400078e00ff */
/*0100*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x002fc800078e0a03 */
/*0110*/ IMAD R5, R5, c[0x0][0x168], RZ ; /* 0x00005a0005057a24 */
/* 0x000fc800078e02ff */
/*0120*/ IMAD.HI.U32 R3, R3, R5, R2 ; /* 0x0000000503037227 */
/* 0x000fcc00078e0002 */
/*0130*/ IMAD.HI.U32 R3, R3, R0, RZ ; /* 0x0000000003037227 */
/* 0x000fca00078e00ff */
/*0140*/ IADD3 R5, -R3, RZ, RZ ; /* 0x000000ff03057210 */
/* 0x000fca0007ffe1ff */
/*0150*/ IMAD R5, R5, c[0x0][0x168], R0 ; /* 0x00005a0005057a24 */
/* 0x000fca00078e0200 */
/*0160*/ ISETP.GE.U32.AND P0, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fda0003f06070 */
/*0170*/ @P0 IADD3 R5, R5, -c[0x0][0x168], RZ ; /* 0x80005a0005050a10 */
/* 0x000fe40007ffe0ff */
/*0180*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fe40007ffe0ff */
/*0190*/ ISETP.GE.U32.AND P1, PT, R5, c[0x0][0x168], PT ; /* 0x00005a0005007a0c */
/* 0x000fda0003f26070 */
/*01a0*/ @P1 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103031810 */
/* 0x000fe40007ffe0ff */
/*01b0*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x168], RZ, 0x33, !PT ; /* 0x00005a00ff03aa12 */
/* 0x000fca00078e33ff */
/*01c0*/ IMAD.MOV R5, RZ, RZ, -R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0a03 */
/*01d0*/ IMAD.IADD R3, R3, 0x1, R4 ; /* 0x0000000103037824 */
/* 0x000fe400078e0204 */
/*01e0*/ IMAD R2, R5, c[0x0][0x168], R0 ; /* 0x00005a0005027a24 */
/* 0x000fe200078e0200 */
/*01f0*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fc800000001ff */
/*0200*/ IADD3 R4, R2.reuse, R4, RZ ; /* 0x0000000402047210 */
/* 0x040fe20007ffe0ff */
/*0210*/ IMAD.IADD R2, R2, 0x1, R3 ; /* 0x0000000102027824 */
/* 0x000fc600078e0203 */
/*0220*/ PRMT R3, R3, 0x7604, R4 ; /* 0x0000760403037816 */
/* 0x000fc80000000004 */
/*0230*/ PRMT R4, R2, 0x7054, R3 ; /* 0x0000705402047816 */
/* 0x000fe20000000003 */
/*0240*/ IMAD.WIDE R2, R0, R5, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc600078e0205 */
/*0250*/ PRMT R5, RZ, 0x654, R4 ; /* 0x00000654ff057816 */
/* 0x000fca0000000004 */
/*0260*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe2000c101904 */
/*0270*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0280*/ BRA 0x280; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0300*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0310*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0320*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0330*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0340*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0350*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.globl _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 8
.type _Z6kernelP15HIP_vector_typeIhLj4EEjjf,@function
_Z6kernelP15HIP_vector_typeIhLj4EEjjf:
s_clause 0x1
s_load_b32 s4, s[0:1], 0x24
s_load_b64 s[2:3], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_and_b32 s4, s4, 0xffff
s_mul_i32 s3, s3, s2
v_mad_u64_u32 v[1:2], null, s15, s4, v[0:1]
s_delay_alu instid0(VALU_DEP_1)
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_2
v_cvt_f32_u32_e32 v0, s2
s_sub_i32 s3, 0, s2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
v_cvt_u32_f32_e32 v0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s3, v0
s_clause 0x1
s_load_b32 s3, s[0:1], 0x10
s_load_b64 s[0:1], s[0:1], 0x0
v_mul_hi_u32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v0, v0, v2
s_waitcnt lgkmcnt(0)
v_cvt_i32_f32_e32 v5, s3
v_mul_hi_u32 v0, v1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mul_lo_u32 v2, v0, s2
v_add_nc_u32_e32 v3, 1, v0
v_sub_nc_u32_e32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_subrev_nc_u32_e32 v4, s2, v2
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_cndmask_b32_e32 v0, v0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e32 v2, v2, v4, vcc_lo
v_add_nc_u32_e32 v3, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cmp_le_u32_e32 vcc_lo, s2, v2
v_ashrrev_i32_e32 v2, 31, v1
v_cndmask_b32_e32 v0, v0, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_mul_lo_u32 v4, v0, s2
v_add_nc_u32_e32 v6, v0, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s0, v2
v_sub_nc_u32_e32 v4, v1, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_2)
v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, v4, v5
v_add_nc_u32_e32 v4, v6, v4
s_clause 0x3
global_store_b8 v[0:1], v2, off
global_store_b8 v[0:1], v6, off offset:1
global_store_b8 v[0:1], v3, off offset:3
global_store_b8 v[0:1], v4, off offset:2
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelP15HIP_vector_typeIhLj4EEjjf, .Lfunc_end0-_Z6kernelP15HIP_vector_typeIhLj4EEjjf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6kernelP15HIP_vector_typeIhLj4EEjjf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0009dda1_00000000-6_kernelPBO.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Cuda error: %s: %s.\n"
.text
.globl _Z14checkCUDAErrorPKc
.type _Z14checkCUDAErrorPKc, @function
_Z14checkCUDAErrorPKc:
.LFB2057:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movq %rdi, %rbx
call cudaGetLastError@PLT
testl %eax, %eax
jne .L6
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %r8
movq %rbx, %rcx
leaq .LC0(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z14checkCUDAErrorPKc, .-_Z14checkCUDAErrorPKc
.globl _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
.type _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf, @function
_Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf:
.LFB2083:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movss %xmm0, 12(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 20(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 12(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L11
.L7:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L12
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6kernelP6uchar4jjf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L7
.L12:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf, .-_Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
.globl _Z6kernelP6uchar4jjf
.type _Z6kernelP6uchar4jjf, @function
_Z6kernelP6uchar4jjf:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z6kernelP6uchar4jjf, .-_Z6kernelP6uchar4jjf
.section .rodata.str1.1
.LC1:
.string "kernel failed!"
.text
.globl launch_kernel
.type launch_kernel, @function
launch_kernel:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $48, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %r12
movl %esi, %ebx
movl %edx, %ebp
movss %xmm0, 12(%rsp)
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %edx
testl %eax, %eax
cmovns %eax, %edx
sarl $8, %edx
movl $256, 36(%rsp)
movl $1, 40(%rsp)
movl %eax, %ecx
sarl $31, %ecx
shrl $24, %ecx
addl %ecx, %eax
movzbl %al, %eax
subl %ecx, %eax
testl %eax, %eax
setg %al
movzbl %al, %eax
addl %edx, %eax
movl %eax, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L18
.L16:
call cudaThreadSynchronize@PLT
leaq .LC1(%rip), %rdi
call _Z14checkCUDAErrorPKc
addq $48, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
movss 12(%rsp), %xmm0
movl %ebp, %edx
movl %ebx, %esi
movq %r12, %rdi
call _Z34__device_stub__Z6kernelP6uchar4jjfP6uchar4jjf
jmp .L16
.cfi_endproc
.LFE2058:
.size launch_kernel, .-launch_kernel
.section .rodata.str1.1
.LC2:
.string "_Z6kernelP6uchar4jjf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelP6uchar4jjf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernelPBO.hip"
.globl _Z14checkCUDAErrorPKc # -- Begin function _Z14checkCUDAErrorPKc
.p2align 4, 0x90
.type _Z14checkCUDAErrorPKc,@function
_Z14checkCUDAErrorPKc: # @_Z14checkCUDAErrorPKc
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
pushq %rax
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq %rdi, %rbx
callq hipGetLastError
testl %eax, %eax
jne .LBB0_2
# %bb.1:
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.LBB0_2:
.cfi_def_cfa_offset 32
movq stderr(%rip), %r14
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movq %r14, %rdi
movq %rbx, %rdx
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z14checkCUDAErrorPKc, .Lfunc_end0-_Z14checkCUDAErrorPKc
.cfi_endproc
# -- End function
.globl _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf # -- Begin function _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 4, 0x90
.type _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf,@function
_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf: # @_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movl %esi, 20(%rsp)
movl %edx, 16(%rsp)
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf, .Lfunc_end1-_Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.cfi_endproc
# -- End function
.globl launch_kernel # -- Begin function launch_kernel
.p2align 4, 0x90
.type launch_kernel,@function
launch_kernel: # @launch_kernel
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $112, %rsp
.cfi_def_cfa_offset 144
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %rbp, -16
movss %xmm0, 8(%rsp) # 4-byte Spill
movl %edx, %ebx
movl %esi, %ebp
movq %rdi, %r14
movl %edx, %eax
imull %esi, %eax
leal 255(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
sarl $8, %ecx
xorl %edi, %edi
testl $-2147483393, %eax # imm = 0x800000FF
setg %dil
addl %ecx, %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $256, %rdx # imm = 0x100
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movq %r14, 72(%rsp)
movl %ebp, 20(%rsp)
movl %ebx, 16(%rsp)
movss 8(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
movss %xmm0, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 20(%rsp), %rax
movq %rax, 88(%rsp)
leaq 16(%rsp), %rax
movq %rax, 96(%rsp)
leaq 12(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
callq hipDeviceSynchronize
callq hipGetLastError
testl %eax, %eax
jne .LBB2_4
# %bb.3: # %_Z14checkCUDAErrorPKc.exit
addq $112, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_4:
.cfi_def_cfa_offset 144
movq stderr(%rip), %rbx
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %esi
movl $.L.str.1, %edx
movq %rbx, %rdi
movq %rax, %rcx
xorl %eax, %eax
callq fprintf
movl $1, %edi
callq exit
.Lfunc_end2:
.size launch_kernel, .Lfunc_end2-launch_kernel
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelP15HIP_vector_typeIhLj4EEjjf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Cuda error: %s: %s.\n"
.size .L.str, 21
.type _Z6kernelP15HIP_vector_typeIhLj4EEjjf,@object # @_Z6kernelP15HIP_vector_typeIhLj4EEjjf
.section .rodata,"a",@progbits
.globl _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.p2align 3, 0x0
_Z6kernelP15HIP_vector_typeIhLj4EEjjf:
.quad _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.size _Z6kernelP15HIP_vector_typeIhLj4EEjjf, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "kernel failed!"
.size .L.str.1, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelP15HIP_vector_typeIhLj4EEjjf"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelP15HIP_vector_typeIhLj4EEjjf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelP15HIP_vector_typeIhLj4EEjjf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} | code for sm_80
Function : _Z13rovCorrectioniiiiiiiPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R24, SR_CTAID.X ; /* 0x0000000000187919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R24, R24, c[0x0][0x0], R3 ; /* 0x0000000018187a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R24, c[0x0][0x160], PT ; /* 0x0000580018007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ I2F.F64 R4, c[0x0][0x164] ; /* 0x0000590000047b12 */
/* 0x000e300000201c00 */
/*0070*/ I2F.F64 R6, c[0x0][0x168] ; /* 0x00005a0000067b12 */
/* 0x000e620000201c00 */
/*0080*/ LOP3.LUT R0, R5, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000005007812 */
/* 0x001fc800078ec0ff */
/*0090*/ LEA.HI R3, R0, 0xfffffc0c, RZ, 0xc ; /* 0xfffffc0c00037811 */
/* 0x000fc800078f60ff */
/*00a0*/ SHF.L.U32 R0, R4.reuse, R3.reuse, RZ ; /* 0x0000000304007219 */
/* 0x0c0fe400000006ff */
/*00b0*/ SHF.L.U64.HI R3, R4, R3, R5 ; /* 0x0000000304037219 */
/* 0x000fe40000010205 */
/*00c0*/ ISETP.NE.U32.AND P0, PT, R0.reuse, RZ, PT ; /* 0x000000ff0000720c */
/* 0x040fe40003f05070 */
/*00d0*/ ISETP.NE.U32.AND P1, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe40003f25070 */
/*00e0*/ ISETP.NE.AND.EX P0, PT, R3.reuse, -0x80000000, PT, P0 ; /* 0x800000000300780c */
/* 0x040fe40003f05300 */
/*00f0*/ ISETP.NE.AND.EX P1, PT, R3, -0x80000000, PT, P1 ; /* 0x800000000300780c */
/* 0x000fc40003f25310 */
/*0100*/ MOV R0, 0x120 ; /* 0x0000012000007802 */
/* 0x000fcc0000000f00 */
/*0110*/ CALL.REL.NOINC 0x5b0 ; /* 0x0000049000007944 */
/* 0x002fea0003c00000 */
/*0120*/ ISETP.GT.OR P1, PT, R7, -0x1, P1 ; /* 0xffffffff0700780c */
/* 0x000fe20000f24670 */
/*0130*/ IMAD.MOV.U32 R3, RZ, RZ, R15 ; /* 0x000000ffff037224 */
/* 0x000fe200078e000f */
/*0140*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x168], PT ; /* 0x00005a00ff007a0c */
/* 0x000fe20003f45270 */
/*0150*/ IMAD.MOV.U32 R2, RZ, RZ, R14 ; /* 0x000000ffff027224 */
/* 0x000fd400078e000e */
/*0160*/ @!P1 LOP3.LUT R9, R3, 0x80000000, RZ, 0x3c, !PT ; /* 0x8000000003099812 */
/* 0x000fca00078e3cff */
/*0170*/ @!P1 IMAD.MOV.U32 R3, RZ, RZ, R9 ; /* 0x000000ffff039224 */
/* 0x000fe200078e0009 */
/*0180*/ @!P2 BRA 0x210 ; /* 0x000000800000a947 */
/* 0x000fea0003800000 */
/*0190*/ FRND.F64.TRUNC R8, R4 ; /* 0x0000000400087313 */
/* 0x000e22000030d800 */
/*01a0*/ PLOP3.LUT P0, PT, P0, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe2000070e170 */
/*01b0*/ DSETP.NEU.AND P1, PT, R8, R4, PT ; /* 0x000000040800722a */
/* 0x001e0c0003f2d000 */
/*01c0*/ ISETP.GT.OR P1, PT, R7, -0x1, !P1 ; /* 0xffffffff0700780c */
/* 0x001fda0004f24670 */
/*01d0*/ @P1 BRA 0x260 ; /* 0x0000008000001947 */
/* 0x000fea0003800000 */
/*01e0*/ IMAD.MOV.U32 R2, RZ, RZ, 0x0 ; /* 0x00000000ff027424 */
/* 0x000fe400078e00ff */
/*01f0*/ IMAD.MOV.U32 R3, RZ, RZ, -0x80000 ; /* 0xfff80000ff037424 */
/* 0x000fe200078e00ff */
/*0200*/ BRA 0x260 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0210*/ ISETP.GE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f26270 */
/*0220*/ DSETP.NEU.AND P0, PT, |R4|, 0.5, !P0 ; /* 0x3fe000000400742a */
/* 0x000e22000470d200 */
/*0230*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fca00078e00ff */
/*0240*/ SEL R3, R7, RZ, P0 ; /* 0x000000ff07037207 */
/* 0x001fcc0000000000 */
/*0250*/ @!P1 LOP3.LUT R3, R3, 0x7ff00000, RZ, 0xfc, !PT ; /* 0x7ff0000003039812 */
/* 0x000fe400078efcff */
/*0260*/ DADD R8, R6, R4 ; /* 0x0000000006087229 */
/* 0x000e220000000004 */
/*0270*/ ULDC.64 UR8, c[0x0][0x118] ; /* 0x0000460000087ab9 */
/* 0x000fd20000000a00 */
/*0280*/ LOP3.LUT R8, R9, 0x7ff00000, RZ, 0xc0, !PT ; /* 0x7ff0000009087812 */
/* 0x001fc800078ec0ff */
/*0290*/ ISETP.NE.AND P1, PT, R8, 0x7ff00000, PT ; /* 0x7ff000000800780c */
/* 0x000fda0003f25270 */
/*02a0*/ @P1 BRA 0x470 ; /* 0x000001c000001947 */
/* 0x000fea0003800000 */
/*02b0*/ DSETP.GTU.AND P1, PT, |R4|, +INF , PT ; /* 0x7ff000000400742a */
/* 0x000e0c0003f2c200 */
/*02c0*/ DSETP.GTU.OR P1, PT, |R6|, +INF , P1 ; /* 0x7ff000000600742a */
/* 0x001e1c0000f2c600 */
/*02d0*/ @P1 BRA 0x460 ; /* 0x0000018000001947 */
/* 0x001fea0003800000 */
/*02e0*/ ISETP.NE.AND P1, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fe40003f25270 */
/*02f0*/ LOP3.LUT R0, R5, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff05007812 */
/* 0x000fc800078ec0ff */
/*0300*/ ISETP.EQ.AND P1, PT, R0, 0x7ff00000, !P1 ; /* 0x7ff000000000780c */
/* 0x000fda0004f22270 */
/*0310*/ @!P1 BRA 0x3b0 ; /* 0x0000009000009947 */
/* 0x000fea0003800000 */
/*0320*/ ISETP.GE.AND P1, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f26270 */
/*0330*/ DSETP.GT.AND P0, PT, |R6|, 1, PT ; /* 0x3ff000000600742a */
/* 0x000e220003f04200 */
/*0340*/ IMAD.MOV.U32 R2, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff027624 */
/* 0x000fca00078e00ff */
/*0350*/ SEL R0, RZ, 0x7ff00000, !P0 ; /* 0x7ff00000ff007807 */
/* 0x001fe40004000000 */
/*0360*/ ISETP.NE.AND P0, PT, R2, -0x1, PT ; /* 0xffffffff0200780c */
/* 0x000fe20003f05270 */
/*0370*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fc600078e00ff */
/*0380*/ @!P1 LOP3.LUT R0, R0, 0x7ff00000, RZ, 0x3c, !PT ; /* 0x7ff0000000009812 */
/* 0x000fc800078e3cff */
/*0390*/ SEL R3, R0, 0x3ff00000, P0 ; /* 0x3ff0000000037807 */
/* 0x000fe20000000000 */
/*03a0*/ BRA 0x470 ; /* 0x000000c000007947 */
/* 0x000fea0003800000 */
/*03b0*/ ISETP.NE.AND P1, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fe40003f25270 */
/*03c0*/ LOP3.LUT R8, R7, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff07087812 */
/* 0x000fc800078ec0ff */
/*03d0*/ ISETP.NE.OR P1, PT, R8, 0x7ff00000, P1 ; /* 0x7ff000000800780c */
/* 0x000fda0000f25670 */
/*03e0*/ @P1 BRA 0x470 ; /* 0x0000008000001947 */
/* 0x000fea0003800000 */
/*03f0*/ ISETP.LT.AND P0, PT, R7, RZ, P0 ; /* 0x000000ff0700720c */
/* 0x000fe20000701270 */
/*0400*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x000fe200078e00ff */
/*0410*/ ISETP.GT.AND P1, PT, R5, -0x1, PT ; /* 0xffffffff0500780c */
/* 0x000fe40003f24270 */
/*0420*/ ISETP.NE.AND P0, PT, R0, 0x3fe00000, P0 ; /* 0x3fe000000000780c */
/* 0x000fe40000705270 */
/*0430*/ SEL R3, RZ, 0x7ff00000, !P1 ; /* 0x7ff00000ff037807 */
/* 0x000fd60004800000 */
/*0440*/ @P0 IADD3 R3, R3, -0x80000000, RZ ; /* 0x8000000003030810 */
/* 0x000fe20007ffe0ff */
/*0450*/ BRA 0x470 ; /* 0x0000001000007947 */
/* 0x000fea0003800000 */
/*0460*/ DADD R2, R6, R4 ; /* 0x0000000006027229 */
/* 0x0000480000000004 */
/*0470*/ IMAD.MOV.U32 R5, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff057624 */
/* 0x001fe200078e00ff */
/*0480*/ ULDC UR6, c[0x0][0x178] ; /* 0x00005e0000067ab9 */
/* 0x000fe20000000800 */
/*0490*/ IMAD.MOV.U32 R4, RZ, RZ, 0x4 ; /* 0x00000004ff047424 */
/* 0x000fe200078e00ff */
/*04a0*/ ULDC.64 UR4, c[0x0][0x170] ; /* 0x00005c0000047ab9 */
/* 0x000fe40000000a00 */
/*04b0*/ ISETP.NE.AND P0, PT, R5.reuse, 0x1, PT ; /* 0x000000010500780c */
/* 0x040fe20003f05270 */
/*04c0*/ IMAD R5, R5, c[0x0][0x164], RZ ; /* 0x0000590005057a24 */
/* 0x000fe200078e02ff */
/*04d0*/ UIMAD UR4, UR5, UR4, UR6 ; /* 0x00000004050472a4 */
/* 0x000fe4000f8e0206 */
/*04e0*/ ISETP.EQ.OR P0, PT, RZ, c[0x0][0x164], !P0 ; /* 0x00005900ff007a0c */
/* 0x000fe20004702670 */
/*04f0*/ IMAD.IADD R24, R24, 0x1, R5 ; /* 0x0000000118187824 */
/* 0x000fc600078e0205 */
/*0500*/ FSEL R2, R2, RZ, !P0 ; /* 0x000000ff02027208 */
/* 0x002fe40004000000 */
/*0510*/ FSEL R3, R3, 1.875, !P0 ; /* 0x3ff0000003037808 */
/* 0x000fc80004000000 */
/*0520*/ F2I.F64.TRUNC R2, R2 ; /* 0x0000000200027311 */
/* 0x000e24000030d100 */
/*0530*/ IMAD R5, R2, 0x2, R5 ; /* 0x0000000202057824 */
/* 0x001fc800078e0205 */
/*0540*/ IMAD R5, R5, UR4, R24 ; /* 0x0000000405057c24 */
/* 0x000fc8000f8e0218 */
/*0550*/ IMAD.WIDE R4, R5, R4, c[0x0][0x180] ; /* 0x0000600005047625 */
/* 0x000fca00078e0204 */
/*0560*/ LDG.E R0, [R4.64] ; /* 0x0000000804007981 */
/* 0x000ea4000c1e1900 */
/*0570*/ FSETP.GT.AND P0, PT, R0, RZ, PT ; /* 0x000000ff0000720b */
/* 0x004fda0003f04000 */
/*0580*/ @!P0 EXIT ; /* 0x000000000000894d */
/* 0x000fea0003800000 */
/*0590*/ STG.E [R4.64], RZ ; /* 0x000000ff04007986 */
/* 0x000fe2000c101908 */
/*05a0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*05b0*/ DADD R2, -RZ, |R6| ; /* 0x00000000ff027229 */
/* 0x000e220000000506 */
/*05c0*/ IMAD.MOV.U32 R10, RZ, RZ, RZ ; /* 0x000000ffff0a7224 */
/* 0x000fe400078e00ff */
/*05d0*/ IMAD.MOV.U32 R16, RZ, RZ, 0x7d2cafe2 ; /* 0x7d2cafe2ff107424 */
/* 0x000fe400078e00ff */
/*05e0*/ IMAD.MOV.U32 R17, RZ, RZ, 0x3eb0f5ff ; /* 0x3eb0f5ffff117424 */
/* 0x000fca00078e00ff */
/*05f0*/ SHF.R.U32.HI R25, RZ, 0x14, R3 ; /* 0x00000014ff197819 */
/* 0x001fc80000011603 */
/*0600*/ ISETP.NE.AND P2, PT, R25, RZ, PT ; /* 0x000000ff1900720c */
/* 0x000fda0003f45270 */
/*0610*/ @!P2 DMUL R26, R2, 1.80143985094819840000e+16 ; /* 0x43500000021aa828 */
/* 0x000e140000000000 */
/*0620*/ @!P2 IMAD.MOV.U32 R3, RZ, RZ, R27 ; /* 0x000000ffff03a224 */
/* 0x001fe200078e001b */
/*0630*/ @!P2 LEA.HI R25, R27, 0xffffffca, RZ, 0xc ; /* 0xffffffca1b19a811 */
/* 0x000fe200078f60ff */
/*0640*/ @!P2 IMAD.MOV.U32 R2, RZ, RZ, R26 ; /* 0x000000ffff02a224 */
/* 0x000fc600078e001a */
/*0650*/ LOP3.LUT R3, R3, 0x800fffff, RZ, 0xc0, !PT ; /* 0x800fffff03037812 */
/* 0x000fc800078ec0ff */
/*0660*/ LOP3.LUT R3, R3, 0x3ff00000, RZ, 0xfc, !PT ; /* 0x3ff0000003037812 */
/* 0x000fc800078efcff */
/*0670*/ ISETP.GE.U32.AND P3, PT, R3, 0x3ff6a09f, PT ; /* 0x3ff6a09f0300780c */
/* 0x000fda0003f66070 */
/*0680*/ @P3 IADD3 R9, R3, -0x100000, RZ ; /* 0xfff0000003093810 */
/* 0x000fca0007ffe0ff */
/*0690*/ @P3 IMAD.MOV.U32 R3, RZ, RZ, R9 ; /* 0x000000ffff033224 */
/* 0x000fcc00078e0009 */
/*06a0*/ DADD R14, R2, 1 ; /* 0x3ff00000020e7429 */
/* 0x000e080000000000 */
/*06b0*/ DADD R2, R2, -1 ; /* 0xbff0000002027429 */
/* 0x000fe40000000000 */
/*06c0*/ MUFU.RCP64H R11, R15 ; /* 0x0000000f000b7308 */
/* 0x001e240000001800 */
/*06d0*/ DFMA R8, -R14, R10, 1 ; /* 0x3ff000000e08742b */
/* 0x001e0c000000010a */
/*06e0*/ DFMA R8, R8, R8, R8 ; /* 0x000000080808722b */
/* 0x001e0c0000000008 */
/*06f0*/ DFMA R10, R10, R8, R10 ; /* 0x000000080a0a722b */
/* 0x001e0c000000000a */
/*0700*/ DMUL R12, R10, R2 ; /* 0x000000020a0c7228 */
/* 0x001e0c0000000000 */
/*0710*/ DFMA R12, R10, R2, R12 ; /* 0x000000020a0c722b */
/* 0x001e0c000000000c */
/*0720*/ DMUL R8, R12, R12 ; /* 0x0000000c0c087228 */
/* 0x001e080000000000 */
/*0730*/ DADD R14, R2, -R12 ; /* 0x00000000020e7229 */
/* 0x000e48000000080c */
/*0740*/ DFMA R16, R8, R16, c[0x2][0x0] ; /* 0x008000000810762b */
/* 0x001e080000000010 */
/*0750*/ DADD R18, R14, R14 ; /* 0x000000000e127229 */
/* 0x002e48000000000e */
/*0760*/ DFMA R16, R8, R16, c[0x2][0x8] ; /* 0x008002000810762b */
/* 0x001e080000000010 */
/*0770*/ DFMA R2, R2, -R12, R18 ; /* 0x8000000c0202722b */
/* 0x002fc80000000012 */
/*0780*/ DFMA R16, R8, R16, c[0x2][0x10] ; /* 0x008004000810762b */
/* 0x001e080000000010 */
/*0790*/ DMUL R20, R12, R12 ; /* 0x0000000c0c147228 */
/* 0x000fc80000000000 */
/*07a0*/ DFMA R16, R8, R16, c[0x2][0x18] ; /* 0x008006000810762b */
/* 0x001e080000000010 */
/*07b0*/ DMUL R10, R10, R2 ; /* 0x000000020a0a7228 */
/* 0x000fc80000000000 */
/*07c0*/ DFMA R16, R8, R16, c[0x2][0x20] ; /* 0x008008000810762b */
/* 0x001e0c0000000010 */
/*07d0*/ DFMA R16, R8, R16, c[0x2][0x28] ; /* 0x00800a000810762b */
/* 0x001e220000000010 */
/*07e0*/ IADD3 R3, R11, 0x100000, RZ ; /* 0x001000000b037810 */
/* 0x000fe20007ffe0ff */
/*07f0*/ IMAD.MOV.U32 R2, RZ, RZ, R10 ; /* 0x000000ffff027224 */
/* 0x000fc800078e000a */
/*0800*/ DFMA R14, R8, R16, c[0x2][0x30] ; /* 0x00800c00080e762b */
/* 0x001e0c0000000010 */
/*0810*/ DADD R18, -R14, c[0x2][0x30] ; /* 0x00800c000e127629 */
/* 0x001e0c0000000100 */
/*0820*/ DFMA R18, R8, R16, R18 ; /* 0x000000100812722b */
/* 0x001e080000000012 */
/*0830*/ DMUL R16, R12, R20 ; /* 0x000000140c107228 */
/* 0x000e480000000000 */
/*0840*/ DADD R8, RZ, R18 ; /* 0x00000000ff087229 */
/* 0x001e080000000012 */
/*0850*/ DFMA R18, R12, R12, -R20 ; /* 0x0000000c0c12722b */
/* 0x000fc80000000814 */
/*0860*/ DFMA R22, R12, R20, -R16 ; /* 0x000000140c16722b */
/* 0x002e480000000810 */
/*0870*/ DADD R8, R8, c[0x2][0x38] ; /* 0x00800e0008087629 */
/* 0x001e080000000000 */
/*0880*/ DFMA R22, R10, R20, R22 ; /* 0x000000140a16722b */
/* 0x002fc80000000016 */
/*0890*/ DFMA R18, R12, R2, R18 ; /* 0x000000020c12722b */
/* 0x000e480000000012 */
/*08a0*/ DADD R2, R14, R8 ; /* 0x000000000e027229 */
/* 0x001e080000000008 */
/*08b0*/ DFMA R22, R12, R18, R22 ; /* 0x000000120c16722b */
/* 0x002fc80000000016 */
/*08c0*/ DADD R18, R14, -R2 ; /* 0x000000000e127229 */
/* 0x001e080000000802 */
/*08d0*/ DMUL R14, R2, R16 ; /* 0x00000010020e7228 */
/* 0x000e480000000000 */
/*08e0*/ DADD R8, R8, R18 ; /* 0x0000000008087229 */
/* 0x001fc80000000012 */
/*08f0*/ DFMA R18, R2, R16, -R14 ; /* 0x000000100212722b */
/* 0x002e0c000000080e */
/*0900*/ DFMA R18, R2, R22, R18 ; /* 0x000000160212722b */
/* 0x001e0c0000000012 */
/*0910*/ DFMA R18, R8, R16, R18 ; /* 0x000000100812722b */
/* 0x001e0c0000000012 */
/*0920*/ DADD R8, R14, R18 ; /* 0x000000000e087229 */
/* 0x001e0c0000000012 */
/*0930*/ DADD R2, R12, R8 ; /* 0x000000000c027229 */
/* 0x001e080000000008 */
/*0940*/ DADD R14, R14, -R8 ; /* 0x000000000e0e7229 */
/* 0x000e480000000808 */
/*0950*/ DADD R12, R12, -R2 ; /* 0x000000000c0c7229 */
/* 0x001e080000000802 */
/*0960*/ DADD R14, R18, R14 ; /* 0x00000000120e7229 */
/* 0x0023e4000000000e */
/*0970*/ IMAD.MOV.U32 R18, RZ, RZ, 0x69ce2bdf ; /* 0x69ce2bdfff127424 */
/* 0x002fe400078e00ff */
/*0980*/ DADD R12, R8, R12 ; /* 0x00000000080c7229 */
/* 0x001062000000000c */
/*0990*/ IMAD.MOV.U32 R19, RZ, RZ, 0x3e5ade15 ; /* 0x3e5ade15ff137424 */
/* 0x000fe200078e00ff */
/*09a0*/ IADD3 R8, R25.reuse, -0x3ff, RZ ; /* 0xfffffc0119087810 */
/* 0x041fe20007ffe0ff */
/*09b0*/ IMAD.MOV.U32 R9, RZ, RZ, 0x43300000 ; /* 0x43300000ff097424 */
/* 0x000fe200078e00ff */
/*09c0*/ @P3 IADD3 R8, R25, -0x3fe, RZ ; /* 0xfffffc0219083810 */
/* 0x000fe40007ffe0ff */
/*09d0*/ DADD R12, R14, R12 ; /* 0x000000000e0c7229 */
/* 0x002e04000000000c */
/*09e0*/ LOP3.LUT R8, R8, 0x80000000, RZ, 0x3c, !PT ; /* 0x8000000008087812 */
/* 0x000fc800078e3cff */
/*09f0*/ DADD R12, R10, R12 ; /* 0x000000000a0c7229 */
/* 0x001e08000000000c */
/*0a00*/ DADD R10, R8, c[0x2][0x40] ; /* 0x00801000080a7629 */
/* 0x000fc80000000000 */
/*0a10*/ DADD R14, R2, R12 ; /* 0x00000000020e7229 */
/* 0x001e0c000000000c */
/*0a20*/ DFMA R8, R10, c[0x2][0x48], R14 ; /* 0x008012000a087a2b */
/* 0x001e08000000000e */
/*0a30*/ DADD R2, R2, -R14 ; /* 0x0000000002027229 */
/* 0x000e48000000080e */
/*0a40*/ DFMA R16, -R10, c[0x2][0x48], R8 ; /* 0x008012000a107a2b */
/* 0x001e080000000108 */
/*0a50*/ DADD R2, R12, R2 ; /* 0x000000000c027229 */
/* 0x0023e40000000002 */
/*0a60*/ IMAD.SHL.U32 R12, R5.reuse, 0x2, RZ ; /* 0x00000002050c7824 */
/* 0x042fe400078e00ff */
/*0a70*/ DADD R16, -R14, R16 ; /* 0x000000000e107229 */
/* 0x0010640000000110 */
/*0a80*/ LOP3.LUT R15, R5, 0xff0fffff, RZ, 0xc0, !PT ; /* 0xff0fffff050f7812 */
/* 0x001fe200078ec0ff */
/*0a90*/ IMAD.MOV.U32 R14, RZ, RZ, R4 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0004 */
/*0aa0*/ ISETP.GT.U32.AND P2, PT, R12, -0x2000001, PT ; /* 0xfdffffff0c00780c */
/* 0x000fe40003f44070 */
/*0ab0*/ DADD R2, R2, -R16 ; /* 0x0000000002027229 */
/* 0x002e040000000810 */
/*0ac0*/ SEL R15, R15, R5, P2 ; /* 0x000000050f0f7207 */
/* 0x000fc80001000000 */
/*0ad0*/ DFMA R10, R10, c[0x2][0x50], R2 ; /* 0x008014000a0a7a2b */
/* 0x001e0c0000000002 */
/*0ae0*/ DADD R12, R8, R10 ; /* 0x00000000080c7229 */
/* 0x001e0c000000000a */
/*0af0*/ DADD R8, R8, -R12 ; /* 0x0000000008087229 */
/* 0x001e08000000080c */
/*0b00*/ DMUL R2, R12, R14 ; /* 0x0000000e0c027228 */
/* 0x000e480000000000 */
/*0b10*/ DADD R8, R10, R8 ; /* 0x000000000a087229 */
/* 0x001fc80000000008 */
/*0b20*/ DFMA R12, R12, R14, -R2 ; /* 0x0000000e0c0c722b */
/* 0x002e0c0000000802 */
/*0b30*/ DFMA R8, R8, R14, R12 ; /* 0x0000000e0808722b */
/* 0x001064000000000c */
/*0b40*/ IMAD.MOV.U32 R12, RZ, RZ, 0x652b82fe ; /* 0x652b82feff0c7424 */
/* 0x001fe400078e00ff */
/*0b50*/ IMAD.MOV.U32 R13, RZ, RZ, 0x3ff71547 ; /* 0x3ff71547ff0d7424 */
/* 0x000fe400078e00ff */
/*0b60*/ DADD R10, R2, R8 ; /* 0x00000000020a7229 */
/* 0x002e0c0000000008 */
/*0b70*/ DFMA R12, R10, R12, 6.75539944105574400000e+15 ; /* 0x433800000a0c742b */
/* 0x001e08000000000c */
/*0b80*/ FSETP.GEU.AND P2, PT, |R11|, 4.1917929649353027344, PT ; /* 0x4086232b0b00780b */
/* 0x000fe40003f4e200 */
/*0b90*/ DADD R14, R12, -6.75539944105574400000e+15 ; /* 0xc33800000c0e7429 */
/* 0x001e0c0000000000 */
/*0ba0*/ DFMA R16, R14, c[0x2][0x58], R10 ; /* 0x008016000e107a2b */
/* 0x001e0c000000000a */
/*0bb0*/ DFMA R14, R14, c[0x2][0x60], R16 ; /* 0x008018000e0e7a2b */
/* 0x001e0c0000000010 */
/*0bc0*/ DFMA R16, R14, R18, c[0x2][0x68] ; /* 0x00801a000e10762b */
/* 0x001e0c0000000012 */
/*0bd0*/ DFMA R16, R14, R16, c[0x2][0x70] ; /* 0x00801c000e10762b */
/* 0x001e0c0000000010 */
/*0be0*/ DFMA R16, R14, R16, c[0x2][0x78] ; /* 0x00801e000e10762b */
/* 0x001e0c0000000010 */
/*0bf0*/ DFMA R16, R14, R16, c[0x2][0x80] ; /* 0x008020000e10762b */
/* 0x001e0c0000000010 */
/*0c00*/ DFMA R16, R14, R16, c[0x2][0x88] ; /* 0x008022000e10762b */
/* 0x001e0c0000000010 */
/*0c10*/ DFMA R16, R14, R16, c[0x2][0x90] ; /* 0x008024000e10762b */
/* 0x001e0c0000000010 */
/*0c20*/ DFMA R16, R14, R16, c[0x2][0x98] ; /* 0x008026000e10762b */
/* 0x001e0c0000000010 */
/*0c30*/ DFMA R16, R14, R16, c[0x2][0xa0] ; /* 0x008028000e10762b */
/* 0x001e0c0000000010 */
/*0c40*/ DFMA R16, R14, R16, c[0x2][0xa8] ; /* 0x00802a000e10762b */
/* 0x001e0c0000000010 */
/*0c50*/ DFMA R16, R14, R16, 1 ; /* 0x3ff000000e10742b */
/* 0x001e0c0000000010 */
/*0c60*/ DFMA R16, R14, R16, 1 ; /* 0x3ff000000e10742b */
/* 0x001e140000000010 */
/*0c70*/ IMAD R15, R12, 0x100000, R17 ; /* 0x001000000c0f7824 */
/* 0x001fe400078e0211 */
/*0c80*/ IMAD.MOV.U32 R14, RZ, RZ, R16 ; /* 0x000000ffff0e7224 */
/* 0x000fe200078e0010 */
/*0c90*/ @!P2 BRA 0xd80 ; /* 0x000000e00000a947 */
/* 0x000fea0003800000 */
/*0ca0*/ FSETP.GEU.AND P3, PT, |R11|, 4.2275390625, PT ; /* 0x408748000b00780b */
/* 0x000fe20003f6e200 */
/*0cb0*/ DADD R14, R10, +INF ; /* 0x7ff000000a0e7429 */
/* 0x000fc80000000000 */
/*0cc0*/ DSETP.GEU.AND P2, PT, R10, RZ, PT ; /* 0x000000ff0a00722a */
/* 0x000e0c0003f4e000 */
/*0cd0*/ FSEL R14, R14, RZ, P2 ; /* 0x000000ff0e0e7208 */
/* 0x001fe40001000000 */
/*0ce0*/ FSEL R15, R15, RZ, P2 ; /* 0x000000ff0f0f7208 */
/* 0x000fe20001000000 */
/*0cf0*/ @P3 BRA 0xd80 ; /* 0x0000008000003947 */
/* 0x000fea0003800000 */
/*0d00*/ LEA.HI R13, R12, R12, RZ, 0x1 ; /* 0x0000000c0c0d7211 */
/* 0x000fe200078f08ff */
/*0d10*/ IMAD.MOV.U32 R14, RZ, RZ, R16 ; /* 0x000000ffff0e7224 */
/* 0x000fc600078e0010 */
/*0d20*/ SHF.R.S32.HI R13, RZ, 0x1, R13 ; /* 0x00000001ff0d7819 */
/* 0x000fca000001140d */
/*0d30*/ IMAD.IADD R12, R12, 0x1, -R13 ; /* 0x000000010c0c7824 */
/* 0x000fe400078e0a0d */
/*0d40*/ IMAD R15, R13, 0x100000, R17 ; /* 0x001000000d0f7824 */
/* 0x000fc600078e0211 */
/*0d50*/ LEA R13, R12, 0x3ff00000, 0x14 ; /* 0x3ff000000c0d7811 */
/* 0x000fe200078ea0ff */
/*0d60*/ IMAD.MOV.U32 R12, RZ, RZ, RZ ; /* 0x000000ffff0c7224 */
/* 0x000fcc00078e00ff */
/*0d70*/ DMUL R14, R14, R12 ; /* 0x0000000c0e0e7228 */
/* 0x0000540000000000 */
/*0d80*/ LOP3.LUT R12, R15, 0x7fffffff, RZ, 0xc0, !PT ; /* 0x7fffffff0f0c7812 */
/* 0x003fe200078ec0ff */
/*0d90*/ DADD R2, R2, -R10 ; /* 0x0000000002027229 */
/* 0x000e06000000080a */
/*0da0*/ ISETP.NE.AND P2, PT, R12, 0x7ff00000, PT ; /* 0x7ff000000c00780c */
/* 0x000fc60003f45270 */
/*0db0*/ DADD R2, R8, R2 ; /* 0x0000000008027229 */
/* 0x001e220000000002 */
/*0dc0*/ ISETP.EQ.AND P2, PT, R14, RZ, !P2 ; /* 0x000000ff0e00720c */
/* 0x000fda0005742270 */
/*0dd0*/ @!P2 DFMA R14, R2, R14, R14 ; /* 0x0000000e020ea22b */
/* 0x001064000000000e */
/*0de0*/ IMAD.MOV.U32 R2, RZ, RZ, R0 ; /* 0x000000ffff027224 */
/* 0x001fe400078e0000 */
/*0df0*/ IMAD.MOV.U32 R3, RZ, RZ, 0x0 ; /* 0x00000000ff037424 */
/* 0x000fc800078e00ff */
/*0e00*/ RET.REL.NODEC R2 0x0 ; /* 0xfffff1f002007950 */
/* 0x002fea0003c3ffff */
/*0e10*/ BRA 0xe10; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0e20*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e30*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e40*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e50*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e60*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0e90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ea0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0eb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ec0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ed0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ee0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ef0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} | .file "tmpxft_000f8913_00000000-6_rovCorrection.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
.type _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf, @function
_Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq 200(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 200
pushq 40(%rsp)
.cfi_def_cfa_offset 208
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13rovCorrectioniiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf, .-_Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
.globl _Z13rovCorrectioniiiiiiiPf
.type _Z13rovCorrectioniiiiiiiPf, @function
_Z13rovCorrectioniiiiiiiPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13rovCorrectioniiiiiiiPf, .-_Z13rovCorrectioniiiiiiiPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13rovCorrectioniiiiiiiPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13rovCorrectioniiiiiiiPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13rovCorrectioniiiiiiiPf
.globl _Z13rovCorrectioniiiiiiiPf
.p2align 8
.type _Z13rovCorrectioniiiiiiiPf,@function
_Z13rovCorrectioniiiiiiiPf:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b32 s3, s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_3
s_clause 0x3
s_load_b64 s[4:5], s[0:1], 0x4
s_load_b64 s[6:7], s[0:1], 0x10
s_load_b32 s8, s[0:1], 0x18
s_load_b64 s[2:3], s[0:1], 0x20
s_mov_b32 s1, 0x3fe55555
s_mov_b32 s0, 0x55555555
s_mov_b32 s11, 0x3fba6564
s_mov_b32 s10, 0x968915a9
s_mov_b32 s13, 0x3fbdee67
s_mov_b32 s12, 0x4222de17
s_waitcnt lgkmcnt(0)
v_cvt_f64_i32_e32 v[2:3], s4
s_cmp_lg_u32 s5, 1
v_cvt_f64_i32_e32 v[4:5], s5
s_cselect_b32 vcc_lo, -1, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v3, 0x3ff00000, v3, vcc_lo
v_cndmask_b32_e32 v2, 0, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cmp_neq_f64_e32 vcc_lo, 0, v[2:3]
v_cndmask_b32_e32 v5, 0x3ff00000, v5, vcc_lo
v_cndmask_b32_e32 v4, 0, v4, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_frexp_mant_f64_e64 v[6:7], |v[4:5]|
v_cmp_gt_f64_e32 vcc_lo, s[0:1], v[6:7]
v_cndmask_b32_e64 v0, 0, 1, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ldexp_f64 v[6:7], v[6:7], v0
v_frexp_exp_i32_f64_e32 v0, v[4:5]
v_add_f64 v[8:9], v[6:7], 1.0
v_add_f64 v[14:15], v[6:7], -1.0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_subrev_co_ci_u32_e32 v0, vcc_lo, 0, v0, vcc_lo
v_rcp_f64_e32 v[10:11], v[8:9]
v_add_f64 v[16:17], v[8:9], -1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[6:7], -v[16:17]
s_waitcnt_depctr 0xfff
v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
v_fma_f64 v[10:11], v[12:13], v[10:11], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[12:13], -v[8:9], v[10:11], 1.0
v_fma_f64 v[10:11], v[12:13], v[10:11], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[12:13], v[14:15], v[10:11]
v_mul_f64 v[18:19], v[8:9], v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[12:13], v[8:9], -v[18:19]
v_fma_f64 v[6:7], v[12:13], v[6:7], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[18:19], v[6:7]
v_add_f64 v[16:17], v[14:15], -v[8:9]
v_add_f64 v[18:19], v[8:9], -v[18:19]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[14:15], v[14:15], -v[16:17]
v_add_f64 v[6:7], v[18:19], -v[6:7]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[14:15], -v[8:9]
v_add_f64 v[6:7], v[6:7], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[16:17], v[6:7]
v_mul_f64 v[6:7], v[10:11], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[12:13], v[6:7]
v_add_f64 v[10:11], v[8:9], -v[12:13]
v_mul_f64 v[12:13], v[8:9], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[6:7], v[6:7], -v[10:11]
v_fma_f64 v[10:11], v[8:9], v[8:9], -v[12:13]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[14:15], v[6:7], v[6:7]
v_fma_f64 v[10:11], v[8:9], v[14:15], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[14:15], v[12:13], v[10:11]
v_fma_f64 v[16:17], v[14:15], s[12:13], s[10:11]
s_mov_b32 s11, 0x3fbe25e4
s_mov_b32 s10, 0x3abe935a
v_add_f64 v[12:13], v[14:15], -v[12:13]
v_mul_f64 v[22:23], v[8:9], v[14:15]
s_mov_b32 s13, 0x3ff71547
s_mov_b32 s12, 0x652b82fe
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fc110ef
s_mov_b32 s10, 0x47e6c9c2
v_add_f64 v[10:11], v[10:11], -v[12:13]
s_delay_alu instid0(VALU_DEP_2)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fc3b13b
s_mov_b32 s10, 0xcfa74449
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fc745d1
s_mov_b32 s10, 0x71bf3c30
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fcc71c7
s_mov_b32 s10, 0x1c7792ce
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fd24924
s_mov_b32 s10, 0x924920da
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3fd99999
s_mov_b32 s10, 0x9999999c
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[16:17], v[14:15], v[16:17], s[10:11]
s_mov_b32 s11, 0x3c7abc9e
s_mov_b32 s10, 0x3b39803f
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[18:19], v[14:15], v[16:17]
v_fma_f64 v[12:13], v[14:15], v[16:17], -v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[12:13], v[10:11], v[16:17], v[12:13]
v_add_f64 v[16:17], v[18:19], v[12:13]
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[20:21], v[16:17], s[0:1]
v_add_f64 v[18:19], v[16:17], -v[18:19]
s_mov_b32 s1, 0xbfe55555
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
v_add_f64 v[24:25], v[20:21], s[0:1]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[12:13], -v[18:19]
v_fma_f64 v[18:19], v[14:15], v[8:9], -v[22:23]
s_mov_b32 s1, 0x3c8543b0
s_mov_b32 s0, 0xd5df274d
v_add_f64 v[16:17], v[16:17], -v[24:25]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[12:13], s[0:1]
v_fma_f64 v[14:15], v[14:15], v[6:7], v[18:19]
s_mov_b32 s1, 0x3fe62e42
s_mov_b32 s0, 0xfefa39ef
v_ldexp_f64 v[6:7], v[6:7], 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[12:13], v[16:17]
v_fma_f64 v[10:11], v[10:11], v[8:9], v[14:15]
v_ldexp_f64 v[8:9], v[8:9], 1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[14:15], v[20:21], v[12:13]
v_add_f64 v[16:17], v[22:23], v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[18:19], v[20:21], -v[14:15]
v_mul_f64 v[20:21], v[16:17], v[14:15]
v_add_f64 v[22:23], v[16:17], -v[22:23]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[12:13], v[12:13], v[18:19]
v_fma_f64 v[18:19], v[16:17], v[14:15], -v[20:21]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[10:11], v[10:11], -v[22:23]
v_fma_f64 v[12:13], v[16:17], v[12:13], v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[10:11], v[10:11], v[14:15], v[12:13]
v_cvt_f64_i32_e32 v[14:15], v0
v_add_f64 v[12:13], v[20:21], v[10:11]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_4)
v_add_f64 v[16:17], v[8:9], v[12:13]
v_add_f64 v[18:19], v[12:13], -v[20:21]
v_mul_f64 v[20:21], v[14:15], s[0:1]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[8:9], v[16:17], -v[8:9]
v_add_f64 v[10:11], v[10:11], -v[18:19]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_fma_f64 v[18:19], v[14:15], s[0:1], -v[20:21]
s_mov_b32 s1, 0xbfe62e42
v_add_f64 v[8:9], v[12:13], -v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[6:7], v[6:7], v[10:11]
v_fma_f64 v[10:11], v[14:15], s[10:11], v[18:19]
s_mov_b32 s11, 0xbc7abc9e
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[6:7], v[6:7], v[8:9]
v_add_f64 v[8:9], v[20:21], v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[12:13], v[16:17], v[6:7]
v_add_f64 v[20:21], v[8:9], -v[20:21]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[14:15], v[8:9], v[12:13]
v_add_f64 v[16:17], v[12:13], -v[16:17]
v_add_f64 v[10:11], v[10:11], -v[20:21]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_f64 v[18:19], v[14:15], -v[8:9]
v_add_f64 v[6:7], v[6:7], -v[16:17]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[22:23], v[14:15], -v[18:19]
v_add_f64 v[12:13], v[12:13], -v[18:19]
v_add_f64 v[16:17], v[10:11], v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[8:9], -v[22:23]
v_add_f64 v[8:9], v[12:13], v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[12:13], v[16:17], -v[10:11]
v_add_f64 v[8:9], v[16:17], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_f64 v[16:17], v[16:17], -v[12:13]
v_add_f64 v[6:7], v[6:7], -v[12:13]
v_add_f64 v[18:19], v[14:15], v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[10:11], v[10:11], -v[16:17]
v_add_f64 v[12:13], v[18:19], -v[14:15]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_f64 v[6:7], v[6:7], v[10:11]
v_add_f64 v[8:9], v[8:9], -v[12:13]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[6:7], v[6:7], v[8:9]
v_add_f64 v[8:9], v[18:19], v[6:7]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f64 v[10:11], v[8:9], -v[18:19]
v_mul_f64 v[12:13], v[2:3], v[8:9]
v_add_f64 v[6:7], v[6:7], -v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_fma_f64 v[8:9], v[2:3], v[8:9], -v[12:13]
v_cmp_class_f64_e64 vcc_lo, v[12:13], 0x204
v_fma_f64 v[6:7], v[2:3], v[6:7], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f64 v[8:9], v[12:13], v[6:7]
v_dual_cndmask_b32 v11, v9, v13 :: v_dual_cndmask_b32 v10, v8, v12
v_add_f64 v[8:9], v[8:9], -v[12:13]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_f64 v[14:15], v[10:11], s[12:13]
v_cmp_neq_f64_e64 vcc_lo, 0x7ff00000, |v[10:11]|
v_add_f64 v[6:7], v[6:7], -v[8:9]
v_mul_f64 v[8:9], v[2:3], 0.5
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
v_rndne_f64_e32 v[14:15], v[14:15]
v_dual_cndmask_b32 v7, 0, v7 :: v_dual_cndmask_b32 v6, 0, v6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_fma_f64 v[16:17], v[14:15], s[0:1], v[10:11]
s_mov_b32 s1, 0x3e928af3
s_mov_b32 s0, 0xfca7ab0c
v_cvt_i32_f64_e32 v0, v[14:15]
v_fma_f64 v[16:17], v[14:15], s[10:11], v[16:17]
s_mov_b32 s11, 0x3e5ade15
s_mov_b32 s10, 0x6a5dcb37
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], s[10:11], s[0:1]
s_mov_b32 s1, 0x3ec71dee
s_mov_b32 s0, 0x623fde64
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3efa0199
s_mov_b32 s0, 0x7c89e6b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3f2a01a0
s_mov_b32 s0, 0x14761f6e
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3f56c16c
s_mov_b32 s0, 0x1852b7b0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3f811111
s_mov_b32 s0, 0x11122322
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3fa55555
s_mov_b32 s0, 0x555502a1
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3fc55555
s_mov_b32 s0, 0x55555511
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
s_mov_b32 s1, 0x3fe00000
s_mov_b32 s0, 11
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_fma_f64 v[18:19], v[16:17], v[18:19], s[0:1]
v_cmp_nlt_f64_e64 s0, 0x40900000, v[10:11]
v_cmp_ngt_f64_e64 s1, 0xc090cc00, v[10:11]
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_fma_f64 v[18:19], v[16:17], v[18:19], 1.0
s_and_b32 vcc_lo, s1, s0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[14:15], v[16:17], v[18:19], 1.0
v_ldexp_f64 v[12:13], v[14:15], v0
v_trunc_f64_e32 v[14:15], v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cndmask_b32_e64 v0, 0x7ff00000, v13, s0
v_cndmask_b32_e32 v10, 0, v12, vcc_lo
v_trunc_f64_e32 v[12:13], v[8:9]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v11, 0, v0, s1
v_cmp_eq_f64_e64 s1, v[14:15], v[2:3]
v_fma_f64 v[6:7], v[10:11], v[6:7], v[10:11]
v_cmp_class_f64_e64 s0, v[10:11], 0x204
v_cmp_neq_f64_e32 vcc_lo, v[12:13], v[8:9]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e64 v0, v6, v10, s0
v_cndmask_b32_e64 v6, v7, v11, s0
v_cmp_gt_f64_e64 s0, 0, v[4:5]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v7, 0, v0, s1
s_and_b32 vcc_lo, s1, vcc_lo
v_cndmask_b32_e32 v8, 0x3ff00000, v5, vcc_lo
v_bfi_b32 v6, 0x7fffffff, v6, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v0, v0, v7, s0
v_cndmask_b32_e64 v8, 0x7ff80000, v6, s1
v_cmp_gt_f64_e64 s1, 0, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v3, v6, v8, s0
v_cmp_eq_f64_e64 s0, 0, v[4:5]
v_cndmask_b32_e32 v4, 0, v5, vcc_lo
s_xor_b32 s1, s1, s0
v_cndmask_b32_e64 v2, v0, 0, s0
s_and_b32 s1, s1, exec_lo
s_cselect_b32 s1, 0, 0x7ff00000
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
v_bfi_b32 v0, 0x7fffffff, s1, v4
s_mul_i32 s1, s7, s6
s_add_i32 s1, s1, s8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cndmask_b32_e64 v3, v3, v0, s0
s_mul_i32 s0, s5, s4
v_cvt_i32_f64_e32 v0, v[2:3]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b32_e32 v0, 1, v0
v_add_nc_u32_e32 v0, s0, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v0, v0, s1
v_add3_u32 v0, v0, s0, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0)
v_cmp_lt_f32_e32 vcc_lo, 0, v2
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_3
v_mov_b32_e32 v2, 0
global_store_b32 v[0:1], v2, off
.LBB0_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13rovCorrectioniiiiiiiPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 26
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13rovCorrectioniiiiiiiPf, .Lfunc_end0-_Z13rovCorrectioniiiiiiiPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .offset: 8
.size: 4
.value_kind: by_value
- .offset: 12
.size: 4
.value_kind: by_value
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 20
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13rovCorrectioniiiiiiiPf
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13rovCorrectioniiiiiiiPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 26
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
__global__ void rovCorrection(int noPoints, int noDims, int dimRes, int nYears, int noControls, int year, int control, float* regression) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
float currVal = regression[year*noControls*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + control*(dimRes*noDims +
(int)pow(dimRes,noDims)*2) + dimRes*noDims + idx];
// The surrogate value cannot be greater than zero by definition
if (currVal > 0) {
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + dimRes*noDims + idx] = 0.0;
}
}
} | .text
.file "rovCorrection.hip"
.globl _Z28__device_stub__rovCorrectioniiiiiiiPf # -- Begin function _Z28__device_stub__rovCorrectioniiiiiiiPf
.p2align 4, 0x90
.type _Z28__device_stub__rovCorrectioniiiiiiiPf,@function
_Z28__device_stub__rovCorrectioniiiiiiiPf: # @_Z28__device_stub__rovCorrectioniiiiiiiPf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13rovCorrectioniiiiiiiPf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z28__device_stub__rovCorrectioniiiiiiiPf, .Lfunc_end0-_Z28__device_stub__rovCorrectioniiiiiiiPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13rovCorrectioniiiiiiiPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13rovCorrectioniiiiiiiPf,@object # @_Z13rovCorrectioniiiiiiiPf
.section .rodata,"a",@progbits
.globl _Z13rovCorrectioniiiiiiiPf
.p2align 3, 0x0
_Z13rovCorrectioniiiiiiiPf:
.quad _Z28__device_stub__rovCorrectioniiiiiiiPf
.size _Z13rovCorrectioniiiiiiiPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13rovCorrectioniiiiiiiPf"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__rovCorrectioniiiiiiiPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13rovCorrectioniiiiiiiPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000f8913_00000000-6_rovCorrection.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
.type _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf, @function
_Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf:
.LFB2051:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq 200(%rsp), %rax
movq %rax, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 20(%rsp), %rax
movq %rax, 112(%rsp)
leaq 16(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 192(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 200
pushq 40(%rsp)
.cfi_def_cfa_offset 208
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13rovCorrectioniiiiiiiPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf, .-_Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
.globl _Z13rovCorrectioniiiiiiiPf
.type _Z13rovCorrectioniiiiiiiPf, @function
_Z13rovCorrectioniiiiiiiPf:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
pushq 24(%rsp)
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z40__device_stub__Z13rovCorrectioniiiiiiiPfiiiiiiiPf
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z13rovCorrectioniiiiiiiPf, .-_Z13rovCorrectioniiiiiiiPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z13rovCorrectioniiiiiiiPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z13rovCorrectioniiiiiiiPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "rovCorrection.hip"
.globl _Z28__device_stub__rovCorrectioniiiiiiiPf # -- Begin function _Z28__device_stub__rovCorrectioniiiiiiiPf
.p2align 4, 0x90
.type _Z28__device_stub__rovCorrectioniiiiiiiPf,@function
_Z28__device_stub__rovCorrectioniiiiiiiPf: # @_Z28__device_stub__rovCorrectioniiiiiiiPf
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movl %edx, 20(%rsp)
movl %ecx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 28(%rsp), %rax
movq %rax, 80(%rsp)
leaq 24(%rsp), %rax
movq %rax, 88(%rsp)
leaq 20(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
leaq 160(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13rovCorrectioniiiiiiiPf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z28__device_stub__rovCorrectioniiiiiiiPf, .Lfunc_end0-_Z28__device_stub__rovCorrectioniiiiiiiPf
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13rovCorrectioniiiiiiiPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13rovCorrectioniiiiiiiPf,@object # @_Z13rovCorrectioniiiiiiiPf
.section .rodata,"a",@progbits
.globl _Z13rovCorrectioniiiiiiiPf
.p2align 3, 0x0
_Z13rovCorrectioniiiiiiiPf:
.quad _Z28__device_stub__rovCorrectioniiiiiiiPf
.size _Z13rovCorrectioniiiiiiiPf, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z13rovCorrectioniiiiiiiPf"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__rovCorrectioniiiiiiiPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13rovCorrectioniiiiiiiPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} | code for sm_80
Function : _Z7_gatherPKfPKiPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.HI R0, R6, 0x2aaaaaab, RZ ; /* 0x2aaaaaab06007827 */
/* 0x000fe200078e02ff */
/*0070*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ LEA.HI R5, R0, R0, RZ, 0x1 ; /* 0x0000000000057211 */
/* 0x000fcc00078f08ff */
/*00a0*/ IMAD.WIDE R2, R5, R8, c[0x0][0x168] ; /* 0x00005a0005027625 */
/* 0x000fcc00078e0208 */
/*00b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD R5, R5, -0x6, R6 ; /* 0xfffffffa05057824 */
/* 0x000fc800078e0206 */
/*00d0*/ IMAD R5, R2, 0x6, R5 ; /* 0x0000000602057824 */
/* 0x004fc800078e0205 */
/*00e0*/ IMAD.WIDE R4, R5, R8, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fcc00078e0208 */
/*00f0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R6, R8, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fca00078e0208 */
/*0110*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x004fe2000c101904 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} | .file "tmpxft_0014152a_00000000-6__gather.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
.type _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi, @function
_Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7_gatherPKfPKiPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi, .-_Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
.globl _Z7_gatherPKfPKiPfi
.type _Z7_gatherPKfPKiPfi, @function
_Z7_gatherPKfPKiPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7_gatherPKfPKiPfi, .-_Z7_gatherPKfPKiPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7_gatherPKfPKiPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7_gatherPKfPKiPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7_gatherPKfPKiPfi
.globl _Z7_gatherPKfPKiPfi
.p2align 8
.type _Z7_gatherPKfPKiPfi,@function
_Z7_gatherPKfPKiPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
v_mul_hi_i32 v0, v1, 0x2aaaaaab
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v0
v_add_nc_u32_e32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_mul_lo_u32 v0, v2, 6
v_lshlrev_b64 v[3:4], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v0, v1, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v4, v[3:4], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, v4, 6, v[0:1]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7_gatherPKfPKiPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7_gatherPKfPKiPfi, .Lfunc_end0-_Z7_gatherPKfPKiPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7_gatherPKfPKiPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7_gatherPKfPKiPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void _gather(const float * input, const int * indices, float * output, const int n)
{
const int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < n)
output[tid] = input[(tid % 6) + 6 * indices[tid / 6]];
} | .text
.file "_gather.hip"
.globl _Z22__device_stub___gatherPKfPKiPfi # -- Begin function _Z22__device_stub___gatherPKfPKiPfi
.p2align 4, 0x90
.type _Z22__device_stub___gatherPKfPKiPfi,@function
_Z22__device_stub___gatherPKfPKiPfi: # @_Z22__device_stub___gatherPKfPKiPfi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7_gatherPKfPKiPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub___gatherPKfPKiPfi, .Lfunc_end0-_Z22__device_stub___gatherPKfPKiPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7_gatherPKfPKiPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7_gatherPKfPKiPfi,@object # @_Z7_gatherPKfPKiPfi
.section .rodata,"a",@progbits
.globl _Z7_gatherPKfPKiPfi
.p2align 3, 0x0
_Z7_gatherPKfPKiPfi:
.quad _Z22__device_stub___gatherPKfPKiPfi
.size _Z7_gatherPKfPKiPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7_gatherPKfPKiPfi"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub___gatherPKfPKiPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7_gatherPKfPKiPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7_gatherPKfPKiPfi
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e280000002100 */
/*0020*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e240000002500 */
/*0030*/ IMAD R6, R3, c[0x0][0x0], R6 ; /* 0x0000000003067a24 */
/* 0x001fca00078e0206 */
/*0040*/ ISETP.GE.AND P0, PT, R6, c[0x0][0x178], PT ; /* 0x00005e0006007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ IMAD.HI R0, R6, 0x2aaaaaab, RZ ; /* 0x2aaaaaab06007827 */
/* 0x000fe200078e02ff */
/*0070*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0090*/ LEA.HI R5, R0, R0, RZ, 0x1 ; /* 0x0000000000057211 */
/* 0x000fcc00078f08ff */
/*00a0*/ IMAD.WIDE R2, R5, R8, c[0x0][0x168] ; /* 0x00005a0005027625 */
/* 0x000fcc00078e0208 */
/*00b0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea2000c1e1900 */
/*00c0*/ IMAD R5, R5, -0x6, R6 ; /* 0xfffffffa05057824 */
/* 0x000fc800078e0206 */
/*00d0*/ IMAD R5, R2, 0x6, R5 ; /* 0x0000000602057824 */
/* 0x004fc800078e0205 */
/*00e0*/ IMAD.WIDE R4, R5, R8, c[0x0][0x160] ; /* 0x0000580005047625 */
/* 0x000fcc00078e0208 */
/*00f0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0100*/ IMAD.WIDE R6, R6, R8, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fca00078e0208 */
/*0110*/ STG.E [R6.64], R5 ; /* 0x0000000506007986 */
/* 0x004fe2000c101904 */
/*0120*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0130*/ BRA 0x130; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7_gatherPKfPKiPfi
.globl _Z7_gatherPKfPKiPfi
.p2align 8
.type _Z7_gatherPKfPKiPfi,@function
_Z7_gatherPKfPKiPfi:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
v_mul_hi_i32 v0, v1, 0x2aaaaaab
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v0
v_add_nc_u32_e32 v2, v0, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v3, 31, v2
v_mul_lo_u32 v0, v2, 6
v_lshlrev_b64 v[3:4], 2, v[2:3]
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v0, v1, v0
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s6, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v4, v[3:4], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, v4, 6, v[0:1]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
global_load_b32 v3, v[2:3], off
v_ashrrev_i32_e32 v2, 31, v1
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[0:1], v3, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7_gatherPKfPKiPfi
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7_gatherPKfPKiPfi, .Lfunc_end0-_Z7_gatherPKfPKiPfi
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7_gatherPKfPKiPfi
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7_gatherPKfPKiPfi.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0014152a_00000000-6__gather.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
.type _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi, @function
_Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7_gatherPKfPKiPfi(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi, .-_Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
.globl _Z7_gatherPKfPKiPfi
.type _Z7_gatherPKfPKiPfi, @function
_Z7_gatherPKfPKiPfi:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z7_gatherPKfPKiPfiPKfPKiPfi
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z7_gatherPKfPKiPfi, .-_Z7_gatherPKfPKiPfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z7_gatherPKfPKiPfi"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z7_gatherPKfPKiPfi(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "_gather.hip"
.globl _Z22__device_stub___gatherPKfPKiPfi # -- Begin function _Z22__device_stub___gatherPKfPKiPfi
.p2align 4, 0x90
.type _Z22__device_stub___gatherPKfPKiPfi,@function
_Z22__device_stub___gatherPKfPKiPfi: # @_Z22__device_stub___gatherPKfPKiPfi
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7_gatherPKfPKiPfi, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub___gatherPKfPKiPfi, .Lfunc_end0-_Z22__device_stub___gatherPKfPKiPfi
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7_gatherPKfPKiPfi, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7_gatherPKfPKiPfi,@object # @_Z7_gatherPKfPKiPfi
.section .rodata,"a",@progbits
.globl _Z7_gatherPKfPKiPfi
.p2align 3, 0x0
_Z7_gatherPKfPKiPfi:
.quad _Z22__device_stub___gatherPKfPKiPfi
.size _Z7_gatherPKfPKiPfi, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z7_gatherPKfPKiPfi"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub___gatherPKfPKiPfi
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7_gatherPKfPKiPfi
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
cudaMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
cudaThreadSynchronize();
// Copy it back over
cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
cudaFree(d_a);
return 0;
} | code for sm_80
Function : _Z6kernelPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ I2F.U32 R0, c[0x0][0x0] ; /* 0x0000000000007b06 */
/* 0x000e620000201000 */
/*0030*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ STS [R4.X4], RZ ; /* 0x000000ff04007388 */
/* 0x001ff00000004800 */
/*0060*/ IMAD.WIDE.U32 R2, R4, R3, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fe200078e0003 */
/*0070*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0080*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x000e640000004800 */
/*0090*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x002fca0000000000 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00b0*/ STS [R4.X4], R5 ; /* 0x0000000504007388 */
/* 0x000fe20000004800 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
cudaMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
cudaThreadSynchronize();
// Copy it back over
cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
cudaFree(d_a);
return 0;
} | .file "tmpxft_0007fb70_00000000-6_hello_world_shared_0.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6kernelPfPf
.type _Z25__device_stub__Z6kernelPfPf, @function
_Z25__device_stub__Z6kernelPfPf:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6kernelPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z6kernelPfPf, .-_Z25__device_stub__Z6kernelPfPf
.globl _Z6kernelPf
.type _Z6kernelPf, @function
_Z6kernelPf:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6kernelPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPf, .-_Z6kernelPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "couldn't allocate memory\n"
.LC1:
.string "%f "
.LC2:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movl $64, %edi
call malloc@PLT
movq %rax, %r13
leaq 8(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
testq %r13, %r13
je .L12
movq 8(%rsp), %rdi
testq %rdi, %rdi
je .L12
movl $64, %edx
movl $0, %esi
call cudaMemset@PLT
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $16, 28(%rsp)
movl $16, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L15:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movl $64, %edx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq %r13, %rbx
leaq 64(%r13), %r12
leaq .LC1(%rip), %rbp
.L16:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L16
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L11:
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L21
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L11
.L20:
movq 8(%rsp), %rdi
call _Z25__device_stub__Z6kernelPfPf
jmp .L15
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z6kernelPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
cudaMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
cudaThreadSynchronize();
// Copy it back over
cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
cudaFree(d_a);
return 0;
} | #include <hip/hip_runtime.h>
#include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
hipMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
hipDeviceSynchronize();
// Copy it back over
hipMemcpy(h_a,d_a,num_bytes,hipMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
hipFree(d_a);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
hipMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
hipDeviceSynchronize();
// Copy it back over
hipMemcpy(h_a,d_a,num_bytes,hipMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
hipFree(d_a);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPf
.globl _Z6kernelPf
.p2align 8
.type _Z6kernelPf,@function
_Z6kernelPf:
s_load_b32 s2, s[0:1], 0x14
v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_cvt_f32_u32_e32 v3, s2
v_add_nc_u32_e32 v2, 0, v0
ds_store_b32 v2, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v1, v1, v3
ds_store_b32 v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPf, .Lfunc_end0-_Z6kernelPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPf
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z6kernelPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<stdio.h>
__global__ void kernel (float *out)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
sdata[tid] = 0.0;
__syncthreads();
//extern __shared__ float device_memory[];
// Compute the index variable
int idx = blockIdx.x*blockDim.x + threadIdx.x;
//device_memory[threadIdx.x] += blockDim.x;
sdata[tid] += blockDim.x;
//device_memory[threadIdx.x] += threadIdx.x;
// Do I need this to get the output?
// Maybe not
//__syncthreads();
//out[threadIdx.x] = blockIdx.x;
out[tid] = sdata[tid];
}
int main()
{
int nbins = 16;
int dimx = 16;
int num_bytes = dimx*sizeof(float);
float *d_a=0, *h_a=0; // device and host pointers
// Allocate memory on host (CPU)
h_a = (float*)malloc(num_bytes);
// Allocate memory on device (GPU)
hipMalloc((void**)&d_a,num_bytes);
// Check to see that there was enough memory for both
// allocations.
// If the memory allocation fails, it doesn't change the
// pointer value. That is why we set them to be 0 at declaration,
// and then see if they have changed or stayed the same.
if (0==h_a || 0==d_a)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_a,0,num_bytes);
//-----------------------------------------------------------------------//
// Some explanatory code
/*
// This will give us 256 thread blocks, arranged in a 16x16 grid.
dim3 grid(16,16);
// This will give us 256 threads/block, arranged in a 16x16 grid.
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(XXX);
// This is a shortcut for launching some thread blocks.
// It launches a grid of 32 thread blocks arranged in a 1x32 grid
// and 512 threads per block, arranged in a 1x512 array.
kernel<<<32,512>>>(YYY);
*/
//dim3 grid,block;
//block.x = 8;
//grid.x = dimx/block.x;
//kernel<<<grid,block>>>(d_a);
//kernel<<<4,16>>>(d_a);
dim3 grid(16,16);
dim3 block(16,16);
kernel<<<grid,block,0,0>>>(d_a);
hipDeviceSynchronize();
// Copy it back over
hipMemcpy(h_a,d_a,num_bytes,hipMemcpyDeviceToHost);
for (int i=0;i<dimx;i++)
{
printf("%f ",h_a[i]);
}
printf("\n");
free(h_a);
hipFree(d_a);
return 0;
} | .text
.file "hello_world_shared_0.hip"
.globl _Z21__device_stub__kernelPf # -- Begin function _Z21__device_stub__kernelPf
.p2align 4, 0x90
.type _Z21__device_stub__kernelPf,@function
_Z21__device_stub__kernelPf: # @_Z21__device_stub__kernelPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6kernelPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPf, .Lfunc_end0-_Z21__device_stub__kernelPf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $88, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq $0, 8(%rsp)
movl $64, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
testq %rbx, %rbx
je .LBB1_2
# %bb.1:
movq 8(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_2
# %bb.3:
movl $64, %edx
xorl %esi, %esi
callq hipMemset
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6kernelPf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $64, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_6: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r14
cmpq $16, %r14
jne .LBB1_6
# %bb.7:
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
jmp .LBB1_8
.LBB1_2:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %eax
.LBB1_8:
addq $88, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPf,@object # @_Z6kernelPf
.section .rodata,"a",@progbits
.globl _Z6kernelPf
.p2align 3, 0x0
_Z6kernelPf:
.quad _Z21__device_stub__kernelPf
.size _Z6kernelPf, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f "
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPf"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "couldn't allocate memory"
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6kernelPf
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_TID.X ; /* 0x0000000000047919 */
/* 0x000e220000002100 */
/*0020*/ I2F.U32 R0, c[0x0][0x0] ; /* 0x0000000000007b06 */
/* 0x000e620000201000 */
/*0030*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0040*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0050*/ STS [R4.X4], RZ ; /* 0x000000ff04007388 */
/* 0x001ff00000004800 */
/*0060*/ IMAD.WIDE.U32 R2, R4, R3, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fe200078e0003 */
/*0070*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0080*/ LDS R5, [R4.X4] ; /* 0x0000000004057984 */
/* 0x000e640000004800 */
/*0090*/ FADD R5, R0, R5 ; /* 0x0000000500057221 */
/* 0x002fca0000000000 */
/*00a0*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x000fe8000c101904 */
/*00b0*/ STS [R4.X4], R5 ; /* 0x0000000504007388 */
/* 0x000fe20000004800 */
/*00c0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00d0*/ BRA 0xd0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6kernelPf
.globl _Z6kernelPf
.p2align 8
.type _Z6kernelPf,@function
_Z6kernelPf:
s_load_b32 s2, s[0:1], 0x14
v_dual_mov_b32 v1, 0 :: v_dual_lshlrev_b32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1)
v_cvt_f32_u32_e32 v3, s2
v_add_nc_u32_e32 v2, 0, v0
ds_store_b32 v2, v1
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v1, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v1, v1, v3
ds_store_b32 v2, v1
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6kernelPf
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 264
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 3
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6kernelPf, .Lfunc_end0-_Z6kernelPf
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .offset: 8
.size: 4
.value_kind: hidden_block_count_x
- .offset: 12
.size: 4
.value_kind: hidden_block_count_y
- .offset: 16
.size: 4
.value_kind: hidden_block_count_z
- .offset: 20
.size: 2
.value_kind: hidden_group_size_x
- .offset: 22
.size: 2
.value_kind: hidden_group_size_y
- .offset: 24
.size: 2
.value_kind: hidden_group_size_z
- .offset: 26
.size: 2
.value_kind: hidden_remainder_x
- .offset: 28
.size: 2
.value_kind: hidden_remainder_y
- .offset: 30
.size: 2
.value_kind: hidden_remainder_z
- .offset: 48
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 56
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 72
.size: 2
.value_kind: hidden_grid_dims
- .offset: 128
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 264
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6kernelPf
.private_segment_fixed_size: 0
.sgpr_count: 3
.sgpr_spill_count: 0
.symbol: _Z6kernelPf.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0007fb70_00000000-6_hello_world_shared_0.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z25__device_stub__Z6kernelPfPf
.type _Z25__device_stub__Z6kernelPfPf, @function
_Z25__device_stub__Z6kernelPfPf:
.LFB2082:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z6kernelPf(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z25__device_stub__Z6kernelPfPf, .-_Z25__device_stub__Z6kernelPfPf
.globl _Z6kernelPf
.type _Z6kernelPf, @function
_Z6kernelPf:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z25__device_stub__Z6kernelPfPf
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6kernelPf, .-_Z6kernelPf
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "couldn't allocate memory\n"
.LC1:
.string "%f "
.LC2:
.string "\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movq $0, 8(%rsp)
movl $64, %edi
call malloc@PLT
movq %rax, %r13
leaq 8(%rsp), %rdi
movl $64, %esi
call cudaMalloc@PLT
testq %r13, %r13
je .L12
movq 8(%rsp), %rdi
testq %rdi, %rdi
je .L12
movl $64, %edx
movl $0, %esi
call cudaMemset@PLT
movl $16, 16(%rsp)
movl $16, 20(%rsp)
movl $16, 28(%rsp)
movl $16, 32(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L20
.L15:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movl $64, %edx
movq 8(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movq %r13, %rbx
leaq 64(%r13), %r12
leaq .LC1(%rip), %rbp
.L16:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L16
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %r13, %rdi
call free@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movl $0, %eax
.L11:
movq 40(%rsp), %rdx
subq %fs:40, %rdx
jne .L21
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore_state
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %eax
jmp .L11
.L20:
movq 8(%rsp), %rdi
call _Z25__device_stub__Z6kernelPfPf
jmp .L15
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z6kernelPf"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z6kernelPf(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "hello_world_shared_0.hip"
.globl _Z21__device_stub__kernelPf # -- Begin function _Z21__device_stub__kernelPf
.p2align 4, 0x90
.type _Z21__device_stub__kernelPf,@function
_Z21__device_stub__kernelPf: # @_Z21__device_stub__kernelPf
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z6kernelPf, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z21__device_stub__kernelPf, .Lfunc_end0-_Z21__device_stub__kernelPf
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r14
.cfi_def_cfa_offset 16
pushq %rbx
.cfi_def_cfa_offset 24
subq $88, %rsp
.cfi_def_cfa_offset 112
.cfi_offset %rbx, -24
.cfi_offset %r14, -16
movq $0, 8(%rsp)
movl $64, %edi
callq malloc
movq %rax, %rbx
leaq 8(%rsp), %rdi
movl $64, %esi
callq hipMalloc
testq %rbx, %rbx
je .LBB1_2
# %bb.1:
movq 8(%rsp), %rdi
testq %rdi, %rdi
je .LBB1_2
# %bb.3:
movl $64, %edx
xorl %esi, %esi
callq hipMemset
movabsq $68719476752, %rdi # imm = 0x1000000010
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_5
# %bb.4:
movq 8(%rsp), %rax
movq %rax, 80(%rsp)
leaq 80(%rsp), %rax
movq %rax, 16(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6kernelPf, %edi
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_5:
callq hipDeviceSynchronize
movq 8(%rsp), %rsi
movl $64, %edx
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_6: # =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
incq %r14
cmpq $16, %r14
jne .LBB1_6
# %bb.7:
movl $10, %edi
callq putchar@PLT
movq %rbx, %rdi
callq free
movq 8(%rsp), %rdi
callq hipFree
xorl %eax, %eax
jmp .LBB1_8
.LBB1_2:
movl $.Lstr, %edi
callq puts@PLT
movl $1, %eax
.LBB1_8:
addq $88, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6kernelPf, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6kernelPf,@object # @_Z6kernelPf
.section .rodata,"a",@progbits
.globl _Z6kernelPf
.p2align 3, 0x0
_Z6kernelPf:
.quad _Z21__device_stub__kernelPf
.size _Z6kernelPf, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "%f "
.size .L.str.1, 4
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6kernelPf"
.size .L__unnamed_1, 12
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "couldn't allocate memory"
.size .Lstr, 25
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__kernelPf
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6kernelPf
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
cudaMalloc((void**)&d_a,mem_size_a);
cudaMemcpy(d_a,h_a,mem_size_a,cudaMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
cudaMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
cudaMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
cudaMemcpy(h_b,d_b,mem_size_b,cudaMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
cudaMemcpy(h_c,d_c,mem_size_c,cudaMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} | code for sm_80
Function : _Z13ReductionMax2PiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R0, -RZ, RZ, -0.0 , 5.9604644775390625e-08 ; /* 0x80000001ff007435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R5 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06070 */
/*0070*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff038424 */
/* 0x000fc800078e00ff */
/*0080*/ @!P0 IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fca00078e0003 */
/*0090*/ @!P0 LDG.E R0, [R2.64] ; /* 0x0000000602008981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00c0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00d0*/ STS [R5.X4], R0 ; /* 0x0000000005007388 */
/* 0x0041e80000004800 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00f0*/ @!P0 BRA 0x1f0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0100*/ IMAD.SHL.U32 R0, R5, 0x4, RZ ; /* 0x0000000405007824 */
/* 0x001fe200078e00ff */
/*0110*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc60008000f00 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R5, R3, PT ; /* 0x000000030500720c */
/* 0x000fca0003f06070 */
/*0140*/ BSSY B0, 0x1c0 ; /* 0x0000007000007945 */
/* 0x000ff00003800000 */
/*0150*/ @P0 BRA 0x1b0 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0160*/ IMAD R2, R3, 0x4, R0 ; /* 0x0000000403027824 */
/* 0x000fe200078e0200 */
/*0170*/ LDS R4, [R5.X4] ; /* 0x0000000005047984 */
/* 0x000fea0000004800 */
/*0180*/ LDS R2, [R2] ; /* 0x0000000002027984 */
/* 0x000e240000000800 */
/*0190*/ ISETP.GT.AND P0, PT, R2, R4, PT ; /* 0x000000040200720c */
/* 0x001fda0003f04270 */
/*01a0*/ @P0 STS [R5.X4], R2 ; /* 0x0000000205000388 */
/* 0x0001e40000004800 */
/*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01c0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc80000011603 */
/*01d0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01e0*/ @P0 BRA 0x120 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*01f0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0220*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0230*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0250*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0260*/ BRA 0x260; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
cudaMalloc((void**)&d_a,mem_size_a);
cudaMemcpy(d_a,h_a,mem_size_a,cudaMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
cudaMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
cudaMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
cudaMemcpy(h_b,d_b,mem_size_b,cudaMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
cudaMemcpy(h_c,d_c,mem_size_c,cudaMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} | .file "tmpxft_000d3131_00000000-6_asignmaxmin.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11random_intsPii
.type _Z11random_intsPii, @function
_Z11random_intsPii:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %esi, %ebp
movq %rdi, %rbx
movslq %esi, %rax
leaq (%rdi,%rax,4), %r12
.L5:
call rand@PLT
cltd
idivl %ebp
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L5
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3669:
.size _Z11random_intsPii, .-_Z11random_intsPii
.globl _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
.type _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i, @function
_Z36__device_stub__Z13ReductionMax2PiS_iPiS_i:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13ReductionMax2PiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i, .-_Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
.globl _Z13ReductionMax2PiS_i
.type _Z13ReductionMax2PiS_i, @function
_Z13ReductionMax2PiS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z13ReductionMax2PiS_i, .-_Z13ReductionMax2PiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string ","
.LC1:
.string "\nblock max"
.LC2:
.string "\nparallel max="
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $2048, %edi
call malloc@PLT
movq %rax, %rbp
movl $512, %esi
movq %rax, %rdi
call _Z11random_intsPii
leaq 8(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movl $32, %esi
call cudaMalloc@PLT
movl $32, %edi
call malloc@PLT
movq %rax, %r14
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $64, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L20:
movl $2, %ecx
movl $32, %edx
movq 16(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl $8, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L21:
movl $4, %edi
call malloc@PLT
movq %rax, %r15
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movq %rbp, %rbx
addq $2048, %rbp
leaq _ZSt4cout(%rip), %r13
leaq .LC0(%rip), %r12
.L22:
movl (%rbx), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L22
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %r14, %rbx
addq $32, %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC0(%rip), %rbp
.L23:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %r14
jne .L23
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl (%r15), %esi
call _ZNSolsEi@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl $512, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
jmp .L20
.L29:
movl $8, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
jmp .L21
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z13ReductionMax2PiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13ReductionMax2PiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
cudaMalloc((void**)&d_a,mem_size_a);
cudaMemcpy(d_a,h_a,mem_size_a,cudaMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
cudaMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
cudaMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
cudaMemcpy(h_b,d_b,mem_size_b,cudaMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
cudaMemcpy(h_c,d_c,mem_size_c,cudaMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} | #include<iostream>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
hipMalloc((void**)&d_a,mem_size_a);
hipMemcpy(d_a,h_a,mem_size_a,hipMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
hipMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
hipMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
hipMemcpy(h_b,d_b,mem_size_b,hipMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
hipMemcpy(h_c,d_c,mem_size_c,hipMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include<iostream>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
hipMalloc((void**)&d_a,mem_size_a);
hipMemcpy(d_a,h_a,mem_size_a,hipMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
hipMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
hipMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
hipMemcpy(h_b,d_b,mem_size_b,hipMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
hipMemcpy(h_c,d_c,mem_size_c,hipMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13ReductionMax2PiS_i
.globl _Z13ReductionMax2PiS_i
.p2align 8
.type _Z13ReductionMax2PiS_i,@function
_Z13ReductionMax2PiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_mov_b32_e32 v2, 0x80000001
v_cmp_gt_u32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
v_lshlrev_b32_e32 v1, 2, v0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_7
.LBB0_3:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_5
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_lt_u32 s4, 4
s_cbranch_scc1 .LBB0_3
.LBB0_7:
s_mov_b32 s4, s3
s_lshr_b32 s3, s3, 1
s_mov_b32 s5, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_6
v_add_lshl_u32 v2, s3, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v2, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
ds_store_b32 v1, v2
s_branch .LBB0_6
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13ReductionMax2PiS_i
.amdhsa_group_segment_fixed_size 256
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13ReductionMax2PiS_i, .Lfunc_end0-_Z13ReductionMax2PiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 256
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13ReductionMax2PiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13ReductionMax2PiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include<iostream>
#include<stdlib.h>
#include<hip/hip_runtime.h>
#include<time.h>
#define BLOCK_SIZE 64
#define SOA 512
void random_ints(int *data,int size)
{
int i;
for(i=0;i<size;i++)
{
data[i]=rand()%size;
}
}
__global__ void ReductionMax2(int *input,int *results,int n)
{
__shared__ int sdata[BLOCK_SIZE];
unsigned int i=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int tx=threadIdx.x;
int x=-INT_MAX;
if(i<n)
x=input[i];
sdata[tx]=x;
__syncthreads();
for(unsigned int offset=blockDim.x>>1;offset>0;offset >>=1)
{
__syncthreads();
if(tx<offset)
{
if(sdata[tx+offset]>sdata[tx])
sdata[tx]=sdata[tx+offset];
}
}
if(threadIdx.x==0)
{
results[blockIdx.x]=sdata[0];
}
}
int main()
{
int num_blocks=SOA/BLOCK_SIZE;
int num_threads=BLOCK_SIZE,i;
unsigned int mem_size_a=sizeof(int)*SOA;
int *h_a=(int*)malloc(mem_size_a);
random_ints(h_a,SOA);
int *d_a;
hipMalloc((void**)&d_a,mem_size_a);
hipMemcpy(d_a,h_a,mem_size_a,hipMemcpyHostToDevice);
unsigned int mem_size_b=sizeof(int)*num_blocks;
int *d_b;
hipMalloc((void**)&d_b,mem_size_b);
int *h_b=(int*)malloc(mem_size_b);
unsigned int mem_size_c=sizeof(int);
int *d_c;
hipMalloc((void**)&d_c,mem_size_c);
ReductionMax2<<<num_blocks,num_threads>>>(d_a,d_b,SOA);
hipMemcpy(h_b,d_b,mem_size_b,hipMemcpyDeviceToHost);
ReductionMax2<<<1,num_blocks>>>(d_b,d_c,num_blocks);
int *h_c=(int*)malloc(mem_size_c);
hipMemcpy(h_c,d_c,mem_size_c,hipMemcpyDeviceToHost);
int j;
for(j=0;j<SOA;j++)
{
std::cout<<h_a[j]<<",";
}
std::cout<<"\nblock max";
for(j=0;j<num_blocks;j++)
{
std::cout<<h_b[j]<<",";
}
std::cout<<"\nparallel max="<<*h_c;
} | .text
.file "asignmaxmin.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %esi, %ebx
movq %rdi, %r14
movl %esi, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl %ebx
movl %edx, (%r14,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB0_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z11random_intsPii, .Lfunc_end0-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__ReductionMax2PiS_i # -- Begin function _Z28__device_stub__ReductionMax2PiS_i
.p2align 4, 0x90
.type _Z28__device_stub__ReductionMax2PiS_i,@function
_Z28__device_stub__ReductionMax2PiS_i: # @_Z28__device_stub__ReductionMax2PiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z28__device_stub__ReductionMax2PiS_i, .Lfunc_end1-_Z28__device_stub__ReductionMax2PiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 511(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-512, %ecx # imm = 0xFE00
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $512, %r14 # imm = 0x200
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movabsq $4294967304, %r15 # imm = 0x100000008
leaq 88(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movq 88(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movl $32, %esi
callq hipMalloc
movl $32, %edi
callq malloc
movq %rax, %r14
leaq 80(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 56(%r15), %rdx
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 88(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl $512, 4(%rsp) # imm = 0x200
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
movq 8(%rsp), %rsi
movl $32, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
leaq -7(%r15), %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 8(%rsp), %rax
movq 80(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl $8, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movl $4, %edi
callq malloc
movq %rax, %r15
movq 80(%rsp), %rsi
movl $4, %edx
movq %rax, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_7: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq $512, %r12 # imm = 0x200
jne .LBB2_7
# %bb.8:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_9: # =>This Inner Loop Header: Depth=1
movl (%r14,%rbx,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %rbx
cmpq $8, %rbx
jne .LBB2_9
# %bb.10:
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r15), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13ReductionMax2PiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13ReductionMax2PiS_i,@object # @_Z13ReductionMax2PiS_i
.section .rodata,"a",@progbits
.globl _Z13ReductionMax2PiS_i
.p2align 3, 0x0
_Z13ReductionMax2PiS_i:
.quad _Z28__device_stub__ReductionMax2PiS_i
.size _Z13ReductionMax2PiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz ","
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nblock max"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nparallel max="
.size .L.str.2, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13ReductionMax2PiS_i"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__ReductionMax2PiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13ReductionMax2PiS_i
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z13ReductionMax2PiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e220000002500 */
/*0020*/ HFMA2.MMA R0, -RZ, RZ, -0.0 , 5.9604644775390625e-08 ; /* 0x80000001ff007435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e240000002100 */
/*0050*/ IMAD R2, R6, c[0x0][0x0], R5 ; /* 0x0000000006027a24 */
/* 0x001fca00078e0205 */
/*0060*/ ISETP.GE.U32.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06070 */
/*0070*/ @!P0 IMAD.MOV.U32 R3, RZ, RZ, 0x4 ; /* 0x00000004ff038424 */
/* 0x000fc800078e00ff */
/*0080*/ @!P0 IMAD.WIDE.U32 R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002028625 */
/* 0x000fca00078e0003 */
/*0090*/ @!P0 LDG.E R0, [R2.64] ; /* 0x0000000602008981 */
/* 0x000ea2000c1e1900 */
/*00a0*/ ULDC UR4, c[0x0][0x0] ; /* 0x0000000000047ab9 */
/* 0x000fe40000000800 */
/*00b0*/ USHF.R.U32.HI UR4, URZ, 0x1, UR4 ; /* 0x000000013f047899 */
/* 0x000fcc0008011604 */
/*00c0*/ ISETP.NE.AND P0, PT, RZ, UR4, PT ; /* 0x00000004ff007c0c */
/* 0x000fe2000bf05270 */
/*00d0*/ STS [R5.X4], R0 ; /* 0x0000000005007388 */
/* 0x0041e80000004800 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000ff00000010000 */
/*00f0*/ @!P0 BRA 0x1f0 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0100*/ IMAD.SHL.U32 R0, R5, 0x4, RZ ; /* 0x0000000405007824 */
/* 0x001fe200078e00ff */
/*0110*/ MOV R3, UR4 ; /* 0x0000000400037c02 */
/* 0x000fc60008000f00 */
/*0120*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0130*/ ISETP.GE.U32.AND P0, PT, R5, R3, PT ; /* 0x000000030500720c */
/* 0x000fca0003f06070 */
/*0140*/ BSSY B0, 0x1c0 ; /* 0x0000007000007945 */
/* 0x000ff00003800000 */
/*0150*/ @P0 BRA 0x1b0 ; /* 0x0000005000000947 */
/* 0x001fea0003800000 */
/*0160*/ IMAD R2, R3, 0x4, R0 ; /* 0x0000000403027824 */
/* 0x000fe200078e0200 */
/*0170*/ LDS R4, [R5.X4] ; /* 0x0000000005047984 */
/* 0x000fea0000004800 */
/*0180*/ LDS R2, [R2] ; /* 0x0000000002027984 */
/* 0x000e240000000800 */
/*0190*/ ISETP.GT.AND P0, PT, R2, R4, PT ; /* 0x000000040200720c */
/* 0x001fda0003f04270 */
/*01a0*/ @P0 STS [R5.X4], R2 ; /* 0x0000000205000388 */
/* 0x0001e40000004800 */
/*01b0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*01c0*/ SHF.R.U32.HI R3, RZ, 0x1, R3 ; /* 0x00000001ff037819 */
/* 0x000fc80000011603 */
/*01d0*/ ISETP.NE.AND P0, PT, R3, RZ, PT ; /* 0x000000ff0300720c */
/* 0x000fda0003f05270 */
/*01e0*/ @P0 BRA 0x120 ; /* 0xffffff3000000947 */
/* 0x000fea000383ffff */
/*01f0*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x001fda0003f05270 */
/*0200*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0210*/ LDS R5, [RZ] ; /* 0x00000000ff057984 */
/* 0x000e220000000800 */
/*0220*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fd400000001ff */
/*0230*/ IMAD.WIDE.U32 R2, R6, R3, c[0x0][0x168] ; /* 0x00005a0006027625 */
/* 0x000fca00078e0003 */
/*0240*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101906 */
/*0250*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0260*/ BRA 0x260; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z13ReductionMax2PiS_i
.globl _Z13ReductionMax2PiS_i
.p2align 8
.type _Z13ReductionMax2PiS_i,@function
_Z13ReductionMax2PiS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
v_mov_b32_e32 v2, 0x80000001
v_cmp_gt_u32_e32 vcc_lo, s4, v1
s_and_saveexec_b32 s4, vcc_lo
s_cbranch_execz .LBB0_2
s_load_b64 s[6:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s6, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s4
v_lshlrev_b32_e32 v1, 2, v0
s_cmp_lt_u32 s3, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_7
.LBB0_3:
s_mov_b32 s3, 0
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_5
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x8
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_5:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.p2align 6
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s5
s_cmp_lt_u32 s4, 4
s_cbranch_scc1 .LBB0_3
.LBB0_7:
s_mov_b32 s4, s3
s_lshr_b32 s3, s3, 1
s_mov_b32 s5, exec_lo
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB0_6
v_add_lshl_u32 v2, s3, v0, 2
ds_load_b32 v2, v2
ds_load_b32 v3, v1
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, v2, v3
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_6
ds_store_b32 v1, v2
s_branch .LBB0_6
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13ReductionMax2PiS_i
.amdhsa_group_segment_fixed_size 256
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z13ReductionMax2PiS_i, .Lfunc_end0-_Z13ReductionMax2PiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 256
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13ReductionMax2PiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z13ReductionMax2PiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000d3131_00000000-6_asignmaxmin.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3673:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3673:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z11random_intsPii
.type _Z11random_intsPii, @function
_Z11random_intsPii:
.LFB3669:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L8
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movl %esi, %ebp
movq %rdi, %rbx
movslq %esi, %rax
leaq (%rdi,%rax,4), %r12
.L5:
call rand@PLT
cltd
idivl %ebp
movl %edx, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L5
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L8:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3669:
.size _Z11random_intsPii, .-_Z11random_intsPii
.globl _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
.type _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i, @function
_Z36__device_stub__Z13ReductionMax2PiS_iPiS_i:
.LFB3695:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13ReductionMax2PiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3695:
.size _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i, .-_Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
.globl _Z13ReductionMax2PiS_i
.type _Z13ReductionMax2PiS_i, @function
_Z13ReductionMax2PiS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3696:
.size _Z13ReductionMax2PiS_i, .-_Z13ReductionMax2PiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string ","
.LC1:
.string "\nblock max"
.LC2:
.string "\nparallel max="
.text
.globl main
.type main, @function
main:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $72, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 56(%rsp)
xorl %eax, %eax
movl $2048, %edi
call malloc@PLT
movq %rax, %rbp
movl $512, %esi
movq %rax, %rdi
call _Z11random_intsPii
leaq 8(%rsp), %rdi
movl $2048, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $2048, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 16(%rsp), %rdi
movl $32, %esi
call cudaMalloc@PLT
movl $32, %edi
call malloc@PLT
movq %rax, %r14
leaq 24(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $64, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $8, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L20:
movl $2, %ecx
movl $32, %edx
movq 16(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
movl $8, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 44(%rsp), %rdx
movl $1, %ecx
movq 32(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L21:
movl $4, %edi
call malloc@PLT
movq %rax, %r15
movl $2, %ecx
movl $4, %edx
movq 24(%rsp), %rsi
movq %rax, %rdi
call cudaMemcpy@PLT
movq %rbp, %rbx
addq $2048, %rbp
leaq _ZSt4cout(%rip), %r13
leaq .LC0(%rip), %r12
.L22:
movl (%rbx), %esi
movq %r13, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %r12, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %rbp
jne .L22
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %r14, %rbx
addq $32, %r14
leaq _ZSt4cout(%rip), %r12
leaq .LC0(%rip), %rbp
.L23:
movl (%rbx), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
movq %rbp, %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $4, %rbx
cmpq %rbx, %r14
jne .L23
leaq .LC2(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movl (%r15), %esi
call _ZNSolsEi@PLT
movq 56(%rsp), %rax
subq %fs:40, %rax
jne .L30
movl $0, %eax
addq $72, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movl $512, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
jmp .L20
.L29:
movl $8, %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z36__device_stub__Z13ReductionMax2PiS_iPiS_i
jmp .L21
.L30:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z13ReductionMax2PiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3698:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z13ReductionMax2PiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3698:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "asignmaxmin.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z11random_intsPii # -- Begin function _Z11random_intsPii
.p2align 4, 0x90
.type _Z11random_intsPii,@function
_Z11random_intsPii: # @_Z11random_intsPii
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %esi, %ebx
movq %rdi, %r14
movl %esi, %r15d
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltd
idivl %ebx
movl %edx, (%r14,%r12,4)
incq %r12
cmpq %r12, %r15
jne .LBB0_2
# %bb.3:
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r14
.cfi_restore %r15
.LBB0_4: # %._crit_edge
retq
.Lfunc_end0:
.size _Z11random_intsPii, .Lfunc_end0-_Z11random_intsPii
.cfi_endproc
# -- End function
.globl _Z28__device_stub__ReductionMax2PiS_i # -- Begin function _Z28__device_stub__ReductionMax2PiS_i
.p2align 4, 0x90
.type _Z28__device_stub__ReductionMax2PiS_i,@function
_Z28__device_stub__ReductionMax2PiS_i: # @_Z28__device_stub__ReductionMax2PiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z28__device_stub__ReductionMax2PiS_i, .Lfunc_end1-_Z28__device_stub__ReductionMax2PiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $120, %rsp
.cfi_def_cfa_offset 160
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $2048, %edi # imm = 0x800
callq malloc
movq %rax, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
# kill: def $eax killed $eax def $rax
leal 511(%rax), %ecx
testl %eax, %eax
cmovnsl %eax, %ecx
andl $-512, %ecx # imm = 0xFE00
subl %ecx, %eax
movl %eax, (%rbx,%r14,4)
incq %r14
cmpq $512, %r14 # imm = 0x200
jne .LBB2_1
# %bb.2: # %_Z11random_intsPii.exit
movabsq $4294967304, %r15 # imm = 0x100000008
leaq 88(%rsp), %rdi
movl $2048, %esi # imm = 0x800
callq hipMalloc
movq 88(%rsp), %rdi
movl $2048, %edx # imm = 0x800
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
leaq 8(%rsp), %rdi
movl $32, %esi
callq hipMalloc
movl $32, %edi
callq malloc
movq %rax, %r14
leaq 80(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 56(%r15), %rdx
movq %r15, %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_4
# %bb.3:
movq 88(%rsp), %rax
movq 8(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl $512, 4(%rsp) # imm = 0x200
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_4:
movq 8(%rsp), %rsi
movl $32, %edx
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
leaq -7(%r15), %rdi
movl $1, %esi
movq %r15, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_6
# %bb.5:
movq 8(%rsp), %rax
movq 80(%rsp), %rcx
movq %rax, 72(%rsp)
movq %rcx, 64(%rsp)
movl $8, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 4(%rsp), %rax
movq %rax, 112(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z13ReductionMax2PiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_6:
movl $4, %edi
callq malloc
movq %rax, %r15
movq 80(%rsp), %rsi
movl $4, %edx
movq %rax, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB2_7: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r12,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r12
cmpq $512, %r12 # imm = 0x200
jne .LBB2_7
# %bb.8:
movl $_ZSt4cout, %edi
movl $.L.str.1, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_9: # =>This Inner Loop Header: Depth=1
movl (%r14,%rbx,4), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
movl $.L.str, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %rbx
cmpq $8, %rbx
jne .LBB2_9
# %bb.10:
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $14, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r15), %esi
movl $_ZSt4cout, %edi
callq _ZNSolsEi
xorl %eax, %eax
addq $120, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13ReductionMax2PiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z13ReductionMax2PiS_i,@object # @_Z13ReductionMax2PiS_i
.section .rodata,"a",@progbits
.globl _Z13ReductionMax2PiS_i
.p2align 3, 0x0
_Z13ReductionMax2PiS_i:
.quad _Z28__device_stub__ReductionMax2PiS_i
.size _Z13ReductionMax2PiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz ","
.size .L.str, 2
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nblock max"
.size .L.str.1, 11
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\nparallel max="
.size .L.str.2, 15
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z13ReductionMax2PiS_i"
.size .L__unnamed_1, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z28__device_stub__ReductionMax2PiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z13ReductionMax2PiS_i
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <cuda.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} | code for sm_80
Function : _Z11dot_productPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0090*/ FMUL R9, R2, R5 ; /* 0x0000000502097220 */
/* 0x004fca0000400000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <cuda.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} | .file "tmpxft_001682ec_00000000-6_dotProduct.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10initializePfi
.type _Z10initializePfi, @function
_Z10initializePfi:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rsi
movl $0, %eax
.L5:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rdi,%rax,4)
addq $1, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z10initializePfi, .-_Z10initializePfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%f "
.LC1:
.string "\n"
.text
.globl _Z10plotVectorPfi
.type _Z10plotVectorPfi, @function
_Z10plotVectorPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L8
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC0(%rip), %rbp
.L9:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L9
.L8:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z10plotVectorPfi, .-_Z10plotVectorPfi
.globl _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
.type _Z35__device_stub__Z11dot_productPfS_S_PfS_S_, @function
_Z35__device_stub__Z11dot_productPfS_S_PfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11dot_productPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z35__device_stub__Z11dot_productPfS_S_PfS_S_, .-_Z35__device_stub__Z11dot_productPfS_S_PfS_S_
.globl _Z11dot_productPfS_S_
.type _Z11dot_productPfS_S_, @function
_Z11dot_productPfS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z11dot_productPfS_S_, .-_Z11dot_productPfS_S_
.section .rodata.str1.1
.LC3:
.string "Vector A: \n"
.LC4:
.string "Vector B: \n"
.LC5:
.string "Dot Product, A*B = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
movl $24, %edi
call malloc@PLT
movq %rax, %r12
movl $6, %esi
movq %rax, %rdi
call _Z10initializePfi
movl $24, %edi
call malloc@PLT
movq %rax, %rbp
movl $6, %esi
movq %rax, %rdi
call _Z10initializePfi
movl $24, %edi
call malloc@PLT
movq %rax, %rbx
movl $1, %ecx
movl $24, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $24, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $6, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L21:
movl $2, %ecx
movl $24, %edx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdx
leaq 24(%rbx), %rax
movl $0x00000000, 12(%rsp)
.L22:
movss 12(%rsp), %xmm1
addss (%rdx), %xmm1
movss %xmm1, 12(%rsp)
addq $4, %rdx
cmpq %rax, %rdx
jne .L22
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %esi
movq %r12, %rdi
call _Z10plotVectorPfi
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %esi
movq %rbp, %rdi
call _Z10plotVectorPfi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L27
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
jmp .L21
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z11dot_productPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11dot_productPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <cuda.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} | #include <stdio.h>
#include <hip/hip_runtime.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11dot_productPfS_S_
.globl _Z11dot_productPfS_S_
.p2align 8
.type _Z11dot_productPfS_S_,@function
_Z11dot_productPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11dot_productPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11dot_productPfS_S_, .Lfunc_end0-_Z11dot_productPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11dot_productPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z11dot_productPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <hip/hip_runtime.h>
#define N 6
void initialize(float * vector, int num) {
for(int i=0; i<num; i++) {
vector[i] = i;
}
}
void plotVector(float * vector, int num) {
for(int i=0; i<num; i++) {
printf("%f ", vector[i]);
}
printf("\n");
}
__global__ void dot_product(float *a, float *b, float *c)
{
c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
}
int main(void)
{
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int size = N * sizeof(float);
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
h_a = (float *)malloc(size); initialize(h_a, N);
h_b = (float *)malloc(size); initialize(h_b, N);
h_c = (float *)malloc(size);
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
dot_product<<<1,N>>>(d_a, d_b, d_c);
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
float sum = 0;
for(int i=0; i<N; i++) {
sum += h_c[i];
}
printf("Vector A: \n"); plotVector(h_a, N);
printf("Vector B: \n"); plotVector(h_b, N);
printf("Dot Product, A*B = %f\n", sum);
return 0;
} | .text
.file "dotProduct.hip"
.globl _Z10initializePfi # -- Begin function _Z10initializePfi
.p2align 4, 0x90
.type _Z10initializePfi,@function
_Z10initializePfi: # @_Z10initializePfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movss %xmm0, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10initializePfi, .Lfunc_end0-_Z10initializePfi
.cfi_endproc
# -- End function
.globl _Z10plotVectorPfi # -- Begin function _Z10plotVectorPfi
.p2align 4, 0x90
.type _Z10plotVectorPfi,@function
_Z10plotVectorPfi: # @_Z10plotVectorPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
movl $10, %edi
jmp putchar@PLT # TAILCALL
.Lfunc_end1:
.size _Z10plotVectorPfi, .Lfunc_end1-_Z10plotVectorPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__dot_productPfS_S_ # -- Begin function _Z26__device_stub__dot_productPfS_S_
.p2align 4, 0x90
.type _Z26__device_stub__dot_productPfS_S_,@function
_Z26__device_stub__dot_productPfS_S_: # @_Z26__device_stub__dot_productPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11dot_productPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z26__device_stub__dot_productPfS_S_, .Lfunc_end2-_Z26__device_stub__dot_productPfS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $144, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 32(%rsp), %rdi
movl $24, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $24, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $24, %esi
callq hipMalloc
movl $24, %edi
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%rax,4)
incq %rax
cmpq $6, %rax
jne .LBB3_1
# %bb.2: # %_Z10initializePfi.exit
movl $24, %edi
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $6, %rax
jne .LBB3_3
# %bb.4: # %_Z10initializePfi.exit29
movl $24, %edi
callq malloc
movq %rax, %r15
movq 32(%rsp), %rdi
movl $24, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $24, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 5(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11dot_productPfS_S_, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_6:
movq 16(%rsp), %rsi
movl $24, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_7: # =>This Inner Loop Header: Depth=1
addss (%r15,%rax,4), %xmm0
incq %rax
cmpq $6, %rax
jne .LBB3_7
# %bb.8:
movss %xmm0, 12(%rsp) # 4-byte Spill
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_9: # %.lr.ph.i30
# =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $6, %r15
jne .LBB3_9
# %bb.10: # %_Z10plotVectorPfi.exit
movl $10, %edi
callq putchar@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_11: # %.lr.ph.i34
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r14
cmpq $6, %r14
jne .LBB3_11
# %bb.12: # %_Z10plotVectorPfi.exit39
movl $10, %edi
callq putchar@PLT
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11dot_productPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type _Z11dot_productPfS_S_,@object # @_Z11dot_productPfS_S_
.section .rodata,"a",@progbits
.globl _Z11dot_productPfS_S_
.p2align 3, 0x0
_Z11dot_productPfS_S_:
.quad _Z26__device_stub__dot_productPfS_S_
.size _Z11dot_productPfS_S_, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Dot Product, A*B = %f\n"
.size .L.str.4, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11dot_productPfS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Vector A: "
.size .Lstr, 11
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Vector B: "
.size .Lstr.1, 11
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__dot_productPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11dot_productPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z11dot_productPfS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0040*/ IMAD.WIDE.U32 R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0007 */
/*0050*/ IMAD.WIDE.U32 R4, R6.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x0c0fe400078e0007 */
/*0060*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*0070*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ IMAD.WIDE.U32 R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fc800078e0007 */
/*0090*/ FMUL R9, R2, R5 ; /* 0x0000000502097220 */
/* 0x004fca0000400000 */
/*00a0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00c0*/ BRA 0xc0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*00d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*00f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11dot_productPfS_S_
.globl _Z11dot_productPfS_S_
.p2align 8
.type _Z11dot_productPfS_S_,@function
_Z11dot_productPfS_S_:
s_load_b128 s[4:7], s[0:1], 0x0
v_lshlrev_b32_e32 v0, 2, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[4:5]
global_load_b32 v2, v0, s[6:7]
s_waitcnt vmcnt(0)
v_mul_f32_e32 v1, v1, v2
global_store_b32 v0, v1, s[0:1]
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11dot_productPfS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 8
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11dot_productPfS_S_, .Lfunc_end0-_Z11dot_productPfS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11dot_productPfS_S_
.private_segment_fixed_size: 0
.sgpr_count: 8
.sgpr_spill_count: 0
.symbol: _Z11dot_productPfS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_001682ec_00000000-6_dotProduct.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2062:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z10initializePfi
.type _Z10initializePfi, @function
_Z10initializePfi:
.LFB2057:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L3
movslq %esi, %rsi
movl $0, %eax
.L5:
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rdi,%rax,4)
addq $1, %rax
cmpq %rsi, %rax
jne .L5
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z10initializePfi, .-_Z10initializePfi
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%f "
.LC1:
.string "\n"
.text
.globl _Z10plotVectorPfi
.type _Z10plotVectorPfi, @function
_Z10plotVectorPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
testl %esi, %esi
jle .L8
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %r12
leaq .LC0(%rip), %rbp
.L9:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %r12, %rbx
jne .L9
.L8:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z10plotVectorPfi, .-_Z10plotVectorPfi
.globl _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
.type _Z35__device_stub__Z11dot_productPfS_S_PfS_S_, @function
_Z35__device_stub__Z11dot_productPfS_S_PfS_S_:
.LFB2084:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11dot_productPfS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2084:
.size _Z35__device_stub__Z11dot_productPfS_S_PfS_S_, .-_Z35__device_stub__Z11dot_productPfS_S_PfS_S_
.globl _Z11dot_productPfS_S_
.type _Z11dot_productPfS_S_, @function
_Z11dot_productPfS_S_:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _Z11dot_productPfS_S_, .-_Z11dot_productPfS_S_
.section .rodata.str1.1
.LC3:
.string "Vector A: \n"
.LC4:
.string "Vector B: \n"
.LC5:
.string "Dot Product, A*B = %f\n"
.text
.globl main
.type main, @function
main:
.LFB2059:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
subq $80, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $24, %esi
call cudaMalloc@PLT
movl $24, %edi
call malloc@PLT
movq %rax, %r12
movl $6, %esi
movq %rax, %rdi
call _Z10initializePfi
movl $24, %edi
call malloc@PLT
movq %rax, %rbp
movl $6, %esi
movq %rax, %rdi
call _Z10initializePfi
movl $24, %edi
call malloc@PLT
movq %rax, %rbx
movl $1, %ecx
movl $24, %edx
movq %r12, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $24, %edx
movq %rbp, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $6, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L26
.L21:
movl $2, %ecx
movl $24, %edx
movq 40(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
movq %rbx, %rdx
leaq 24(%rbx), %rax
movl $0x00000000, 12(%rsp)
.L22:
movss 12(%rsp), %xmm1
addss (%rdx), %xmm1
movss %xmm1, 12(%rsp)
addq $4, %rdx
cmpq %rax, %rdx
jne .L22
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %esi
movq %r12, %rdi
call _Z10plotVectorPfi
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $6, %esi
movq %rbp, %rdi
call _Z10plotVectorPfi
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L27
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z35__device_stub__Z11dot_productPfS_S_PfS_S_
jmp .L21
.L27:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z11dot_productPfS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z11dot_productPfS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "dotProduct.hip"
.globl _Z10initializePfi # -- Begin function _Z10initializePfi
.p2align 4, 0x90
.type _Z10initializePfi,@function
_Z10initializePfi: # @_Z10initializePfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %esi, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %ecx, %xmm0
movss %xmm0, (%rdi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB0_2
.LBB0_3: # %._crit_edge
retq
.Lfunc_end0:
.size _Z10initializePfi, .Lfunc_end0-_Z10initializePfi
.cfi_endproc
# -- End function
.globl _Z10plotVectorPfi # -- Begin function _Z10plotVectorPfi
.p2align 4, 0x90
.type _Z10plotVectorPfi,@function
_Z10plotVectorPfi: # @_Z10plotVectorPfi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
movl $10, %edi
jmp putchar@PLT # TAILCALL
.Lfunc_end1:
.size _Z10plotVectorPfi, .Lfunc_end1-_Z10plotVectorPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__dot_productPfS_S_ # -- Begin function _Z26__device_stub__dot_productPfS_S_
.p2align 4, 0x90
.type _Z26__device_stub__dot_productPfS_S_,@function
_Z26__device_stub__dot_productPfS_S_: # @_Z26__device_stub__dot_productPfS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11dot_productPfS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end2:
.size _Z26__device_stub__dot_productPfS_S_, .Lfunc_end2-_Z26__device_stub__dot_productPfS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $144, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 32(%rsp), %rdi
movl $24, %esi
callq hipMalloc
leaq 24(%rsp), %rdi
movl $24, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $24, %esi
callq hipMalloc
movl $24, %edi
callq malloc
movq %rax, %r14
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r14,%rax,4)
incq %rax
cmpq $6, %rax
jne .LBB3_1
# %bb.2: # %_Z10initializePfi.exit
movl $24, %edi
callq malloc
movq %rax, %rbx
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_3: # %.lr.ph.i25
# =>This Inner Loop Header: Depth=1
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%rbx,%rax,4)
incq %rax
cmpq $6, %rax
jne .LBB3_3
# %bb.4: # %_Z10initializePfi.exit29
movl $24, %edi
callq malloc
movq %rax, %r15
movq 32(%rsp), %rdi
movl $24, %edx
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $24, %edx
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
leaq 5(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_6
# %bb.5:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
leaq 104(%rsp), %rax
movq %rax, 112(%rsp)
leaq 96(%rsp), %rax
movq %rax, 120(%rsp)
leaq 88(%rsp), %rax
movq %rax, 128(%rsp)
leaq 72(%rsp), %rdi
leaq 56(%rsp), %rsi
leaq 48(%rsp), %rdx
leaq 40(%rsp), %rcx
callq __hipPopCallConfiguration
movq 72(%rsp), %rsi
movl 80(%rsp), %edx
movq 56(%rsp), %rcx
movl 64(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11dot_productPfS_S_, %edi
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_6:
movq 16(%rsp), %rsi
movl $24, %edx
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
xorps %xmm0, %xmm0
xorl %eax, %eax
.p2align 4, 0x90
.LBB3_7: # =>This Inner Loop Header: Depth=1
addss (%r15,%rax,4), %xmm0
incq %rax
cmpq $6, %rax
jne .LBB3_7
# %bb.8:
movss %xmm0, 12(%rsp) # 4-byte Spill
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_9: # %.lr.ph.i30
# =>This Inner Loop Header: Depth=1
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r15
cmpq $6, %r15
jne .LBB3_9
# %bb.10: # %_Z10plotVectorPfi.exit
movl $10, %edi
callq putchar@PLT
movl $.Lstr.1, %edi
callq puts@PLT
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_11: # %.lr.ph.i34
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
incq %r14
cmpq $6, %r14
jne .LBB3_11
# %bb.12: # %_Z10plotVectorPfi.exit39
movl $10, %edi
callq putchar@PLT
movss 12(%rsp), %xmm0 # 4-byte Reload
# xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.4, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $144, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11dot_productPfS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "%f "
.size .L.str, 4
.type _Z11dot_productPfS_S_,@object # @_Z11dot_productPfS_S_
.section .rodata,"a",@progbits
.globl _Z11dot_productPfS_S_
.p2align 3, 0x0
_Z11dot_productPfS_S_:
.quad _Z26__device_stub__dot_productPfS_S_
.size _Z11dot_productPfS_S_, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Dot Product, A*B = %f\n"
.size .L.str.4, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11dot_productPfS_S_"
.size .L__unnamed_1, 22
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Vector A: "
.size .Lstr, 11
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Vector B: "
.size .Lstr.1, 11
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__dot_productPfS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11dot_productPfS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) { \
printf("(CUDA) %s", cudaGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the CSR kernel
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_rows) {
float temp = 0;
int start = ptr[i], end = ptr[i+1]; // ptr: coalesced
for (int k = start; k < end; k++) {
temp += data[k] * x[indices[k]]; // x, data, indices: uncoalesced
}
y[i] = temp; // y: coalesced
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized csr kernel
/*
* i) Set grid and block size in the kernel call accordingly
* ii) Assign a matrix row to each warp now
* iii) Allocate a shared array vals[] for the partial results of a block
* iv) Compute one row × vector product in a loop. This time, parallelize the loop over all 32 threads in the warp. Take care that access to the arrays indices and data is coalesced.
* v) Use a reduction of some kind (ideally: binary fan-in) to add up the partial sums in vals[] and add the output to the result vector.
*/
/* no vectorization
* usroads.mtx
* real 0m2.548s
* user 0m0.272s
* sys 0m2.212s
*/
__shared__ float vals[WARP_SIZE][WARP_SIZE];
int tx = threadIdx.x, ty = threadIdx.y;
int row_in_grid = blockIdx.y * blockDim.y + ty;
if (row_in_grid < num_rows) {
int k;
int start = ptr[row_in_grid], end = ptr[row_in_grid+1];
float temp = 0;
for (k = tx + start; k < end; k += WARP_SIZE) {
temp += data[k] * x[indices[k]];
}
vals[ty][tx] = temp;
//__syncthreads();
// First attempt
/* vectorized, sum by loop
* usroads.mtx
* real 0m2.781s
* user 0m0.288s
* sys 0m1.683s
*/
/*int l, p;
for (k = 1; k < 6; k++) {
p = (int) powf(2, k-1);
for (l = 0; l < WARP_SIZE/(2*p); l++) {
vals[ty][2*p*l] += vals[ty][2*p*l+p];
}
}*/
// Second attempt
/* vectorized, sum by binary fan-in
* usroads.mtx
* real 0m2.627s
* user 0m0.212s
* sys 0m1.543s
*/
for (k = 1; k < WARP_SIZE; k *= 2) {
if (tx % (2*k) == 0) {
vals[ty][tx] += vals[ty][tx+k];
}
//__syncthreads();
}
// End of two attempts
y[row_in_grid] = vals[threadIdx.y][0];
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
//TODO, but not this time: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
cudaMemcpy(ptr_d, ptr, (N+1) * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
cudaMemcpy(J_d, J, nnz * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
cudaMemcpy(Val_d, Val, nnz * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(y_d, y, N * sizeof(float) , cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(1, (N - 1)/WARP_SIZE + 1, 1);
dim3 block(WARP_SIZE, WARP_SIZE, 1);
k_csr2_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(ptr_d);
cudaFree(J_d);
cudaFree(Val_d);
cudaFree(x_d);
cudaFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
cudaMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
cudaMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(y_d, y, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(indices_d);
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
} | .file "tmpxft_0000c4a1_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "(CUDA) %s"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string " (/home/ubuntu/Datasets/stackv2/train-structured/PhuNH/hpc-aa/master/t3/kernels.cu:%d)\n"
.text
.globl ELLmatvecmult
.type ELLmatvecmult, @function
ELLmatvecmult:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebx
movq %rdx, %r15
movq %rcx, %r14
movq %r8, %r13
movq %r9, %r12
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
imull %edi, %esi
movslq %esi, %rbp
salq $2, %rbp
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
.L4:
movl $1, %ecx
movq %rbp, %rdx
movq %r15, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L17
.L5:
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L18
.L6:
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L19
.L7:
movslq %ebx, %rbx
salq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L20
.L8:
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L21
.L9:
leaq 32(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L22
.L10:
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L23
.L11:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L24
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L25
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $194, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L4
.L17:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $196, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L5
.L18:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $199, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L6
.L19:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $201, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L7
.L20:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $204, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L8
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $206, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L9
.L22:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $209, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L10
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $211, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L11
.L24:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $219, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L25:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $226, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L13
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size ELLmatvecmult, .-ELLmatvecmult
.globl _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.type _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, @function
_Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, .-_Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.globl _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.type _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, @function
_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, .-_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.globl _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.type _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, @function
_Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, .-_Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.globl _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.type _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, @function
_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, .-_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.globl CSRmatvecmult
.type CSRmatvecmult, @function
CSRmatvecmult:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, %r13
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movl %ecx, %r12d
movl %r8d, %ebp
movq %r9, 24(%rsp)
movq 176(%rsp), %r14
movl 184(%rsp), %r15d
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leal 1(%rcx), %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L62
.L44:
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L63
.L45:
movslq %ebp, %rbp
salq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L64
.L46:
movl $1, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L65
.L47:
leaq 56(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L66
.L48:
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L67
.L49:
subq $4, %rbx
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L68
.L50:
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L69
.L51:
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L70
.L52:
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L71
.L53:
testb %r15b, %r15b
je .L54
movl $1, 80(%rsp)
leal 30(%r12), %eax
movl %r12d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 84(%rsp)
movl $1, 88(%rsp)
movl $32, 92(%rsp)
movl $32, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L72
.L56:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L73
.L58:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L74
.L59:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L75
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $121, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L44
.L63:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $123, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L45
.L64:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $126, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L46
.L65:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $128, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L47
.L66:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $131, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L48
.L67:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $133, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L49
.L68:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $136, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L50
.L69:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $138, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L51
.L70:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $141, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L52
.L71:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $143, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L53
.L72:
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movl %r12d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
jmp .L56
.L54:
leal 510(%r12), %eax
movl %r12d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $9, %eax
addl $1, %eax
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $512, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L56
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movl %r12d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
jmp .L56
.L73:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $162, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L58
.L74:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $169, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L59
.L75:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size CSRmatvecmult, .-CSRmatvecmult
.globl _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
.type _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_, @function
_Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_:
.LFB2087:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L80
.L76:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L81
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L80:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16k_ell_mat_vec_mmiiPiPfS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L76
.L81:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_, .-_Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
.globl _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.type _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, @function
_Z16k_ell_mat_vec_mmiiPiPfS0_S0_:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, .-_Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z16k_ell_mat_vec_mmiiPiPfS0_S0_"
.align 8
.LC3:
.string "_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_"
.align 8
.LC4:
.string "_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z16k_ell_mat_vec_mmiiPiPfS0_S0_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <cuda_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) { \
printf("(CUDA) %s", cudaGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the CSR kernel
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_rows) {
float temp = 0;
int start = ptr[i], end = ptr[i+1]; // ptr: coalesced
for (int k = start; k < end; k++) {
temp += data[k] * x[indices[k]]; // x, data, indices: uncoalesced
}
y[i] = temp; // y: coalesced
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized csr kernel
/*
* i) Set grid and block size in the kernel call accordingly
* ii) Assign a matrix row to each warp now
* iii) Allocate a shared array vals[] for the partial results of a block
* iv) Compute one row × vector product in a loop. This time, parallelize the loop over all 32 threads in the warp. Take care that access to the arrays indices and data is coalesced.
* v) Use a reduction of some kind (ideally: binary fan-in) to add up the partial sums in vals[] and add the output to the result vector.
*/
/* no vectorization
* usroads.mtx
* real 0m2.548s
* user 0m0.272s
* sys 0m2.212s
*/
__shared__ float vals[WARP_SIZE][WARP_SIZE];
int tx = threadIdx.x, ty = threadIdx.y;
int row_in_grid = blockIdx.y * blockDim.y + ty;
if (row_in_grid < num_rows) {
int k;
int start = ptr[row_in_grid], end = ptr[row_in_grid+1];
float temp = 0;
for (k = tx + start; k < end; k += WARP_SIZE) {
temp += data[k] * x[indices[k]];
}
vals[ty][tx] = temp;
//__syncthreads();
// First attempt
/* vectorized, sum by loop
* usroads.mtx
* real 0m2.781s
* user 0m0.288s
* sys 0m1.683s
*/
/*int l, p;
for (k = 1; k < 6; k++) {
p = (int) powf(2, k-1);
for (l = 0; l < WARP_SIZE/(2*p); l++) {
vals[ty][2*p*l] += vals[ty][2*p*l+p];
}
}*/
// Second attempt
/* vectorized, sum by binary fan-in
* usroads.mtx
* real 0m2.627s
* user 0m0.212s
* sys 0m1.543s
*/
for (k = 1; k < WARP_SIZE; k *= 2) {
if (tx % (2*k) == 0) {
vals[ty][tx] += vals[ty][tx+k];
}
//__syncthreads();
}
// End of two attempts
y[row_in_grid] = vals[threadIdx.y][0];
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
//TODO, but not this time: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
cudaMemcpy(ptr_d, ptr, (N+1) * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
cudaMemcpy(J_d, J, nnz * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
cudaMemcpy(Val_d, Val, nnz * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(y_d, y, N * sizeof(float) , cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(1, (N - 1)/WARP_SIZE + 1, 1);
dim3 block(WARP_SIZE, WARP_SIZE, 1);
k_csr2_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(ptr_d);
cudaFree(J_d);
cudaFree(Val_d);
cudaFree(x_d);
cudaFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
cudaMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
cudaMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
cudaMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(x_d, x, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
cudaMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
cudaMemcpy(y_d, y, N * sizeof(float), cudaMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
cudaMemcpy(y, y_d, N * sizeof(float), cudaMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
cudaFree(indices_d);
cudaFree(data_d);
cudaFree(x_d);
cudaFree(y_d);
} | #include <hip/hip_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
hipError_t error = hipGetLastError(); \
if (error != hipSuccess) { \
printf("(CUDA) %s", hipGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the CSR kernel
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_rows) {
float temp = 0;
int start = ptr[i], end = ptr[i+1]; // ptr: coalesced
for (int k = start; k < end; k++) {
temp += data[k] * x[indices[k]]; // x, data, indices: uncoalesced
}
y[i] = temp; // y: coalesced
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized csr kernel
/*
* i) Set grid and block size in the kernel call accordingly
* ii) Assign a matrix row to each warp now
* iii) Allocate a shared array vals[] for the partial results of a block
* iv) Compute one row × vector product in a loop. This time, parallelize the loop over all 32 threads in the warp. Take care that access to the arrays indices and data is coalesced.
* v) Use a reduction of some kind (ideally: binary fan-in) to add up the partial sums in vals[] and add the output to the result vector.
*/
/* no vectorization
* usroads.mtx
* real 0m2.548s
* user 0m0.272s
* sys 0m2.212s
*/
__shared__ float vals[WARP_SIZE][WARP_SIZE];
int tx = threadIdx.x, ty = threadIdx.y;
int row_in_grid = blockIdx.y * blockDim.y + ty;
if (row_in_grid < num_rows) {
int k;
int start = ptr[row_in_grid], end = ptr[row_in_grid+1];
float temp = 0;
for (k = tx + start; k < end; k += WARP_SIZE) {
temp += data[k] * x[indices[k]];
}
vals[ty][tx] = temp;
//__syncthreads();
// First attempt
/* vectorized, sum by loop
* usroads.mtx
* real 0m2.781s
* user 0m0.288s
* sys 0m1.683s
*/
/*int l, p;
for (k = 1; k < 6; k++) {
p = (int) powf(2, k-1);
for (l = 0; l < WARP_SIZE/(2*p); l++) {
vals[ty][2*p*l] += vals[ty][2*p*l+p];
}
}*/
// Second attempt
/* vectorized, sum by binary fan-in
* usroads.mtx
* real 0m2.627s
* user 0m0.212s
* sys 0m1.543s
*/
for (k = 1; k < WARP_SIZE; k *= 2) {
if (tx % (2*k) == 0) {
vals[ty][tx] += vals[ty][tx+k];
}
//__syncthreads();
}
// End of two attempts
y[row_in_grid] = vals[threadIdx.y][0];
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
//TODO, but not this time: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
hipMemcpy(ptr_d, ptr, (N+1) * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
hipMemcpy(J_d, J, nnz * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
hipMemcpy(Val_d, Val, nnz * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float) , hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(1, (N - 1)/WARP_SIZE + 1, 1);
dim3 block(WARP_SIZE, WARP_SIZE, 1);
k_csr2_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(ptr_d);
hipFree(J_d);
hipFree(Val_d);
hipFree(x_d);
hipFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
hipMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
hipMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(indices_d);
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
hipError_t error = hipGetLastError(); \
if (error != hipSuccess) { \
printf("(CUDA) %s", hipGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the CSR kernel
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_rows) {
float temp = 0;
int start = ptr[i], end = ptr[i+1]; // ptr: coalesced
for (int k = start; k < end; k++) {
temp += data[k] * x[indices[k]]; // x, data, indices: uncoalesced
}
y[i] = temp; // y: coalesced
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized csr kernel
/*
* i) Set grid and block size in the kernel call accordingly
* ii) Assign a matrix row to each warp now
* iii) Allocate a shared array vals[] for the partial results of a block
* iv) Compute one row × vector product in a loop. This time, parallelize the loop over all 32 threads in the warp. Take care that access to the arrays indices and data is coalesced.
* v) Use a reduction of some kind (ideally: binary fan-in) to add up the partial sums in vals[] and add the output to the result vector.
*/
/* no vectorization
* usroads.mtx
* real 0m2.548s
* user 0m0.272s
* sys 0m2.212s
*/
__shared__ float vals[WARP_SIZE][WARP_SIZE];
int tx = threadIdx.x, ty = threadIdx.y;
int row_in_grid = blockIdx.y * blockDim.y + ty;
if (row_in_grid < num_rows) {
int k;
int start = ptr[row_in_grid], end = ptr[row_in_grid+1];
float temp = 0;
for (k = tx + start; k < end; k += WARP_SIZE) {
temp += data[k] * x[indices[k]];
}
vals[ty][tx] = temp;
//__syncthreads();
// First attempt
/* vectorized, sum by loop
* usroads.mtx
* real 0m2.781s
* user 0m0.288s
* sys 0m1.683s
*/
/*int l, p;
for (k = 1; k < 6; k++) {
p = (int) powf(2, k-1);
for (l = 0; l < WARP_SIZE/(2*p); l++) {
vals[ty][2*p*l] += vals[ty][2*p*l+p];
}
}*/
// Second attempt
/* vectorized, sum by binary fan-in
* usroads.mtx
* real 0m2.627s
* user 0m0.212s
* sys 0m1.543s
*/
for (k = 1; k < WARP_SIZE; k *= 2) {
if (tx % (2*k) == 0) {
vals[ty][tx] += vals[ty][tx+k];
}
//__syncthreads();
}
// End of two attempts
y[row_in_grid] = vals[threadIdx.y][0];
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
//TODO, but not this time: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
hipMemcpy(ptr_d, ptr, (N+1) * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
hipMemcpy(J_d, J, nnz * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
hipMemcpy(Val_d, Val, nnz * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float) , hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(1, (N - 1)/WARP_SIZE + 1, 1);
dim3 block(WARP_SIZE, WARP_SIZE, 1);
k_csr2_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(ptr_d);
hipFree(J_d);
hipFree(Val_d);
hipFree(x_d);
hipFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
hipMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
hipMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(indices_d);
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.globl _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.p2align 8
.type _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_,@function
_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_6
s_load_b64 s[2:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
v_mov_b32_e32 v0, 0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_mov_b32 s3, exec_lo
global_load_b64 v[3:4], v[3:4], off
s_waitcnt vmcnt(0)
v_cmpx_lt_i32_e64 v3, v4
s_cbranch_execz .LBB0_5
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x8
s_load_b64 s[4:5], s[0:1], 0x20
v_ashrrev_i32_e32 v6, 31, v3
v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v0, 0
s_mov_b32 s6, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[7:8], 2, v[5:6]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v5, vcc_lo, s10, v7
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v6, vcc_lo, s11, v8, vcc_lo
v_add_co_u32 v7, vcc_lo, s8, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s9, v8, vcc_lo
.p2align 6
.LBB0_3:
global_load_b32 v9, v[7:8], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[9:10], 2, v[9:10]
v_add_co_u32 v9, vcc_lo, s4, v9
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
global_load_b32 v11, v[5:6], off
global_load_b32 v9, v[9:10], off
v_add_nc_u32_e32 v3, 1, v3
v_add_co_u32 v5, vcc_lo, v5, 4
v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
v_add_co_u32 v7, vcc_lo, v7, 4
v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v0, v11, v9
v_cmp_ge_i32_e64 s2, v3, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s6, s2, s6
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execnz .LBB0_3
s_or_b32 exec_lo, exec_lo, s6
.LBB0_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s3
s_load_b64 s[0:1], s[0:1], 0x28
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s0, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s1, v2, vcc_lo
global_store_b32 v[1:2], v0, off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 12
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end0-_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.globl _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.p2align 8
.type _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_,@function
_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x3c
s_load_b32 s3, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s15, s2, v[1:2]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v2
s_cbranch_execz .LBB1_10
s_load_b64 s[2:3], s[0:1], 0x0
v_ashrrev_i32_e32 v3, 31, v2
v_dual_mov_b32 v11, 0 :: v_dual_and_b32 v0, 0x3ff, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 2, v[2:3]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, vcc_lo, s2, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
s_mov_b32 s3, exec_lo
global_load_b64 v[4:5], v[4:5], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v10, v4, v0
v_cmpx_lt_i32_e64 v10, v5
s_cbranch_execz .LBB1_5
s_clause 0x1
s_load_b128 s[8:11], s[0:1], 0x8
s_load_b64 s[4:5], s[0:1], 0x20
v_ashrrev_i32_e32 v7, 31, v4
v_add_co_u32 v6, vcc_lo, v4, v0
v_mov_b32_e32 v11, 0
s_mov_b32 s6, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
v_lshlrev_b64 v[8:9], 2, v[6:7]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v6, vcc_lo, s10, v8
v_add_co_ci_u32_e32 v7, vcc_lo, s11, v9, vcc_lo
v_add_co_u32 v8, vcc_lo, s8, v8
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v9, vcc_lo
.p2align 6
.LBB1_3:
global_load_b32 v12, v[8:9], off
s_waitcnt vmcnt(0)
v_ashrrev_i32_e32 v13, 31, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[12:13], 2, v[12:13]
v_add_co_u32 v12, vcc_lo, s4, v12
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v13, vcc_lo, s5, v13, vcc_lo
global_load_b32 v4, v[6:7], off
global_load_b32 v12, v[12:13], off
v_add_nc_u32_e32 v10, 32, v10
v_add_co_u32 v6, vcc_lo, v6, 0x80
v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
v_add_co_u32 v8, vcc_lo, v8, 0x80
v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v11, v4, v12
v_cmp_ge_i32_e64 s2, v10, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_or_b32 s6, s2, s6
s_and_not1_b32 exec_lo, exec_lo, s6
s_cbranch_execnz .LBB1_3
s_or_b32 exec_lo, exec_lo, s6
.LBB1_5:
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
s_or_b32 exec_lo, exec_lo, s3
v_lshlrev_b32_e32 v4, 2, v0
s_mov_b32 s2, 1
v_lshl_add_u32 v4, v1, 7, v4
ds_store_b32 v4, v11
s_branch .LBB1_7
.p2align 6
.LBB1_6:
s_or_b32 exec_lo, exec_lo, s4
s_cmp_lt_u32 s2, 16
s_mov_b32 s2, s3
s_cbranch_scc0 .LBB1_9
.LBB1_7:
s_lshl_b32 s3, s2, 1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s4, s3, -1
v_and_b32_e32 v5, s4, v0
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_eq_u32_e32 0, v5
s_cbranch_execz .LBB1_6
v_add_lshl_u32 v5, s2, v0, 2
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v5, v1, 7, v5
ds_load_b32 v5, v5
ds_load_b32 v6, v4
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v5, v5, v6
ds_store_b32 v4, v5
s_branch .LBB1_6
.LBB1_9:
s_load_b64 s[0:1], s[0:1], 0x28
v_lshlrev_b32_e32 v0, 7, v1
ds_load_b32 v4, v0
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB1_10:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.amdhsa_group_segment_fixed_size 4096
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 14
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end1-_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.globl _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.p2align 8
.type _Z16k_ell_mat_vec_mmiiPiPfS0_S0_,@function
_Z16k_ell_mat_vec_mmiiPiPfS0_S0_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 40
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, .Lfunc_end2-_Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 12
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 4096
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 14
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 40
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z16k_ell_mat_vec_mmiiPiPfS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "stdio.h"
#define TILE_SIZE 512
#define WARP_SIZE 32
extern "C" void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized);
extern "C" void ELLmatvecmult(int N, int num_cols_per_row , int * indices, float * data , float * x , float * y);
/**
* Custom CUDA error check wrapper.
*/
#define checkCUDAError() do { \
hipError_t error = hipGetLastError(); \
if (error != hipSuccess) { \
printf("(CUDA) %s", hipGetErrorString(error)); \
printf(" (" __FILE__ ":%d)\n", __LINE__); \
}\
} while (0)
/**
* Cuda kernel for: CSR_s(A)x = y
*/
__global__ void k_csr_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the CSR kernel
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_rows) {
float temp = 0;
int start = ptr[i], end = ptr[i+1]; // ptr: coalesced
for (int k = start; k < end; k++) {
temp += data[k] * x[indices[k]]; // x, data, indices: uncoalesced
}
y[i] = temp; // y: coalesced
}
}
/**
* Cuda kernel for: CSR_v(A)x = y
*/
__global__ void k_csr2_mat_vec_mm(int *ptr, int* indices, float *data, int num_rows, float *x, float* y) {
//TODO: implement the vectorized csr kernel
/*
* i) Set grid and block size in the kernel call accordingly
* ii) Assign a matrix row to each warp now
* iii) Allocate a shared array vals[] for the partial results of a block
* iv) Compute one row × vector product in a loop. This time, parallelize the loop over all 32 threads in the warp. Take care that access to the arrays indices and data is coalesced.
* v) Use a reduction of some kind (ideally: binary fan-in) to add up the partial sums in vals[] and add the output to the result vector.
*/
/* no vectorization
* usroads.mtx
* real 0m2.548s
* user 0m0.272s
* sys 0m2.212s
*/
__shared__ float vals[WARP_SIZE][WARP_SIZE];
int tx = threadIdx.x, ty = threadIdx.y;
int row_in_grid = blockIdx.y * blockDim.y + ty;
if (row_in_grid < num_rows) {
int k;
int start = ptr[row_in_grid], end = ptr[row_in_grid+1];
float temp = 0;
for (k = tx + start; k < end; k += WARP_SIZE) {
temp += data[k] * x[indices[k]];
}
vals[ty][tx] = temp;
//__syncthreads();
// First attempt
/* vectorized, sum by loop
* usroads.mtx
* real 0m2.781s
* user 0m0.288s
* sys 0m1.683s
*/
/*int l, p;
for (k = 1; k < 6; k++) {
p = (int) powf(2, k-1);
for (l = 0; l < WARP_SIZE/(2*p); l++) {
vals[ty][2*p*l] += vals[ty][2*p*l+p];
}
}*/
// Second attempt
/* vectorized, sum by binary fan-in
* usroads.mtx
* real 0m2.627s
* user 0m0.212s
* sys 0m1.543s
*/
for (k = 1; k < WARP_SIZE; k *= 2) {
if (tx % (2*k) == 0) {
vals[ty][tx] += vals[ty][tx+k];
}
//__syncthreads();
}
// End of two attempts
y[row_in_grid] = vals[threadIdx.y][0];
}
}
/**
* Cuda kernel for: ELL(A)x = y
*/
__global__ void k_ell_mat_vec_mm ( int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y ) {
//TODO, but not this time: ellpack kernel
}
/**
* Perform: CSR(A)x = y
*/
void CSRmatvecmult(int* ptr, int* J, float* Val, int N, int nnz, float* x, float *y, bool bVectorized) {
int *ptr_d, *J_d;
float *Val_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &ptr_d, (N+1) * sizeof(int));
checkCUDAError();
hipMemcpy(ptr_d, ptr, (N+1) * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &J_d, nnz * sizeof(int));
checkCUDAError();
hipMemcpy(J_d, J, nnz * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &Val_d, nnz * sizeof(float));
checkCUDAError();
hipMemcpy(Val_d, Val, nnz * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float) , hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
if (bVectorized) {
//TODO: define grid and block size correctly
dim3 grid(1, (N - 1)/WARP_SIZE + 1, 1);
dim3 block(WARP_SIZE, WARP_SIZE, 1);
k_csr2_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
} else {
dim3 grid((N - 1)/TILE_SIZE + 1, 1, 1);
dim3 block(TILE_SIZE, 1, 1);
k_csr_mat_vec_mm<<<grid, block>>>(ptr_d, J_d, Val_d, N, x_d, y_d);
}
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(ptr_d);
hipFree(J_d);
hipFree(Val_d);
hipFree(x_d);
hipFree(y_d);
}
/**
* Perform: ELL(A)x = y
*/
void ELLmatvecmult(int N, int num_cols_per_row , int * indices,
float * data , float * x , float * y) {
int *indices_d;
float *data_d, *x_d, *y_d;
/************************/
/* copy to device */
/************************/
hipMalloc((void **) &indices_d, N * num_cols_per_row * sizeof(int));
checkCUDAError();
hipMemcpy(indices_d, indices, N * num_cols_per_row * sizeof(int), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &data_d, N * num_cols_per_row * sizeof(float));
checkCUDAError();
hipMemcpy(data_d, data, N * num_cols_per_row * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &x_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(x_d, x, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
hipMalloc((void **) &y_d, N * sizeof(float));
checkCUDAError();
hipMemcpy(y_d, y, N * sizeof(float), hipMemcpyHostToDevice);
checkCUDAError();
/************************/
/* start kernel */
/************************/
//NYI: define grid and block size
//k_ell_mat_vec_mm <<< grid, block >>> (N, num_cols_per_row, indices_d, data_d , x_d, y_d);
checkCUDAError();
/************************/
/* copy back */
/************************/
hipMemcpy(y, y_d, N * sizeof(float), hipMemcpyDeviceToHost);
checkCUDAError();
/************************/
/* free memory */
/************************/
hipFree(indices_d);
hipFree(data_d);
hipFree(x_d);
hipFree(y_d);
} | .text
.file "kernels.hip"
.globl _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_ # -- Begin function _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.p2align 4, 0x90
.type _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_,@function
_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_: # @_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 4(%rsp)
movq %r8, 64(%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end0-_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.cfi_endproc
# -- End function
.globl _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_ # -- Begin function _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.p2align 4, 0x90
.type _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_,@function
_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_: # @_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 4(%rsp)
movq %r8, 64(%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end1-_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.cfi_endproc
# -- End function
.globl _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_ # -- Begin function _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.p2align 4, 0x90
.type _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_,@function
_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_: # @_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16k_ell_mat_vec_mmiiPiPfS0_S0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_, .Lfunc_end2-_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.cfi_endproc
# -- End function
.globl CSRmatvecmult # -- Begin function CSRmatvecmult
.p2align 4, 0x90
.type CSRmatvecmult,@function
CSRmatvecmult: # @CSRmatvecmult
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 208(%rsp) # 8-byte Spill
movl %r8d, %ebp
# kill: def $ecx killed $ecx def $rcx
movq %rdx, 200(%rsp) # 8-byte Spill
movq %rsi, %r13
movq %rdi, %r15
movq %rcx, %r12
movslq %ecx, %rbx
leaq 4(,%rbx,4), %r14
leaq 48(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $121, %esi
xorl %eax, %eax
callq printf
.LBB3_2:
movq 48(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_4
# %bb.3:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $123, %esi
xorl %eax, %eax
callq printf
.LBB3_4:
movslq %ebp, %rbp
shlq $2, %rbp
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_6
# %bb.5:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $126, %esi
xorl %eax, %eax
callq printf
.LBB3_6:
movq 40(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $128, %esi
xorl %eax, %eax
callq printf
.LBB3_8:
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_10
# %bb.9:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $131, %esi
xorl %eax, %eax
callq printf
.LBB3_10:
movq 32(%rsp), %rdi
movq 200(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_12
# %bb.11:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $133, %esi
xorl %eax, %eax
callq printf
.LBB3_12:
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_14
# %bb.13:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $136, %esi
xorl %eax, %eax
callq printf
.LBB3_14:
movq 24(%rsp), %rdi
movq 208(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_16
# %bb.15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $138, %esi
xorl %eax, %eax
callq printf
.LBB3_16:
movq 272(%rsp), %r15
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_18
# %bb.17:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $141, %esi
xorl %eax, %eax
callq printf
.LBB3_18:
movzbl 280(%rsp), %ebp
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_20
# %bb.19:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $143, %esi
xorl %eax, %eax
callq printf
.LBB3_20:
leal -1(%r12), %eax
testb %bpl, %bpl
je .LBB3_23
# %bb.21:
leal 31(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
incl %edi
shlq $32, %rdi
orq $1, %rdi
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.22:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 12(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 112(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %edi
jmp .LBB3_25
.LBB3_23:
leal 511(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $9, %edi
incl %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.24:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 12(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 112(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %edi
.LBB3_25:
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_26:
callq hipGetLastError
testl %eax, %eax
je .LBB3_28
# %bb.27:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $162, %esi
xorl %eax, %eax
callq printf
.LBB3_28:
movq 16(%rsp), %rsi
movq %r15, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_30
# %bb.29:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $169, %esi
xorl %eax, %eax
callq printf
.LBB3_30:
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size CSRmatvecmult, .Lfunc_end3-CSRmatvecmult
.cfi_endproc
# -- End function
.globl ELLmatvecmult # -- Begin function ELLmatvecmult
.p2align 4, 0x90
.type ELLmatvecmult,@function
ELLmatvecmult: # @ELLmatvecmult
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, %rbx
movq %r8, %r14
movq %rcx, %r15
movq %rdx, %r13
movl %edi, %ebp
imull %edi, %esi
movslq %esi, %r12
shlq $2, %r12
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $194, %esi
xorl %eax, %eax
callq printf
.LBB4_2:
movq 32(%rsp), %rdi
movq %r13, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_4
# %bb.3:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $196, %esi
xorl %eax, %eax
callq printf
.LBB4_4:
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_6
# %bb.5:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $199, %esi
xorl %eax, %eax
callq printf
.LBB4_6:
movq 24(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $201, %esi
xorl %eax, %eax
callq printf
.LBB4_8:
movslq %ebp, %r15
shlq $2, %r15
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_10
# %bb.9:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $204, %esi
xorl %eax, %eax
callq printf
.LBB4_10:
movq 16(%rsp), %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_12
# %bb.11:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $206, %esi
xorl %eax, %eax
callq printf
.LBB4_12:
leaq 8(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_14
# %bb.13:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $209, %esi
xorl %eax, %eax
callq printf
.LBB4_14:
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_16
# %bb.15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $211, %esi
xorl %eax, %eax
callq printf
.LBB4_16:
callq hipGetLastError
testl %eax, %eax
je .LBB4_18
# %bb.17:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $219, %esi
xorl %eax, %eax
callq printf
.LBB4_18:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_20
# %bb.19:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $226, %esi
xorl %eax, %eax
callq printf
.LBB4_20:
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size ELLmatvecmult, .Lfunc_end4-ELLmatvecmult
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16k_ell_mat_vec_mmiiPiPfS0_S0_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_,@object # @_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.section .rodata,"a",@progbits
.globl _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.p2align 3, 0x0
_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_:
.quad _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.size _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, 8
.type _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_,@object # @_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.globl _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.p2align 3, 0x0
_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_:
.quad _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.size _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, 8
.type _Z16k_ell_mat_vec_mmiiPiPfS0_S0_,@object # @_Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.globl _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.p2align 3, 0x0
_Z16k_ell_mat_vec_mmiiPiPfS0_S0_:
.quad _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.size _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "(CUDA) %s"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " (/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/PhuNH/hpc-aa/master/t3/kernels.hip:%d)\n"
.size .L.str.1, 99
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_"
.size .L__unnamed_1, 34
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_"
.size .L__unnamed_2, 35
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16k_ell_mat_vec_mmiiPiPfS0_S0_"
.size .L__unnamed_3, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0000c4a1_00000000-6_kernels.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2061:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2061:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "(CUDA) %s"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string " (/home/ubuntu/Datasets/stackv2/train-structured/PhuNH/hpc-aa/master/t3/kernels.cu:%d)\n"
.text
.globl ELLmatvecmult
.type ELLmatvecmult, @function
ELLmatvecmult:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $56, %rsp
.cfi_def_cfa_offset 112
movl %edi, %ebx
movq %rdx, %r15
movq %rcx, %r14
movq %r8, %r13
movq %r9, %r12
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
imull %edi, %esi
movslq %esi, %rbp
salq $2, %rbp
leaq 8(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L16
.L4:
movl $1, %ecx
movq %rbp, %rdx
movq %r15, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L17
.L5:
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L18
.L6:
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L19
.L7:
movslq %ebx, %rbx
salq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L20
.L8:
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L21
.L9:
leaq 32(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L22
.L10:
movl $1, %ecx
movq %rbx, %rdx
movq %r12, %rsi
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L23
.L11:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L24
.L12:
movl $2, %ecx
movq %rbx, %rdx
movq 32(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L25
.L13:
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $194, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L4
.L17:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $196, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L5
.L18:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $199, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L6
.L19:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $201, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L7
.L20:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $204, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L8
.L21:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $206, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L9
.L22:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $209, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L10
.L23:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $211, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L11
.L24:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $219, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L12
.L25:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $226, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L13
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size ELLmatvecmult, .-ELLmatvecmult
.globl _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.type _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, @function
_Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_:
.LFB2083:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2083:
.size _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, .-_Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.globl _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.type _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, @function
_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, .-_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.globl _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.type _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, @function
_Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_:
.LFB2085:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L39
.L35:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L35
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2085:
.size _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_, .-_Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
.globl _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.type _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, @function
_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_:
.LFB2086:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2086:
.size _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, .-_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.globl CSRmatvecmult
.type CSRmatvecmult, @function
CSRmatvecmult:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, %r13
movq %rsi, 8(%rsp)
movq %rdx, 16(%rsp)
movl %ecx, %r12d
movl %r8d, %ebp
movq %r9, 24(%rsp)
movq 176(%rsp), %r14
movl 184(%rsp), %r15d
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leal 1(%rcx), %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L62
.L44:
movl $1, %ecx
movq %rbx, %rdx
movq %r13, %rsi
movq 40(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L63
.L45:
movslq %ebp, %rbp
salq $2, %rbp
leaq 48(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L64
.L46:
movl $1, %ecx
movq %rbp, %rdx
movq 8(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L65
.L47:
leaq 56(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L66
.L48:
movl $1, %ecx
movq %rbp, %rdx
movq 16(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L67
.L49:
subq $4, %rbx
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L68
.L50:
movl $1, %ecx
movq %rbx, %rdx
movq 24(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L69
.L51:
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L70
.L52:
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L71
.L53:
testb %r15b, %r15b
je .L54
movl $1, 80(%rsp)
leal 30(%r12), %eax
movl %r12d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $5, %eax
addl $1, %eax
movl %eax, 84(%rsp)
movl $1, 88(%rsp)
movl $32, 92(%rsp)
movl $32, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L72
.L56:
call cudaGetLastError@PLT
testl %eax, %eax
jne .L73
.L58:
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq %r14, %rdi
call cudaMemcpy@PLT
call cudaGetLastError@PLT
testl %eax, %eax
jne .L74
.L59:
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L75
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $121, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L44
.L63:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $123, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L45
.L64:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $126, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L46
.L65:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $128, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L47
.L66:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $131, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L48
.L67:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $133, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L49
.L68:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $136, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L50
.L69:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $138, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L51
.L70:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $141, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L52
.L71:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $143, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L53
.L72:
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movl %r12d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z48__device_stub__Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
jmp .L56
.L54:
leal 510(%r12), %eax
movl %r12d, %edx
subl $1, %edx
cmovns %edx, %eax
sarl $9, %eax
addl $1, %eax
movl %eax, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $512, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L56
movq 72(%rsp), %r9
movq 64(%rsp), %r8
movl %r12d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z47__device_stub__Z16k_csr_mat_vec_mmPiS_PfiS0_S0_PiS_PfiS0_S0_
jmp .L56
.L73:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $162, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L58
.L74:
movl %eax, %edi
call cudaGetErrorString@PLT
movq %rax, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $169, %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L59
.L75:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size CSRmatvecmult, .-CSRmatvecmult
.globl _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
.type _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_, @function
_Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_:
.LFB2087:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L80
.L76:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L81
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L80:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z16k_ell_mat_vec_mmiiPiPfS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L76
.L81:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_, .-_Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
.globl _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.type _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, @function
_Z16k_ell_mat_vec_mmiiPiPfS0_S0_:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z16k_ell_mat_vec_mmiiPiPfS0_S0_iiPiPfS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, .-_Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z16k_ell_mat_vec_mmiiPiPfS0_S0_"
.align 8
.LC3:
.string "_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_"
.align 8
.LC4:
.string "_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z16k_ell_mat_vec_mmiiPiPfS0_S0_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernels.hip"
.globl _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_ # -- Begin function _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.p2align 4, 0x90
.type _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_,@function
_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_: # @_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 4(%rsp)
movq %r8, 64(%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end0-_Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.cfi_endproc
# -- End function
.globl _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_ # -- Begin function _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.p2align 4, 0x90
.type _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_,@function
_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_: # @_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movl %ecx, 4(%rsp)
movq %r8, 64(%rsp)
movq %r9, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
leaq 64(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end1:
.size _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_, .Lfunc_end1-_Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.cfi_endproc
# -- End function
.globl _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_ # -- Begin function _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.p2align 4, 0x90
.type _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_,@function
_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_: # @_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 12(%rsp)
movl %esi, 8(%rsp)
movq %rdx, 88(%rsp)
movq %rcx, 80(%rsp)
movq %r8, 72(%rsp)
movq %r9, 64(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 8(%rsp), %rax
movq %rax, 104(%rsp)
leaq 88(%rsp), %rax
movq %rax, 112(%rsp)
leaq 80(%rsp), %rax
movq %rax, 120(%rsp)
leaq 72(%rsp), %rax
movq %rax, 128(%rsp)
leaq 64(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z16k_ell_mat_vec_mmiiPiPfS0_S0_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end2:
.size _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_, .Lfunc_end2-_Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.cfi_endproc
# -- End function
.globl CSRmatvecmult # -- Begin function CSRmatvecmult
.p2align 4, 0x90
.type CSRmatvecmult,@function
CSRmatvecmult: # @CSRmatvecmult
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 208(%rsp) # 8-byte Spill
movl %r8d, %ebp
# kill: def $ecx killed $ecx def $rcx
movq %rdx, 200(%rsp) # 8-byte Spill
movq %rsi, %r13
movq %rdi, %r15
movq %rcx, %r12
movslq %ecx, %rbx
leaq 4(,%rbx,4), %r14
leaq 48(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $121, %esi
xorl %eax, %eax
callq printf
.LBB3_2:
movq 48(%rsp), %rdi
movq %r15, %rsi
movq %r14, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_4
# %bb.3:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $123, %esi
xorl %eax, %eax
callq printf
.LBB3_4:
movslq %ebp, %rbp
shlq $2, %rbp
leaq 40(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_6
# %bb.5:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $126, %esi
xorl %eax, %eax
callq printf
.LBB3_6:
movq 40(%rsp), %rdi
movq %r13, %rsi
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $128, %esi
xorl %eax, %eax
callq printf
.LBB3_8:
leaq 32(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_10
# %bb.9:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $131, %esi
xorl %eax, %eax
callq printf
.LBB3_10:
movq 32(%rsp), %rdi
movq 200(%rsp), %rsi # 8-byte Reload
movq %rbp, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_12
# %bb.11:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $133, %esi
xorl %eax, %eax
callq printf
.LBB3_12:
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_14
# %bb.13:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $136, %esi
xorl %eax, %eax
callq printf
.LBB3_14:
movq 24(%rsp), %rdi
movq 208(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_16
# %bb.15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $138, %esi
xorl %eax, %eax
callq printf
.LBB3_16:
movq 272(%rsp), %r15
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB3_18
# %bb.17:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $141, %esi
xorl %eax, %eax
callq printf
.LBB3_18:
movzbl 280(%rsp), %ebp
movq 16(%rsp), %rdi
movq %r15, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_20
# %bb.19:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $143, %esi
xorl %eax, %eax
callq printf
.LBB3_20:
leal -1(%r12), %eax
testb %bpl, %bpl
je .LBB3_23
# %bb.21:
leal 31(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $5, %edi
incl %edi
shlq $32, %rdi
orq $1, %rdi
movabsq $137438953504, %rdx # imm = 0x2000000020
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.22:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 12(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 112(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %edi
jmp .LBB3_25
.LBB3_23:
leal 511(%rax), %edi
testl %eax, %eax
cmovnsl %eax, %edi
sarl $9, %edi
incl %edi
movabsq $4294967296, %rdx # imm = 0x100000000
orq %rdx, %rdi
orq $512, %rdx # imm = 0x200
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_26
# %bb.24:
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq %rax, 136(%rsp)
movq %rcx, 128(%rsp)
movq %rdx, 120(%rsp)
movl %r12d, 12(%rsp)
movq %rsi, 112(%rsp)
movq %rdi, 104(%rsp)
leaq 136(%rsp), %rax
movq %rax, 144(%rsp)
leaq 128(%rsp), %rax
movq %rax, 152(%rsp)
leaq 120(%rsp), %rax
movq %rax, 160(%rsp)
leaq 12(%rsp), %rax
movq %rax, 168(%rsp)
leaq 112(%rsp), %rax
movq %rax, 176(%rsp)
leaq 104(%rsp), %rax
movq %rax, 184(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 144(%rsp), %r9
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %edi
.LBB3_25:
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_26:
callq hipGetLastError
testl %eax, %eax
je .LBB3_28
# %bb.27:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $162, %esi
xorl %eax, %eax
callq printf
.LBB3_28:
movq 16(%rsp), %rsi
movq %r15, %rdi
movq %rbx, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB3_30
# %bb.29:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $169, %esi
xorl %eax, %eax
callq printf
.LBB3_30:
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size CSRmatvecmult, .Lfunc_end3-CSRmatvecmult
.cfi_endproc
# -- End function
.globl ELLmatvecmult # -- Begin function ELLmatvecmult
.p2align 4, 0x90
.type ELLmatvecmult,@function
ELLmatvecmult: # @ELLmatvecmult
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $40, %rsp
.cfi_def_cfa_offset 96
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, %rbx
movq %r8, %r14
movq %rcx, %r15
movq %rdx, %r13
movl %edi, %ebp
imull %edi, %esi
movslq %esi, %r12
shlq $2, %r12
leaq 32(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_2
# %bb.1:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $194, %esi
xorl %eax, %eax
callq printf
.LBB4_2:
movq 32(%rsp), %rdi
movq %r13, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_4
# %bb.3:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $196, %esi
xorl %eax, %eax
callq printf
.LBB4_4:
leaq 24(%rsp), %rdi
movq %r12, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_6
# %bb.5:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $199, %esi
xorl %eax, %eax
callq printf
.LBB4_6:
movq 24(%rsp), %rdi
movq %r15, %rsi
movq %r12, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_8
# %bb.7:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $201, %esi
xorl %eax, %eax
callq printf
.LBB4_8:
movslq %ebp, %r15
shlq $2, %r15
leaq 16(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_10
# %bb.9:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $204, %esi
xorl %eax, %eax
callq printf
.LBB4_10:
movq 16(%rsp), %rdi
movq %r14, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_12
# %bb.11:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $206, %esi
xorl %eax, %eax
callq printf
.LBB4_12:
leaq 8(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
callq hipGetLastError
testl %eax, %eax
je .LBB4_14
# %bb.13:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $209, %esi
xorl %eax, %eax
callq printf
.LBB4_14:
movq 8(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_16
# %bb.15:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $211, %esi
xorl %eax, %eax
callq printf
.LBB4_16:
callq hipGetLastError
testl %eax, %eax
je .LBB4_18
# %bb.17:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $219, %esi
xorl %eax, %eax
callq printf
.LBB4_18:
movq 8(%rsp), %rsi
movq %rbx, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipGetLastError
testl %eax, %eax
je .LBB4_20
# %bb.19:
movl %eax, %edi
callq hipGetErrorString
movl $.L.str, %edi
movq %rax, %rsi
xorl %eax, %eax
callq printf
movl $.L.str.1, %edi
movl $226, %esi
xorl %eax, %eax
callq printf
.LBB4_20:
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size ELLmatvecmult, .Lfunc_end4-ELLmatvecmult
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16k_ell_mat_vec_mmiiPiPfS0_S0_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_,@object # @_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.section .rodata,"a",@progbits
.globl _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.p2align 3, 0x0
_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_:
.quad _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.size _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_, 8
.type _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_,@object # @_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.globl _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.p2align 3, 0x0
_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_:
.quad _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.size _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_, 8
.type _Z16k_ell_mat_vec_mmiiPiPfS0_S0_,@object # @_Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.globl _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.p2align 3, 0x0
_Z16k_ell_mat_vec_mmiiPiPfS0_S0_:
.quad _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.size _Z16k_ell_mat_vec_mmiiPiPfS0_S0_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "(CUDA) %s"
.size .L.str, 10
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " (/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/PhuNH/hpc-aa/master/t3/kernels.hip:%d)\n"
.size .L.str.1, 99
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z16k_csr_mat_vec_mmPiS_PfiS0_S0_"
.size .L__unnamed_1, 34
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_"
.size .L__unnamed_2, 35
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z16k_ell_mat_vec_mmiiPiPfS0_S0_"
.size .L__unnamed_3, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__k_csr_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z32__device_stub__k_csr2_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z31__device_stub__k_ell_mat_vec_mmiiPiPfS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16k_csr_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z17k_csr2_mat_vec_mmPiS_PfiS0_S0_
.addrsig_sym _Z16k_ell_mat_vec_mmiiPiPfS0_S0_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} | code for sm_80 |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} | .file "tmpxft_00087c7a_00000000-6_work.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2033:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2033:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z6work_0PfS_i
.type _Z6work_0PfS_i, @function
_Z6work_0PfS_i:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z6work_0PfS_i, .-_Z6work_0PfS_i
.globl _Z6work_1PfS_i
.type _Z6work_1PfS_i, @function
_Z6work_1PfS_i:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z6work_1PfS_i, .-_Z6work_1PfS_i
.globl _Z6work_2PfS_i
.type _Z6work_2PfS_i, @function
_Z6work_2PfS_i:
.LFB2029:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2029:
.size _Z6work_2PfS_i, .-_Z6work_2PfS_i
.globl _Z6work_3PfS_i
.type _Z6work_3PfS_i, @function
_Z6work_3PfS_i:
.LFB2030:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2030:
.size _Z6work_3PfS_i, .-_Z6work_3PfS_i
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} | #include <hip/hip_runtime.h>
#define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#define N 1
#define FLOAT_T float
__device__ FLOAT_T work_0(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_1(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
v1 += v2;
rst[threadIdx.x] = v1;
v2 += v1;
rst[threadIdx.x+1] = v2;
v1 += v2;
v2 += v1;
}
value = v2;
return value;
}
__device__ FLOAT_T work_2(FLOAT_T *data, FLOAT_T *rst, int idx)
{
FLOAT_T value = 0.0;
FLOAT_T v1 = 1, v2 = 1;
v1 = data[idx];
for (int i=0; i<N; ++i) {
rst[threadIdx.x] = v1;
v1 += v2;
rst[threadIdx.x+1] = v1;
rst[threadIdx.x+2] = v2;
v2 += v1;
rst[threadIdx.x+3] = v2;
}
value = v2;
return value;
}
__device__ FLOAT_T work_3(FLOAT_T *data, FLOAT_T *rst, int idx)
{
return 1.0;
} | .text
.file "work.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80 | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00087c7a_00000000-6_work.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2033:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2033:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z6work_0PfS_i
.type _Z6work_0PfS_i, @function
_Z6work_0PfS_i:
.LFB2027:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2027:
.size _Z6work_0PfS_i, .-_Z6work_0PfS_i
.globl _Z6work_1PfS_i
.type _Z6work_1PfS_i, @function
_Z6work_1PfS_i:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z6work_1PfS_i, .-_Z6work_1PfS_i
.globl _Z6work_2PfS_i
.type _Z6work_2PfS_i, @function
_Z6work_2PfS_i:
.LFB2029:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2029:
.size _Z6work_2PfS_i, .-_Z6work_2PfS_i
.globl _Z6work_3PfS_i
.type _Z6work_3PfS_i, @function
_Z6work_3PfS_i:
.LFB2030:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2030:
.size _Z6work_3PfS_i, .-_Z6work_3PfS_i
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2056:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "work.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
void printDevProp(cudaDeviceProp devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %u bytes\n", devProp.totalGlobalMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total shared memory per block: %u\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
void printMatrix(float *A, int dim) {
printf("[\n");
for (int i=0; i<dim; i++) {
printf(" [");
for (int j=0; j<dim; j++) {
printf("%.2f, ", A[i*dim + j]);
}
printf("]\n");
}
printf("]\n");
}
void populateMatrix(float *A, int dim) {
// Generate the values
for (int i=0; i<dim; i++) {
for (int j=0; j<dim; j++) {
A[i*dim + j] = (float) rand() / (float) (RAND_MAX / 100);
}
}
}
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_A[idx] = d_B[idx] + d_C[idx];
}
}
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
int i = 0;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < rows) {
for(i = 0;i<rows;i++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = 0;
if (i < rows) {
for(j = 0;j<rows;j++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocate device memory
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, rows*rows*sizeof(float));
cudaMalloc(&d_B, rows*rows*sizeof(float));
cudaMalloc(&d_C, rows*rows*sizeof(float));
// Copy values to device memory
cudaMemcpy(d_B, B, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
// Call kernel function
int size = rows*rows;
int numBlocks = (int) (rows/blockSize) + 1;
dim3 threadsPerBlock(blockSize,1);
cudaEventRecord(start);
if (kernel_choice == 0) {
int numBlocks = (int) (size/blockSize) + 1;
kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
} else if (kernel_choice == 1) {
kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
} else if (kernel_choice == 2) {
kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Get return value
cudaMemcpy(A, d_A, rows*rows*sizeof(float), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return milliseconds;
}
int main() {
// Device Query first
int deviceCount;
int blockSize = 1024;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
blockSize = deviceProp.maxThreadsPerBlock;
}
// In my (Francis) local machine there is only one CUDA machine, so I'll hardcode that one here
// Allocate memory
const int rows = 64;
const int cols = rows;
float *A, *B, *C;
A = (float*) malloc(sizeof(float) * rows * cols);
B = (float*) malloc(sizeof(float) * rows * cols);
C = (float*) malloc(sizeof(float) * rows * cols);
// Call the host function
// Benchmarking
int kernel = 0;
int runs = 10;
double time_spent = 0.0;
double ave_time = 0.0;
printf("\n");
while (kernel < 3) {
printf("#%d:\t", kernel);
for (int run=0; run<runs; run++) {
populateMatrix(B, rows);
populateMatrix(C, rows);
time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
ave_time += time_spent;
printf("%.4f\t", time_spent);
}
ave_time /= runs;
printf("Ave: %.4f\n", ave_time);
kernel++;
}
// Free memory
free(A);
free(B);
free(C);
printf("\nDone!\n");
} | .file "tmpxft_00105364_00000000-6_cs171-me1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Major revision number: %d\n"
.align 8
.LC2:
.string "Minor revision number: %d\n"
.align 8
.LC3:
.string "Total global memory: %u bytes\n"
.align 8
.LC4:
.string "Number of multiprocessors: %d\n"
.align 8
.LC5:
.string "Total shared memory per block: %u\n"
.align 8
.LC6:
.string "Total registers per block: %d\n"
.align 8
.LC7:
.string "Warp size: %d\n"
.align 8
.LC8:
.string "Maximum memory pitch: %u\n"
.align 8
.LC9:
.string "Total constant memory: %u\n"
.align 8
.LC10:
.string "Maximum threads per block: %d\n"
.align 8
.LC11:
.string "Maximum threads per dimension: %d,%d,%d\n"
.text
.globl _Z12printDevProp14cudaDeviceProp
.type _Z12printDevProp14cudaDeviceProp, @function
_Z12printDevProp14cudaDeviceProp:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq 16(%rsp), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 376(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 328(%rsp), %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %r8d
movl 344(%rsp), %ecx
movl 340(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z12printDevProp14cudaDeviceProp, .-_Z12printDevProp14cudaDeviceProp
.section .rodata.str1.1
.LC12:
.string "[\n"
.LC13:
.string " ["
.LC14:
.string "%.2f, "
.LC15:
.string "]\n"
.text
.globl _Z11printMatrixPfi
.type _Z11printMatrixPfi, @function
_Z11printMatrixPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movl %esi, %ebx
movl %esi, 12(%rsp)
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L6
movslq %ebx, %r14
leaq 0(,%r14,4), %r15
addq %r15, %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC14(%rip), %r12
.L8:
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 0(%rbp,%r14), %rbx
.L7:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L7
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L8
.L6:
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z11printMatrixPfi, .-_Z11printMatrixPfi
.globl _Z14populateMatrixPfi
.type _Z14populateMatrixPfi, @function
_Z14populateMatrixPfi:
.LFB2059:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L18
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %esi, %r14d
movslq %esi, %r13
leaq 0(,%r13,4), %r15
leaq (%rdi,%r15), %rbp
negq %r13
salq $2, %r13
movl $0, %r12d
.L14:
leaq 0(%rbp,%r13), %rbx
.L15:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC16(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L15
addl $1, %r12d
addq %r15, %rbp
cmpl %r12d, %r14d
jne .L14
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2059:
.size _Z14populateMatrixPfi, .-_Z14populateMatrixPfi
.globl _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1ePfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
.globl _Z11kernel_1t1ePfS_S_i
.type _Z11kernel_1t1ePfS_S_i, @function
_Z11kernel_1t1ePfS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z11kernel_1t1ePfS_S_i, .-_Z11kernel_1t1ePfS_S_i
.globl _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1rPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
.globl _Z11kernel_1t1rPfS_S_i
.type _Z11kernel_1t1rPfS_S_i, @function
_Z11kernel_1t1rPfS_S_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z11kernel_1t1rPfS_S_i, .-_Z11kernel_1t1rPfS_S_i
.globl _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i:
.LFB2090:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1cPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
.globl _Z11kernel_1t1cPfS_S_i
.type _Z11kernel_1t1cPfS_S_i, @function
_Z11kernel_1t1cPfS_S_i:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z11kernel_1t1cPfS_S_i, .-_Z11kernel_1t1cPfS_S_i
.globl _Z12hostFunctionPfS_S_iii
.type _Z12hostFunctionPfS_S_iii, @function
_Z12hostFunctionPfS_S_iii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdi, (%rsp)
movq %rsi, %r14
movq %rdx, 8(%rsp)
movl %ecx, %ebp
movl %r8d, %r12d
movl %r9d, %r13d
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl %ebp, %r15d
imull %ebp, %r15d
movslq %r15d, %rbx
salq $2, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
testl %r13d, %r13d
je .L54
movl %ebp, %eax
cltd
idivl %r12d
addl $1, %eax
cmpl $1, %r13d
je .L55
cmpl $2, %r13d
je .L56
.L48:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
call cudaDeviceSynchronize@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 76(%rsp)
leaq 76(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L57
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
movl %r15d, %eax
cltd
idivl %r12d
addl $1, %eax
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %r15d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
jmp .L48
.L55:
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %ebp, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
jmp .L48
.L56:
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %ebp, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
jmp .L48
.L57:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z12hostFunctionPfS_S_iii, .-_Z12hostFunctionPfS_S_iii
.section .rodata.str1.1
.LC19:
.string "\n"
.LC20:
.string "#%d:\t"
.LC21:
.string "%.4f\t"
.LC23:
.string "Ave: %.4f\n"
.LC24:
.string "\nDone!\n"
.text
.globl main
.type main, @function
main:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1080, %rsp
.cfi_def_cfa_offset 1136
movq %fs:40, %rax
movq %rax, 1064(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 28(%rsp)
jle .L64
movl $0, %ebx
leaq 32(%rsp), %rbp
.L60:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
subq $1040, %rsp
.cfi_def_cfa_offset 2176
movl $129, %ecx
movq %rsp, %rdi
movq %rbp, %rsi
rep movsq
call _Z12printDevProp14cudaDeviceProp
movl 1392(%rsp), %r14d
addl $1, %ebx
addq $1040, %rsp
.cfi_def_cfa_offset 1136
cmpl %ebx, 28(%rsp)
jg .L60
.L59:
movl $16384, %edi
call malloc@PLT
movq %rax, %r15
movl $16384, %edi
call malloc@PLT
movq %rax, %r12
movl $16384, %edi
call malloc@PLT
movq %rax, %rbp
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq $0x000000000, 8(%rsp)
movl $0, %r13d
.L62:
movl %r13d, %edx
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10, %ebx
.L61:
movl $64, %esi
movq %r12, %rdi
call _Z14populateMatrixPfi
movl $64, %esi
movq %rbp, %rdi
call _Z14populateMatrixPfi
movl %r13d, %r9d
movl %r14d, %r8d
movl $64, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq %r15, %rdi
call _Z12hostFunctionPfS_S_iii
movsd 8(%rsp), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp)
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L61
movsd 8(%rsp), %xmm2
divsd .LC22(%rip), %xmm2
movsd %xmm2, 8(%rsp)
movapd %xmm2, %xmm0
leaq .LC23(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addl $1, %r13d
cmpl $3, %r13d
jne .L62
movq %r15, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
leaq .LC24(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 1064(%rsp), %rax
subq %fs:40, %rax
jne .L69
movl $0, %eax
addq $1080, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L64:
.cfi_restore_state
movl $1024, %r14d
jmp .L59
.L69:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size main, .-main
.section .rodata.str1.1
.LC25:
.string "_Z11kernel_1t1cPfS_S_i"
.LC26:
.string "_Z11kernel_1t1rPfS_S_i"
.LC27:
.string "_Z11kernel_1t1ePfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC25(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1cPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC26(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1rPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC27(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1ePfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC16:
.long 1269028618
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC22:
.long 0
.long 1076101120
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
void printDevProp(cudaDeviceProp devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %u bytes\n", devProp.totalGlobalMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total shared memory per block: %u\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
void printMatrix(float *A, int dim) {
printf("[\n");
for (int i=0; i<dim; i++) {
printf(" [");
for (int j=0; j<dim; j++) {
printf("%.2f, ", A[i*dim + j]);
}
printf("]\n");
}
printf("]\n");
}
void populateMatrix(float *A, int dim) {
// Generate the values
for (int i=0; i<dim; i++) {
for (int j=0; j<dim; j++) {
A[i*dim + j] = (float) rand() / (float) (RAND_MAX / 100);
}
}
}
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_A[idx] = d_B[idx] + d_C[idx];
}
}
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
int i = 0;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < rows) {
for(i = 0;i<rows;i++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = 0;
if (i < rows) {
for(j = 0;j<rows;j++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Allocate device memory
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, rows*rows*sizeof(float));
cudaMalloc(&d_B, rows*rows*sizeof(float));
cudaMalloc(&d_C, rows*rows*sizeof(float));
// Copy values to device memory
cudaMemcpy(d_B, B, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
// Call kernel function
int size = rows*rows;
int numBlocks = (int) (rows/blockSize) + 1;
dim3 threadsPerBlock(blockSize,1);
cudaEventRecord(start);
if (kernel_choice == 0) {
int numBlocks = (int) (size/blockSize) + 1;
kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
} else if (kernel_choice == 1) {
kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
} else if (kernel_choice == 2) {
kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
}
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
// Get return value
cudaMemcpy(A, d_A, rows*rows*sizeof(float), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return milliseconds;
}
int main() {
// Device Query first
int deviceCount;
int blockSize = 1024;
cudaGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
blockSize = deviceProp.maxThreadsPerBlock;
}
// In my (Francis) local machine there is only one CUDA machine, so I'll hardcode that one here
// Allocate memory
const int rows = 64;
const int cols = rows;
float *A, *B, *C;
A = (float*) malloc(sizeof(float) * rows * cols);
B = (float*) malloc(sizeof(float) * rows * cols);
C = (float*) malloc(sizeof(float) * rows * cols);
// Call the host function
// Benchmarking
int kernel = 0;
int runs = 10;
double time_spent = 0.0;
double ave_time = 0.0;
printf("\n");
while (kernel < 3) {
printf("#%d:\t", kernel);
for (int run=0; run<runs; run++) {
populateMatrix(B, rows);
populateMatrix(C, rows);
time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
ave_time += time_spent;
printf("%.4f\t", time_spent);
}
ave_time /= runs;
printf("Ave: %.4f\n", ave_time);
kernel++;
}
// Free memory
free(A);
free(B);
free(C);
printf("\nDone!\n");
} | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
void printDevProp(hipDeviceProp_t devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %u bytes\n", devProp.totalGlobalMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total shared memory per block: %u\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
void printMatrix(float *A, int dim) {
printf("[\n");
for (int i=0; i<dim; i++) {
printf(" [");
for (int j=0; j<dim; j++) {
printf("%.2f, ", A[i*dim + j]);
}
printf("]\n");
}
printf("]\n");
}
void populateMatrix(float *A, int dim) {
// Generate the values
for (int i=0; i<dim; i++) {
for (int j=0; j<dim; j++) {
A[i*dim + j] = (float) rand() / (float) (RAND_MAX / 100);
}
}
}
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_A[idx] = d_B[idx] + d_C[idx];
}
}
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
int i = 0;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < rows) {
for(i = 0;i<rows;i++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = 0;
if (i < rows) {
for(j = 0;j<rows;j++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Allocate device memory
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, rows*rows*sizeof(float));
hipMalloc(&d_B, rows*rows*sizeof(float));
hipMalloc(&d_C, rows*rows*sizeof(float));
// Copy values to device memory
hipMemcpy(d_B, B, rows*rows*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, rows*rows*sizeof(float), hipMemcpyHostToDevice);
// Call kernel function
int size = rows*rows;
int numBlocks = (int) (rows/blockSize) + 1;
dim3 threadsPerBlock(blockSize,1);
hipEventRecord(start);
if (kernel_choice == 0) {
int numBlocks = (int) (size/blockSize) + 1;
kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
} else if (kernel_choice == 1) {
kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
} else if (kernel_choice == 2) {
kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
}
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Get return value
hipMemcpy(A, d_A, rows*rows*sizeof(float), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return milliseconds;
}
int main() {
// Device Query first
int deviceCount;
int blockSize = 1024;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
blockSize = deviceProp.maxThreadsPerBlock;
}
// In my (Francis) local machine there is only one CUDA machine, so I'll hardcode that one here
// Allocate memory
const int rows = 64;
const int cols = rows;
float *A, *B, *C;
A = (float*) malloc(sizeof(float) * rows * cols);
B = (float*) malloc(sizeof(float) * rows * cols);
C = (float*) malloc(sizeof(float) * rows * cols);
// Call the host function
// Benchmarking
int kernel = 0;
int runs = 10;
double time_spent = 0.0;
double ave_time = 0.0;
printf("\n");
while (kernel < 3) {
printf("#%d:\t", kernel);
for (int run=0; run<runs; run++) {
populateMatrix(B, rows);
populateMatrix(C, rows);
time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
ave_time += time_spent;
printf("%.4f\t", time_spent);
}
ave_time /= runs;
printf("Ave: %.4f\n", ave_time);
kernel++;
}
// Free memory
free(A);
free(B);
free(C);
printf("\nDone!\n");
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
void printDevProp(hipDeviceProp_t devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %u bytes\n", devProp.totalGlobalMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total shared memory per block: %u\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
void printMatrix(float *A, int dim) {
printf("[\n");
for (int i=0; i<dim; i++) {
printf(" [");
for (int j=0; j<dim; j++) {
printf("%.2f, ", A[i*dim + j]);
}
printf("]\n");
}
printf("]\n");
}
void populateMatrix(float *A, int dim) {
// Generate the values
for (int i=0; i<dim; i++) {
for (int j=0; j<dim; j++) {
A[i*dim + j] = (float) rand() / (float) (RAND_MAX / 100);
}
}
}
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_A[idx] = d_B[idx] + d_C[idx];
}
}
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
int i = 0;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < rows) {
for(i = 0;i<rows;i++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = 0;
if (i < rows) {
for(j = 0;j<rows;j++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Allocate device memory
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, rows*rows*sizeof(float));
hipMalloc(&d_B, rows*rows*sizeof(float));
hipMalloc(&d_C, rows*rows*sizeof(float));
// Copy values to device memory
hipMemcpy(d_B, B, rows*rows*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, rows*rows*sizeof(float), hipMemcpyHostToDevice);
// Call kernel function
int size = rows*rows;
int numBlocks = (int) (rows/blockSize) + 1;
dim3 threadsPerBlock(blockSize,1);
hipEventRecord(start);
if (kernel_choice == 0) {
int numBlocks = (int) (size/blockSize) + 1;
kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
} else if (kernel_choice == 1) {
kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
} else if (kernel_choice == 2) {
kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
}
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Get return value
hipMemcpy(A, d_A, rows*rows*sizeof(float), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return milliseconds;
}
int main() {
// Device Query first
int deviceCount;
int blockSize = 1024;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
blockSize = deviceProp.maxThreadsPerBlock;
}
// In my (Francis) local machine there is only one CUDA machine, so I'll hardcode that one here
// Allocate memory
const int rows = 64;
const int cols = rows;
float *A, *B, *C;
A = (float*) malloc(sizeof(float) * rows * cols);
B = (float*) malloc(sizeof(float) * rows * cols);
C = (float*) malloc(sizeof(float) * rows * cols);
// Call the host function
// Benchmarking
int kernel = 0;
int runs = 10;
double time_spent = 0.0;
double ave_time = 0.0;
printf("\n");
while (kernel < 3) {
printf("#%d:\t", kernel);
for (int run=0; run<runs; run++) {
populateMatrix(B, rows);
populateMatrix(C, rows);
time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
ave_time += time_spent;
printf("%.4f\t", time_spent);
}
ave_time /= runs;
printf("Ave: %.4f\n", ave_time);
kernel++;
}
// Free memory
free(A);
free(B);
free(C);
printf("\nDone!\n");
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z11kernel_1t1ePfS_S_i
.globl _Z11kernel_1t1ePfS_S_i
.p2align 8
.type _Z11kernel_1t1ePfS_S_i,@function
_Z11kernel_1t1ePfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kernel_1t1ePfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z11kernel_1t1ePfS_S_i, .Lfunc_end0-_Z11kernel_1t1ePfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11kernel_1t1rPfS_S_i
.globl _Z11kernel_1t1rPfS_S_i
.p2align 8
.type _Z11kernel_1t1rPfS_S_i,@function
_Z11kernel_1t1rPfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v0, 0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB1_3
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_mov_b32 s3, s2
.p2align 6
.LBB1_2:
v_ashrrev_i32_e32 v2, 31, v1
s_add_i32 s3, s3, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_lg_u32 s3, 0
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v1, s2, v1
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v4, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v3, vcc_lo
v_add_co_u32 v6, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s1, v3, vcc_lo
global_load_b32 v0, v[4:5], off
global_load_b32 v4, v[6:7], off
v_add_co_u32 v2, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v0, v0, v4
global_store_b32 v[2:3], v0, off
s_cbranch_scc1 .LBB1_2
.LBB1_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kernel_1t1rPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z11kernel_1t1rPfS_S_i, .Lfunc_end1-_Z11kernel_1t1rPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z11kernel_1t1cPfS_S_i
.globl _Z11kernel_1t1cPfS_S_i
.p2align 8
.type _Z11kernel_1t1cPfS_S_i,@function
_Z11kernel_1t1cPfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v0, 0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v0
s_cbranch_execz .LBB2_3
s_load_b128 s[4:7], s[0:1], 0x0
v_mul_lo_u32 v0, v1, s2
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[4:5], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s7, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo
v_add_co_u32 v4, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s5, v5, vcc_lo
.p2align 6
.LBB2_2:
global_load_b32 v6, v[0:1], off
global_load_b32 v7, v[2:3], off
v_add_co_u32 v0, vcc_lo, v0, 4
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_add_i32 s2, s2, -1
s_delay_alu instid0(SALU_CYCLE_1)
s_cmp_lg_u32 s2, 0
s_waitcnt vmcnt(0)
v_add_f32_e32 v6, v6, v7
global_store_b32 v[4:5], v6, off
v_add_co_u32 v4, vcc_lo, v4, 4
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
s_cbranch_scc1 .LBB2_2
.LBB2_3:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z11kernel_1t1cPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z11kernel_1t1cPfS_S_i, .Lfunc_end2-_Z11kernel_1t1cPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kernel_1t1ePfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kernel_1t1ePfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kernel_1t1rPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kernel_1t1rPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z11kernel_1t1cPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z11kernel_1t1cPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
void printDevProp(hipDeviceProp_t devProp) {
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %u bytes\n", devProp.totalGlobalMem);
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total shared memory per block: %u\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %u\n", devProp.memPitch);
printf("Total constant memory: %u\n", devProp.totalConstMem);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
return;
}
void printMatrix(float *A, int dim) {
printf("[\n");
for (int i=0; i<dim; i++) {
printf(" [");
for (int j=0; j<dim; j++) {
printf("%.2f, ", A[i*dim + j]);
}
printf("]\n");
}
printf("]\n");
}
void populateMatrix(float *A, int dim) {
// Generate the values
for (int i=0; i<dim; i++) {
for (int j=0; j<dim; j++) {
A[i*dim + j] = (float) rand() / (float) (RAND_MAX / 100);
}
}
}
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_A[idx] = d_B[idx] + d_C[idx];
}
}
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
int i = 0;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < rows) {
for(i = 0;i<rows;i++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = 0;
if (i < rows) {
for(j = 0;j<rows;j++){
d_A[i*rows + j] = d_B[i*rows + j] + d_C[i*rows + j];
}
}
}
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Allocate device memory
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, rows*rows*sizeof(float));
hipMalloc(&d_B, rows*rows*sizeof(float));
hipMalloc(&d_C, rows*rows*sizeof(float));
// Copy values to device memory
hipMemcpy(d_B, B, rows*rows*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, rows*rows*sizeof(float), hipMemcpyHostToDevice);
// Call kernel function
int size = rows*rows;
int numBlocks = (int) (rows/blockSize) + 1;
dim3 threadsPerBlock(blockSize,1);
hipEventRecord(start);
if (kernel_choice == 0) {
int numBlocks = (int) (size/blockSize) + 1;
kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
} else if (kernel_choice == 1) {
kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
} else if (kernel_choice == 2) {
kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
}
hipEventRecord(stop);
hipDeviceSynchronize();
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
// Get return value
hipMemcpy(A, d_A, rows*rows*sizeof(float), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return milliseconds;
}
int main() {
// Device Query first
int deviceCount;
int blockSize = 1024;
hipGetDeviceCount(&deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printDevProp(deviceProp);
blockSize = deviceProp.maxThreadsPerBlock;
}
// In my (Francis) local machine there is only one CUDA machine, so I'll hardcode that one here
// Allocate memory
const int rows = 64;
const int cols = rows;
float *A, *B, *C;
A = (float*) malloc(sizeof(float) * rows * cols);
B = (float*) malloc(sizeof(float) * rows * cols);
C = (float*) malloc(sizeof(float) * rows * cols);
// Call the host function
// Benchmarking
int kernel = 0;
int runs = 10;
double time_spent = 0.0;
double ave_time = 0.0;
printf("\n");
while (kernel < 3) {
printf("#%d:\t", kernel);
for (int run=0; run<runs; run++) {
populateMatrix(B, rows);
populateMatrix(C, rows);
time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
ave_time += time_spent;
printf("%.4f\t", time_spent);
}
ave_time /= runs;
printf("Ave: %.4f\n", ave_time);
kernel++;
}
// Free memory
free(A);
free(B);
free(C);
printf("\nDone!\n");
} | .text
.file "cs171-me1.hip"
.globl _Z12printDevProp20hipDeviceProp_tR0600 # -- Begin function _Z12printDevProp20hipDeviceProp_tR0600
.p2align 4, 0x90
.type _Z12printDevProp20hipDeviceProp_tR0600,@function
_Z12printDevProp20hipDeviceProp_tR0600: # @_Z12printDevProp20hipDeviceProp_tR0600
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
leaq 16(%rsp), %rdi
callq puts@PLT
movl 376(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 380(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 404(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movq 312(%rsp), %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 320(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 324(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movq 328(%rsp), %rsi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movq 368(%rsp), %rsi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl 344(%rsp), %edx
movl 348(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
popq %r8
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end0:
.size _Z12printDevProp20hipDeviceProp_tR0600, .Lfunc_end0-_Z12printDevProp20hipDeviceProp_tR0600
.cfi_endproc
# -- End function
.globl _Z11printMatrixPfi # -- Begin function _Z11printMatrixPfi
.p2align 4, 0x90
.type _Z11printMatrixPfi,@function
_Z11printMatrixPfi: # @_Z11printMatrixPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movq %rdi, (%rsp) # 8-byte Spill
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB1_5
# %bb.1: # %.lr.ph16
movl %ebx, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r13,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.14, %edi
movb $1, %al
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB1_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
movl $.Lstr.2, %edi
callq puts@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB1_2
.LBB1_5: # %._crit_edge17
movl $.Lstr.2, %edi
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end1:
.size _Z11printMatrixPfi, .Lfunc_end1-_Z11printMatrixPfi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14populateMatrixPfi
.LCPI2_0:
.long 0x4ba3d70a # float 21474836
.text
.globl _Z14populateMatrixPfi
.p2align 4, 0x90
.type _Z14populateMatrixPfi,@function
_Z14populateMatrixPfi: # @_Z14populateMatrixPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB2_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %r12d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl %r12d, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss %xmm1, %xmm0
movss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq %r14, %r15
jne .LBB2_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r13
addl %ebx, %r12d
cmpq %r15, %r13
jne .LBB2_2
.LBB2_5: # %._crit_edge13
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z14populateMatrixPfi, .Lfunc_end2-_Z14populateMatrixPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1ePfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1ePfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1ePfS_S_i,@function
_Z26__device_stub__kernel_1t1ePfS_S_i: # @_Z26__device_stub__kernel_1t1ePfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1ePfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z26__device_stub__kernel_1t1ePfS_S_i, .Lfunc_end3-_Z26__device_stub__kernel_1t1ePfS_S_i
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1rPfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1rPfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1rPfS_S_i,@function
_Z26__device_stub__kernel_1t1rPfS_S_i: # @_Z26__device_stub__kernel_1t1rPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1rPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z26__device_stub__kernel_1t1rPfS_S_i, .Lfunc_end4-_Z26__device_stub__kernel_1t1rPfS_S_i
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1cPfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1cPfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1cPfS_S_i,@function
_Z26__device_stub__kernel_1t1cPfS_S_i: # @_Z26__device_stub__kernel_1t1cPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1cPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end5:
.size _Z26__device_stub__kernel_1t1cPfS_S_i, .Lfunc_end5-_Z26__device_stub__kernel_1t1cPfS_S_i
.cfi_endproc
# -- End function
.globl _Z12hostFunctionPfS_S_iii # -- Begin function _Z12hostFunctionPfS_S_iii
.p2align 4, 0x90
.type _Z12hostFunctionPfS_S_iii,@function
_Z12hostFunctionPfS_S_iii: # @_Z12hostFunctionPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r13d
movl %r8d, %r12d
movl %ecx, %r14d
movq %rdx, 152(%rsp) # 8-byte Spill
movq %rsi, %rbp
movq %rdi, 176(%rsp) # 8-byte Spill
leaq 168(%rsp), %rdi
callq hipEventCreate
leaq 160(%rsp), %rdi
callq hipEventCreate
movl %r14d, %r15d
imull %r15d, %r15d
leaq (,%r15,4), %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbp, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq 152(%rsp), %rsi # 8-byte Reload
movq %rbx, 152(%rsp) # 8-byte Spill
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r14d, 108(%rsp) # 4-byte Spill
movl %r14d, %eax
movabsq $4294967296, %r14 # imm = 0x100000000
cltd
idivl %r12d
movl %eax, %ebp
incl %ebp
movl %r12d, %ebx
orq %r14, %rbx
movq 168(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
cmpl $2, %r13d
je .LBB6_7
# %bb.1:
cmpl $1, %r13d
je .LBB6_5
# %bb.2:
testl %r13d, %r13d
jne .LBB6_10
# %bb.3:
movl %r15d, %eax
xorl %edx, %edx
idivl %r12d
# kill: def $eax killed $eax def $rax
leal 1(%rax), %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.4:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl %r15d, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1ePfS_S_i, %edi
jmp .LBB6_9
.LBB6_5:
movl %ebp, %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.6:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl 108(%rsp), %eax # 4-byte Reload
movl %eax, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1rPfS_S_i, %edi
jmp .LBB6_9
.LBB6_7:
movl %ebp, %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.8:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl 108(%rsp), %eax # 4-byte Reload
movl %eax, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1cPfS_S_i, %edi
.LBB6_9:
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_10:
movq 160(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
callq hipDeviceSynchronize
movq 160(%rsp), %rdi
callq hipEventSynchronize
movl $0, 112(%rsp)
movq 168(%rsp), %rsi
movq 160(%rsp), %rdx
leaq 112(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movq 176(%rsp), %rdi # 8-byte Reload
movq 152(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z12hostFunctionPfS_S_iii, .Lfunc_end6-_Z12hostFunctionPfS_S_iii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI7_0:
.long 0x4ba3d70a # float 21474836
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI7_1:
.quad 0x4024000000000000 # double 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $2984, %rsp # imm = 0xBA8
.cfi_def_cfa_offset 3040
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 1484(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 1484(%rsp)
jle .LBB7_1
# %bb.2: # %.lr.ph
xorl %ebx, %ebx
leaq 1512(%rsp), %r14
.p2align 4, 0x90
.LBB7_3: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %ebx, %esi
callq hipGetDevicePropertiesR0600
movl $184, %ecx
movq %rsp, %rdi
movq %r14, %rsi
rep
callq _Z12printDevProp20hipDeviceProp_tR0600
incl %ebx
cmpl 1484(%rsp), %ebx
jl .LBB7_3
# %bb.4:
movl 1832(%rsp), %eax
movl %eax, 1480(%rsp) # 4-byte Spill
jmp .LBB7_5
.LBB7_1:
movl $1024, 1480(%rsp) # 4-byte Folded Spill
# imm = 0x400
.LBB7_5: # %._crit_edge
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, 1504(%rsp) # 8-byte Spill
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, %r15
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, 1496(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
xorpd %xmm0, %xmm0
movsd %xmm0, 1488(%rsp) # 8-byte Spill
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB7_6: # =>This Loop Header: Depth=1
# Child Loop BB7_7 Depth 2
# Child Loop BB7_8 Depth 3
# Child Loop BB7_9 Depth 4
# Child Loop BB7_12 Depth 3
# Child Loop BB7_13 Depth 4
movl $.L.str.17, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB7_7: # %.preheader.i.preheader
# Parent Loop BB7_6 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB7_8 Depth 3
# Child Loop BB7_9 Depth 4
# Child Loop BB7_12 Depth 3
# Child Loop BB7_13 Depth 4
movq %r15, %r12
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_8: # %.preheader.i
# Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# => This Loop Header: Depth=3
# Child Loop BB7_9 Depth 4
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB7_9: # Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# Parent Loop BB7_8 Depth=3
# => This Inner Loop Header: Depth=4
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI7_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq $64, %rbp
jne .LBB7_9
# %bb.10: # %._crit_edge.i
# in Loop: Header=BB7_8 Depth=3
incq %r14
addq $256, %r15 # imm = 0x100
cmpq $64, %r14
jne .LBB7_8
# %bb.11: # %.preheader.i29.preheader
# in Loop: Header=BB7_7 Depth=2
movq 1496(%rsp), %r15 # 8-byte Reload
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_12: # %.preheader.i29
# Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# => This Loop Header: Depth=3
# Child Loop BB7_13 Depth 4
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB7_13: # Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# Parent Loop BB7_12 Depth=3
# => This Inner Loop Header: Depth=4
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI7_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq $64, %rbp
jne .LBB7_13
# %bb.14: # %._crit_edge.i34
# in Loop: Header=BB7_12 Depth=3
incq %r14
addq $256, %r15 # imm = 0x100
cmpq $64, %r14
jne .LBB7_12
# %bb.15: # %_Z14populateMatrixPfi.exit37
# in Loop: Header=BB7_7 Depth=2
movq 1504(%rsp), %rdi # 8-byte Reload
movq %r12, %r15
movq %r12, %rsi
movq 1496(%rsp), %rdx # 8-byte Reload
movl $64, %ecx
movl 1480(%rsp), %r8d # 4-byte Reload
movl %ebx, %r9d
callq _Z12hostFunctionPfS_S_iii
movsd 1488(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, 1488(%rsp) # 8-byte Spill
movl $.L.str.18, %edi
movb $1, %al
callq printf
incl %r13d
cmpl $10, %r13d
jne .LBB7_7
# %bb.16: # in Loop: Header=BB7_6 Depth=1
movsd 1488(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
divsd .LCPI7_1(%rip), %xmm0
movl $.L.str.19, %edi
movsd %xmm0, 1488(%rsp) # 8-byte Spill
movb $1, %al
callq printf
incl %ebx
cmpl $3, %ebx
jne .LBB7_6
# %bb.17:
movq 1504(%rsp), %rdi # 8-byte Reload
callq free
movq %r15, %rdi
callq free
movq 1496(%rsp), %rdi # 8-byte Reload
callq free
movl $.Lstr.3, %edi
callq puts@PLT
xorl %eax, %eax
addq $2984, %rsp # imm = 0xBA8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1ePfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1rPfS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1cPfS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Major revision number: %d\n"
.size .L.str.1, 35
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Minor revision number: %d\n"
.size .L.str.2, 35
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Total global memory: %u bytes\n"
.size .L.str.3, 41
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Number of multiprocessors: %d\n"
.size .L.str.4, 35
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Total shared memory per block: %u\n"
.size .L.str.5, 35
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Total registers per block: %d\n"
.size .L.str.6, 35
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Warp size: %d\n"
.size .L.str.7, 35
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Maximum memory pitch: %u\n"
.size .L.str.8, 35
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Total constant memory: %u\n"
.size .L.str.9, 35
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Maximum threads per block: %d\n"
.size .L.str.10, 35
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Maximum threads per dimension: %d,%d,%d\n"
.size .L.str.11, 41
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz " ["
.size .L.str.13, 4
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "%.2f, "
.size .L.str.14, 7
.type _Z11kernel_1t1ePfS_S_i,@object # @_Z11kernel_1t1ePfS_S_i
.section .rodata,"a",@progbits
.globl _Z11kernel_1t1ePfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1ePfS_S_i:
.quad _Z26__device_stub__kernel_1t1ePfS_S_i
.size _Z11kernel_1t1ePfS_S_i, 8
.type _Z11kernel_1t1rPfS_S_i,@object # @_Z11kernel_1t1rPfS_S_i
.globl _Z11kernel_1t1rPfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1rPfS_S_i:
.quad _Z26__device_stub__kernel_1t1rPfS_S_i
.size _Z11kernel_1t1rPfS_S_i, 8
.type _Z11kernel_1t1cPfS_S_i,@object # @_Z11kernel_1t1cPfS_S_i
.globl _Z11kernel_1t1cPfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1cPfS_S_i:
.quad _Z26__device_stub__kernel_1t1cPfS_S_i
.size _Z11kernel_1t1cPfS_S_i, 8
.type .L.str.17,@object # @.str.17
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.17:
.asciz "#%d:\t"
.size .L.str.17, 6
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "%.4f\t"
.size .L.str.18, 6
.type .L.str.19,@object # @.str.19
.L.str.19:
.asciz "Ave: %.4f\n"
.size .L.str.19, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11kernel_1t1ePfS_S_i"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11kernel_1t1rPfS_S_i"
.size .L__unnamed_2, 23
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z11kernel_1t1cPfS_S_i"
.size .L__unnamed_3, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "["
.size .Lstr, 2
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "]"
.size .Lstr.2, 2
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nDone!"
.size .Lstr.3, 7
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__kernel_1t1ePfS_S_i
.addrsig_sym _Z26__device_stub__kernel_1t1rPfS_S_i
.addrsig_sym _Z26__device_stub__kernel_1t1cPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11kernel_1t1ePfS_S_i
.addrsig_sym _Z11kernel_1t1rPfS_S_i
.addrsig_sym _Z11kernel_1t1cPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00105364_00000000-6_cs171-me1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "%s\n"
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "Major revision number: %d\n"
.align 8
.LC2:
.string "Minor revision number: %d\n"
.align 8
.LC3:
.string "Total global memory: %u bytes\n"
.align 8
.LC4:
.string "Number of multiprocessors: %d\n"
.align 8
.LC5:
.string "Total shared memory per block: %u\n"
.align 8
.LC6:
.string "Total registers per block: %d\n"
.align 8
.LC7:
.string "Warp size: %d\n"
.align 8
.LC8:
.string "Maximum memory pitch: %u\n"
.align 8
.LC9:
.string "Total constant memory: %u\n"
.align 8
.LC10:
.string "Maximum threads per block: %d\n"
.align 8
.LC11:
.string "Maximum threads per dimension: %d,%d,%d\n"
.text
.globl _Z12printDevProp14cudaDeviceProp
.type _Z12printDevProp14cudaDeviceProp, @function
_Z12printDevProp14cudaDeviceProp:
.LFB2057:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq 16(%rsp), %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 376(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 380(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 304(%rsp), %rdx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 404(%rsp), %edx
leaq .LC4(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 312(%rsp), %rdx
leaq .LC5(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 320(%rsp), %edx
leaq .LC6(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 324(%rsp), %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 328(%rsp), %rdx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 368(%rsp), %rdx
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 336(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl 348(%rsp), %r8d
movl 344(%rsp), %ecx
movl 340(%rsp), %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z12printDevProp14cudaDeviceProp, .-_Z12printDevProp14cudaDeviceProp
.section .rodata.str1.1
.LC12:
.string "[\n"
.LC13:
.string " ["
.LC14:
.string "%.2f, "
.LC15:
.string "]\n"
.text
.globl _Z11printMatrixPfi
.type _Z11printMatrixPfi, @function
_Z11printMatrixPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rdi, %rbp
movl %esi, %ebx
movl %esi, 12(%rsp)
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
testl %ebx, %ebx
jle .L6
movslq %ebx, %r14
leaq 0(,%r14,4), %r15
addq %r15, %rbp
negq %r14
salq $2, %r14
movl $0, %r13d
leaq .LC14(%rip), %r12
.L8:
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 0(%rbp,%r14), %rbx
.L7:
pxor %xmm0, %xmm0
cvtss2sd (%rbx), %xmm0
movq %r12, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $4, %rbx
cmpq %rbp, %rbx
jne .L7
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %r13d
addq %r15, %rbp
cmpl %r13d, 12(%rsp)
jne .L8
.L6:
leaq .LC15(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z11printMatrixPfi, .-_Z11printMatrixPfi
.globl _Z14populateMatrixPfi
.type _Z14populateMatrixPfi, @function
_Z14populateMatrixPfi:
.LFB2059:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L18
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %esi, %r14d
movslq %esi, %r13
leaq 0(,%r13,4), %r15
leaq (%rdi,%r15), %rbp
negq %r13
salq $2, %r13
movl $0, %r12d
.L14:
leaq 0(%rbp,%r13), %rbx
.L15:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC16(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L15
addl $1, %r12d
addq %r15, %rbp
cmpl %r12d, %r14d
jne .L14
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2059:
.size _Z14populateMatrixPfi, .-_Z14populateMatrixPfi
.globl _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i:
.LFB2086:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1ePfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
.globl _Z11kernel_1t1ePfS_S_i
.type _Z11kernel_1t1ePfS_S_i, @function
_Z11kernel_1t1ePfS_S_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z11kernel_1t1ePfS_S_i, .-_Z11kernel_1t1ePfS_S_i
.globl _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i:
.LFB2088:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1rPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
.globl _Z11kernel_1t1rPfS_S_i
.type _Z11kernel_1t1rPfS_S_i, @function
_Z11kernel_1t1rPfS_S_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z11kernel_1t1rPfS_S_i, .-_Z11kernel_1t1rPfS_S_i
.globl _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
.type _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i, @function
_Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i:
.LFB2090:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L41
.L37:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L42
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z11kernel_1t1cPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L37
.L42:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i, .-_Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
.globl _Z11kernel_1t1cPfS_S_i
.type _Z11kernel_1t1cPfS_S_i, @function
_Z11kernel_1t1cPfS_S_i:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z11kernel_1t1cPfS_S_i, .-_Z11kernel_1t1cPfS_S_i
.globl _Z12hostFunctionPfS_S_iii
.type _Z12hostFunctionPfS_S_iii, @function
_Z12hostFunctionPfS_S_iii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $104, %rsp
.cfi_def_cfa_offset 160
movq %rdi, (%rsp)
movq %rsi, %r14
movq %rdx, 8(%rsp)
movl %ecx, %ebp
movl %r8d, %r12d
movl %r9d, %r13d
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl %ebp, %r15d
imull %ebp, %r15d
movslq %r15d, %rbx
salq $2, %rbx
leaq 40(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 48(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbx, %rdx
movq %r14, %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %rbx, %rdx
movq 8(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
testl %r13d, %r13d
je .L54
movl %ebp, %eax
cltd
idivl %r12d
addl $1, %eax
cmpl $1, %r13d
je .L55
cmpl $2, %r13d
je .L56
.L48:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
call cudaDeviceSynchronize@PLT
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
movl $0x00000000, 76(%rsp)
leaq 76(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 40(%rsp), %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
pxor %xmm0, %xmm0
cvtss2sd 76(%rsp), %xmm0
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L57
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
movl %r15d, %eax
cltd
idivl %r12d
addl $1, %eax
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %r15d, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1ePfS_S_iPfS_S_i
jmp .L48
.L55:
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %ebp, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1rPfS_S_iPfS_S_i
jmp .L48
.L56:
movl %eax, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl 72(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 64(%rsp), %rdx
movq 76(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L48
movl %ebp, %ecx
movq 56(%rsp), %rdx
movq 48(%rsp), %rsi
movq 40(%rsp), %rdi
call _Z36__device_stub__Z11kernel_1t1cPfS_S_iPfS_S_i
jmp .L48
.L57:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z12hostFunctionPfS_S_iii, .-_Z12hostFunctionPfS_S_iii
.section .rodata.str1.1
.LC19:
.string "\n"
.LC20:
.string "#%d:\t"
.LC21:
.string "%.4f\t"
.LC23:
.string "Ave: %.4f\n"
.LC24:
.string "\nDone!\n"
.text
.globl main
.type main, @function
main:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1080, %rsp
.cfi_def_cfa_offset 1136
movq %fs:40, %rax
movq %rax, 1064(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 28(%rsp)
jle .L64
movl $0, %ebx
leaq 32(%rsp), %rbp
.L60:
movl %ebx, %esi
movq %rbp, %rdi
call cudaGetDeviceProperties_v2@PLT
subq $1040, %rsp
.cfi_def_cfa_offset 2176
movl $129, %ecx
movq %rsp, %rdi
movq %rbp, %rsi
rep movsq
call _Z12printDevProp14cudaDeviceProp
movl 1392(%rsp), %r14d
addl $1, %ebx
addq $1040, %rsp
.cfi_def_cfa_offset 1136
cmpl %ebx, 28(%rsp)
jg .L60
.L59:
movl $16384, %edi
call malloc@PLT
movq %rax, %r15
movl $16384, %edi
call malloc@PLT
movq %rax, %r12
movl $16384, %edi
call malloc@PLT
movq %rax, %rbp
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq $0x000000000, 8(%rsp)
movl $0, %r13d
.L62:
movl %r13d, %edx
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $10, %ebx
.L61:
movl $64, %esi
movq %r12, %rdi
call _Z14populateMatrixPfi
movl $64, %esi
movq %rbp, %rdi
call _Z14populateMatrixPfi
movl %r13d, %r9d
movl %r14d, %r8d
movl $64, %ecx
movq %rbp, %rdx
movq %r12, %rsi
movq %r15, %rdi
call _Z12hostFunctionPfS_S_iii
movsd 8(%rsp), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 8(%rsp)
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
subl $1, %ebx
jne .L61
movsd 8(%rsp), %xmm2
divsd .LC22(%rip), %xmm2
movsd %xmm2, 8(%rsp)
movapd %xmm2, %xmm0
leaq .LC23(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addl $1, %r13d
cmpl $3, %r13d
jne .L62
movq %r15, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
leaq .LC24(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 1064(%rsp), %rax
subq %fs:40, %rax
jne .L69
movl $0, %eax
addq $1080, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L64:
.cfi_restore_state
movl $1024, %r14d
jmp .L59
.L69:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size main, .-main
.section .rodata.str1.1
.LC25:
.string "_Z11kernel_1t1cPfS_S_i"
.LC26:
.string "_Z11kernel_1t1rPfS_S_i"
.LC27:
.string "_Z11kernel_1t1ePfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC25(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1cPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC26(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1rPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC27(%rip), %rdx
movq %rdx, %rcx
leaq _Z11kernel_1t1ePfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC16:
.long 1269028618
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC22:
.long 0
.long 1076101120
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cs171-me1.hip"
.globl _Z12printDevProp20hipDeviceProp_tR0600 # -- Begin function _Z12printDevProp20hipDeviceProp_tR0600
.p2align 4, 0x90
.type _Z12printDevProp20hipDeviceProp_tR0600,@function
_Z12printDevProp20hipDeviceProp_tR0600: # @_Z12printDevProp20hipDeviceProp_tR0600
.cfi_startproc
# %bb.0:
pushq %rax
.cfi_def_cfa_offset 16
leaq 16(%rsp), %rdi
callq puts@PLT
movl 376(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movl 380(%rsp), %esi
movl $.L.str.2, %edi
xorl %eax, %eax
callq printf
movq 304(%rsp), %rsi
movl $.L.str.3, %edi
xorl %eax, %eax
callq printf
movl 404(%rsp), %esi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movq 312(%rsp), %rsi
movl $.L.str.5, %edi
xorl %eax, %eax
callq printf
movl 320(%rsp), %esi
movl $.L.str.6, %edi
xorl %eax, %eax
callq printf
movl 324(%rsp), %esi
movl $.L.str.7, %edi
xorl %eax, %eax
callq printf
movq 328(%rsp), %rsi
movl $.L.str.8, %edi
xorl %eax, %eax
callq printf
movq 368(%rsp), %rsi
movl $.L.str.9, %edi
xorl %eax, %eax
callq printf
movl 336(%rsp), %esi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
movl 340(%rsp), %esi
movl 344(%rsp), %edx
movl 348(%rsp), %ecx
movl $.L.str.11, %edi
xorl %eax, %eax
popq %r8
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end0:
.size _Z12printDevProp20hipDeviceProp_tR0600, .Lfunc_end0-_Z12printDevProp20hipDeviceProp_tR0600
.cfi_endproc
# -- End function
.globl _Z11printMatrixPfi # -- Begin function _Z11printMatrixPfi
.p2align 4, 0x90
.type _Z11printMatrixPfi,@function
_Z11printMatrixPfi: # @_Z11printMatrixPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebx
movq %rdi, (%rsp) # 8-byte Spill
movl $.Lstr, %edi
callq puts@PLT
testl %ebx, %ebx
jle .LBB1_5
# %bb.1: # %.lr.ph16
movl %ebx, %r15d
xorl %ebp, %ebp
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
movl %ebp, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %r13
movl $.L.str.13, %edi
xorl %eax, %eax
callq printf
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB1_3: # Parent Loop BB1_2 Depth=1
# => This Inner Loop Header: Depth=2
movss (%r13,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.14, %edi
movb $1, %al
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB1_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB1_2 Depth=1
movl $.Lstr.2, %edi
callq puts@PLT
incq %r12
addl %ebx, %ebp
cmpq %r15, %r12
jne .LBB1_2
.LBB1_5: # %._crit_edge17
movl $.Lstr.2, %edi
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end1:
.size _Z11printMatrixPfi, .Lfunc_end1-_Z11printMatrixPfi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z14populateMatrixPfi
.LCPI2_0:
.long 0x4ba3d70a # float 21474836
.text
.globl _Z14populateMatrixPfi
.p2align 4, 0x90
.type _Z14populateMatrixPfi,@function
_Z14populateMatrixPfi: # @_Z14populateMatrixPfi
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, (%rsp) # 8-byte Spill
testl %esi, %esi
jle .LBB2_5
# %bb.1: # %.preheader.lr.ph
movl %esi, %ebx
movl %esi, %r15d
xorl %r12d, %r12d
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_2: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
movl %r12d, %eax
movq (%rsp), %rcx # 8-byte Reload
leaq (%rcx,%rax,4), %rbp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB2_3: # Parent Loop BB2_2 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI2_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss %xmm1, %xmm0
movss %xmm0, (%rbp,%r14,4)
incq %r14
cmpq %r14, %r15
jne .LBB2_3
# %bb.4: # %._crit_edge
# in Loop: Header=BB2_2 Depth=1
incq %r13
addl %ebx, %r12d
cmpq %r15, %r13
jne .LBB2_2
.LBB2_5: # %._crit_edge13
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z14populateMatrixPfi, .Lfunc_end2-_Z14populateMatrixPfi
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1ePfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1ePfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1ePfS_S_i,@function
_Z26__device_stub__kernel_1t1ePfS_S_i: # @_Z26__device_stub__kernel_1t1ePfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1ePfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end3:
.size _Z26__device_stub__kernel_1t1ePfS_S_i, .Lfunc_end3-_Z26__device_stub__kernel_1t1ePfS_S_i
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1rPfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1rPfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1rPfS_S_i,@function
_Z26__device_stub__kernel_1t1rPfS_S_i: # @_Z26__device_stub__kernel_1t1rPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1rPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end4:
.size _Z26__device_stub__kernel_1t1rPfS_S_i, .Lfunc_end4-_Z26__device_stub__kernel_1t1rPfS_S_i
.cfi_endproc
# -- End function
.globl _Z26__device_stub__kernel_1t1cPfS_S_i # -- Begin function _Z26__device_stub__kernel_1t1cPfS_S_i
.p2align 4, 0x90
.type _Z26__device_stub__kernel_1t1cPfS_S_i,@function
_Z26__device_stub__kernel_1t1cPfS_S_i: # @_Z26__device_stub__kernel_1t1cPfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z11kernel_1t1cPfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end5:
.size _Z26__device_stub__kernel_1t1cPfS_S_i, .Lfunc_end5-_Z26__device_stub__kernel_1t1cPfS_S_i
.cfi_endproc
# -- End function
.globl _Z12hostFunctionPfS_S_iii # -- Begin function _Z12hostFunctionPfS_S_iii
.p2align 4, 0x90
.type _Z12hostFunctionPfS_S_iii,@function
_Z12hostFunctionPfS_S_iii: # @_Z12hostFunctionPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %r9d, %r13d
movl %r8d, %r12d
movl %ecx, %r14d
movq %rdx, 152(%rsp) # 8-byte Spill
movq %rsi, %rbp
movq %rdi, 176(%rsp) # 8-byte Spill
leaq 168(%rsp), %rdi
callq hipEventCreate
leaq 160(%rsp), %rdi
callq hipEventCreate
movl %r14d, %r15d
imull %r15d, %r15d
leaq (,%r15,4), %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq %rbp, %rsi
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
movq 152(%rsp), %rsi # 8-byte Reload
movq %rbx, 152(%rsp) # 8-byte Spill
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %r14d, 108(%rsp) # 4-byte Spill
movl %r14d, %eax
movabsq $4294967296, %r14 # imm = 0x100000000
cltd
idivl %r12d
movl %eax, %ebp
incl %ebp
movl %r12d, %ebx
orq %r14, %rbx
movq 168(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
cmpl $2, %r13d
je .LBB6_7
# %bb.1:
cmpl $1, %r13d
je .LBB6_5
# %bb.2:
testl %r13d, %r13d
jne .LBB6_10
# %bb.3:
movl %r15d, %eax
xorl %edx, %edx
idivl %r12d
# kill: def $eax killed $eax def $rax
leal 1(%rax), %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.4:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl %r15d, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1ePfS_S_i, %edi
jmp .LBB6_9
.LBB6_5:
movl %ebp, %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.6:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl 108(%rsp), %eax # 4-byte Reload
movl %eax, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1rPfS_S_i, %edi
jmp .LBB6_9
.LBB6_7:
movl %ebp, %edi
orq %r14, %rdi
movl $1, %esi
movq %rbx, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_10
# %bb.8:
movq 24(%rsp), %rax
movq 16(%rsp), %rcx
movq 8(%rsp), %rdx
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
movq %rdx, 80(%rsp)
movl 108(%rsp), %eax # 4-byte Reload
movl %eax, 4(%rsp)
leaq 96(%rsp), %rax
movq %rax, 112(%rsp)
leaq 88(%rsp), %rax
movq %rax, 120(%rsp)
leaq 80(%rsp), %rax
movq %rax, 128(%rsp)
leaq 4(%rsp), %rax
movq %rax, 136(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 32(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z11kernel_1t1cPfS_S_i, %edi
.LBB6_9:
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_10:
movq 160(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
callq hipDeviceSynchronize
movq 160(%rsp), %rdi
callq hipEventSynchronize
movl $0, 112(%rsp)
movq 168(%rsp), %rsi
movq 160(%rsp), %rdx
leaq 112(%rsp), %rdi
callq hipEventElapsedTime
movq 24(%rsp), %rsi
movq 176(%rsp), %rdi # 8-byte Reload
movq 152(%rsp), %rdx # 8-byte Reload
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movss 112(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end6:
.size _Z12hostFunctionPfS_S_iii, .Lfunc_end6-_Z12hostFunctionPfS_S_iii
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI7_0:
.long 0x4ba3d70a # float 21474836
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI7_1:
.quad 0x4024000000000000 # double 10
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $2984, %rsp # imm = 0xBA8
.cfi_def_cfa_offset 3040
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 1484(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 1484(%rsp)
jle .LBB7_1
# %bb.2: # %.lr.ph
xorl %ebx, %ebx
leaq 1512(%rsp), %r14
.p2align 4, 0x90
.LBB7_3: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
movl %ebx, %esi
callq hipGetDevicePropertiesR0600
movl $184, %ecx
movq %rsp, %rdi
movq %r14, %rsi
rep
callq _Z12printDevProp20hipDeviceProp_tR0600
incl %ebx
cmpl 1484(%rsp), %ebx
jl .LBB7_3
# %bb.4:
movl 1832(%rsp), %eax
movl %eax, 1480(%rsp) # 4-byte Spill
jmp .LBB7_5
.LBB7_1:
movl $1024, 1480(%rsp) # 4-byte Folded Spill
# imm = 0x400
.LBB7_5: # %._crit_edge
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, 1504(%rsp) # 8-byte Spill
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, %r15
movl $16384, %edi # imm = 0x4000
callq malloc
movq %rax, 1496(%rsp) # 8-byte Spill
movl $10, %edi
callq putchar@PLT
xorpd %xmm0, %xmm0
movsd %xmm0, 1488(%rsp) # 8-byte Spill
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB7_6: # =>This Loop Header: Depth=1
# Child Loop BB7_7 Depth 2
# Child Loop BB7_8 Depth 3
# Child Loop BB7_9 Depth 4
# Child Loop BB7_12 Depth 3
# Child Loop BB7_13 Depth 4
movl $.L.str.17, %edi
movl %ebx, %esi
xorl %eax, %eax
callq printf
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB7_7: # %.preheader.i.preheader
# Parent Loop BB7_6 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB7_8 Depth 3
# Child Loop BB7_9 Depth 4
# Child Loop BB7_12 Depth 3
# Child Loop BB7_13 Depth 4
movq %r15, %r12
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_8: # %.preheader.i
# Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# => This Loop Header: Depth=3
# Child Loop BB7_9 Depth 4
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB7_9: # Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# Parent Loop BB7_8 Depth=3
# => This Inner Loop Header: Depth=4
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI7_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq $64, %rbp
jne .LBB7_9
# %bb.10: # %._crit_edge.i
# in Loop: Header=BB7_8 Depth=3
incq %r14
addq $256, %r15 # imm = 0x100
cmpq $64, %r14
jne .LBB7_8
# %bb.11: # %.preheader.i29.preheader
# in Loop: Header=BB7_7 Depth=2
movq 1496(%rsp), %r15 # 8-byte Reload
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB7_12: # %.preheader.i29
# Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# => This Loop Header: Depth=3
# Child Loop BB7_13 Depth 4
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB7_13: # Parent Loop BB7_6 Depth=1
# Parent Loop BB7_7 Depth=2
# Parent Loop BB7_12 Depth=3
# => This Inner Loop Header: Depth=4
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI7_0(%rip), %xmm0
movss %xmm0, (%r15,%rbp,4)
incq %rbp
cmpq $64, %rbp
jne .LBB7_13
# %bb.14: # %._crit_edge.i34
# in Loop: Header=BB7_12 Depth=3
incq %r14
addq $256, %r15 # imm = 0x100
cmpq $64, %r14
jne .LBB7_12
# %bb.15: # %_Z14populateMatrixPfi.exit37
# in Loop: Header=BB7_7 Depth=2
movq 1504(%rsp), %rdi # 8-byte Reload
movq %r12, %r15
movq %r12, %rsi
movq 1496(%rsp), %rdx # 8-byte Reload
movl $64, %ecx
movl 1480(%rsp), %r8d # 4-byte Reload
movl %ebx, %r9d
callq _Z12hostFunctionPfS_S_iii
movsd 1488(%rsp), %xmm1 # 8-byte Reload
# xmm1 = mem[0],zero
addsd %xmm0, %xmm1
movsd %xmm1, 1488(%rsp) # 8-byte Spill
movl $.L.str.18, %edi
movb $1, %al
callq printf
incl %r13d
cmpl $10, %r13d
jne .LBB7_7
# %bb.16: # in Loop: Header=BB7_6 Depth=1
movsd 1488(%rsp), %xmm0 # 8-byte Reload
# xmm0 = mem[0],zero
divsd .LCPI7_1(%rip), %xmm0
movl $.L.str.19, %edi
movsd %xmm0, 1488(%rsp) # 8-byte Spill
movb $1, %al
callq printf
incl %ebx
cmpl $3, %ebx
jne .LBB7_6
# %bb.17:
movq 1504(%rsp), %rdi # 8-byte Reload
callq free
movq %r15, %rdi
callq free
movq 1496(%rsp), %rdi # 8-byte Reload
callq free
movl $.Lstr.3, %edi
callq puts@PLT
xorl %eax, %eax
addq $2984, %rsp # imm = 0xBA8
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end7:
.size main, .Lfunc_end7-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB8_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB8_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1ePfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1rPfS_S_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z11kernel_1t1cPfS_S_i, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end8:
.size __hip_module_ctor, .Lfunc_end8-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB9_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB9_2:
retq
.Lfunc_end9:
.size __hip_module_dtor, .Lfunc_end9-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "Major revision number: %d\n"
.size .L.str.1, 35
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Minor revision number: %d\n"
.size .L.str.2, 35
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "Total global memory: %u bytes\n"
.size .L.str.3, 41
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "Number of multiprocessors: %d\n"
.size .L.str.4, 35
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "Total shared memory per block: %u\n"
.size .L.str.5, 35
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "Total registers per block: %d\n"
.size .L.str.6, 35
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "Warp size: %d\n"
.size .L.str.7, 35
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "Maximum memory pitch: %u\n"
.size .L.str.8, 35
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "Total constant memory: %u\n"
.size .L.str.9, 35
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Maximum threads per block: %d\n"
.size .L.str.10, 35
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "Maximum threads per dimension: %d,%d,%d\n"
.size .L.str.11, 41
.type .L.str.13,@object # @.str.13
.L.str.13:
.asciz " ["
.size .L.str.13, 4
.type .L.str.14,@object # @.str.14
.L.str.14:
.asciz "%.2f, "
.size .L.str.14, 7
.type _Z11kernel_1t1ePfS_S_i,@object # @_Z11kernel_1t1ePfS_S_i
.section .rodata,"a",@progbits
.globl _Z11kernel_1t1ePfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1ePfS_S_i:
.quad _Z26__device_stub__kernel_1t1ePfS_S_i
.size _Z11kernel_1t1ePfS_S_i, 8
.type _Z11kernel_1t1rPfS_S_i,@object # @_Z11kernel_1t1rPfS_S_i
.globl _Z11kernel_1t1rPfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1rPfS_S_i:
.quad _Z26__device_stub__kernel_1t1rPfS_S_i
.size _Z11kernel_1t1rPfS_S_i, 8
.type _Z11kernel_1t1cPfS_S_i,@object # @_Z11kernel_1t1cPfS_S_i
.globl _Z11kernel_1t1cPfS_S_i
.p2align 3, 0x0
_Z11kernel_1t1cPfS_S_i:
.quad _Z26__device_stub__kernel_1t1cPfS_S_i
.size _Z11kernel_1t1cPfS_S_i, 8
.type .L.str.17,@object # @.str.17
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.17:
.asciz "#%d:\t"
.size .L.str.17, 6
.type .L.str.18,@object # @.str.18
.L.str.18:
.asciz "%.4f\t"
.size .L.str.18, 6
.type .L.str.19,@object # @.str.19
.L.str.19:
.asciz "Ave: %.4f\n"
.size .L.str.19, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z11kernel_1t1ePfS_S_i"
.size .L__unnamed_1, 23
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z11kernel_1t1rPfS_S_i"
.size .L__unnamed_2, 23
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z11kernel_1t1cPfS_S_i"
.size .L__unnamed_3, 23
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "["
.size .Lstr, 2
.type .Lstr.2,@object # @str.2
.Lstr.2:
.asciz "]"
.size .Lstr.2, 2
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "\nDone!"
.size .Lstr.3, 7
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z26__device_stub__kernel_1t1ePfS_S_i
.addrsig_sym _Z26__device_stub__kernel_1t1rPfS_S_i
.addrsig_sym _Z26__device_stub__kernel_1t1cPfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z11kernel_1t1ePfS_S_i
.addrsig_sym _Z11kernel_1t1rPfS_S_i
.addrsig_sym _Z11kernel_1t1cPfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <cuda_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
cudaMalloc((void**)&d_a, (N*N)*sizeof(int));
cudaMalloc((void**)&d_b, (N*N)*sizeof(int));
cudaMalloc((void**)&d_c, (N*N)*sizeof(int));
cudaMemcpy(d_a, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
cudaMemcpy(c, d_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
show(a, b, c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cout<< endl;
return 0;
} | code for sm_80
Function : _Z3addPA20_iS0_S0_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-06 ; /* 0x00000050ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */
/* 0x000e6e0000002200 */
/*0050*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R9, 0x4, R2 ; /* 0x0000000409027825 */
/* 0x002fc800078e0202 */
/*0080*/ IMAD.WIDE R4, R9, 0x4, R4 ; /* 0x0000000409047825 */
/* 0x000fe400078e0204 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fcc00078e0207 */
/*00c0*/ IMAD.WIDE R6, R9, 0x4, R6 ; /* 0x0000000409067825 */
/* 0x000fe200078e0206 */
/*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <cuda_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
cudaMalloc((void**)&d_a, (N*N)*sizeof(int));
cudaMalloc((void**)&d_b, (N*N)*sizeof(int));
cudaMalloc((void**)&d_c, (N*N)*sizeof(int));
cudaMemcpy(d_a, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
cudaMemcpy(c, d_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
show(a, b, c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cout<< endl;
return 0;
} | .file "tmpxft_00197b7e_00000000-6_add-2dArrays.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z12random_initsPA20_i
.type _Z12random_initsPA20_i, @function
_Z12random_initsPA20_i:
.LFB3669:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
leaq 80(%rdi), %rbp
addq $1680, %r12
.L4:
leaq -80(%rbp), %rbx
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $34, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
addl %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
addq $80, %rbp
cmpq %r12, %rbp
jne .L4
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z12random_initsPA20_i, .-_Z12random_initsPA20_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "matrix["
.LC1:
.string "]["
.LC2:
.string "]"
.LC3:
.string " = "
.LC4:
.string " + "
.LC5:
.string "="
.LC6:
.string "\t"
.text
.globl _Z4showPA20_iS0_S0_
.type _Z4showPA20_iS0_S0_, @function
_Z4showPA20_iS0_S0_:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movq %rdi, 8(%rsp)
movl $0, 4(%rsp)
leaq _ZSt4cout(%rip), %r12
movq %rdx, 16(%rsp)
movq %rsi, %rbp
jmp .L10
.L18:
call _ZSt16__throw_bad_castv@PLT
.L13:
movq %rbx, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%rbx), %rax
movl $10, %esi
movq %rbx, %rdi
call *48(%rax)
movl %eax, %esi
.L14:
movsbl %sil, %esi
movq %r12, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addl $1, 4(%rsp)
movl 4(%rsp), %eax
addq $80, 8(%rsp)
addq $80, %rbp
addq $80, 16(%rsp)
cmpl $20, %eax
je .L9
.L10:
movq 8(%rsp), %r15
movq %rbp, %r14
movq 16(%rsp), %r13
movl $0, %ebx
movq %rbp, 24(%rsp)
.L11:
movl $7, %edx
leaq .LC0(%rip), %rsi
movq %r12, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 4(%rsp), %esi
movq %r12, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $2, %edx
leaq .LC1(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl %ebx, %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl $3, %edx
leaq .LC3(%rip), %rsi
movq %rbp, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl (%r15,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $3, %edx
leaq .LC4(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl (%r14,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbp
movl $1, %edx
leaq .LC5(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
movl 0(%r13,%rbx,4), %esi
movq %rbp, %rdi
call _ZNSolsEi@PLT
movq %rax, %rdi
movl $1, %edx
leaq .LC6(%rip), %rsi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
addq $1, %rbx
cmpq $20, %rbx
jne .L11
movq 24(%rsp), %rbp
movq (%r12), %rax
movq -24(%rax), %rax
movq 240(%r12,%rax), %rbx
testq %rbx, %rbx
je .L18
cmpb $0, 56(%rbx)
je .L13
movzbl 67(%rbx), %esi
jmp .L14
.L9:
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3670:
.size _Z4showPA20_iS0_S0_, .-_Z4showPA20_iS0_S0_
.globl _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
.type _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_, @function
_Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L23
.L19:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L24
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L23:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z3addPA20_iS0_S0_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L19
.L24:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_, .-_Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
.globl _Z3addPA20_iS0_S0_
.type _Z3addPA20_iS0_S0_, @function
_Z3addPA20_iS0_S0_:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z3addPA20_iS0_S0_, .-_Z3addPA20_iS0_S0_
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $4096, %rsp
.cfi_def_cfa_offset 4120
orq $0, (%rsp)
subq $776, %rsp
.cfi_def_cfa_offset 4896
movq %fs:40, %rax
movq %rax, 4856(%rsp)
xorl %eax, %eax
leaq 48(%rsp), %rbp
movq %rbp, %rdi
call _Z12random_initsPA20_i
leaq 1648(%rsp), %rbx
movq %rbx, %rdi
call _Z12random_initsPA20_i
movq %rsp, %rdi
movl $1600, %esi
call cudaMalloc@PLT
leaq 8(%rsp), %rdi
movl $1600, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $1600, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $1600, %edx
movq %rbp, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $1600, %edx
movq %rbx, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
leaq 3248(%rsp), %rsi
movl $1, %ecx
movl $1600, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $20, 24(%rsp)
movl $20, 28(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L31
.L28:
leaq 3248(%rsp), %rbx
movl $2, %ecx
movl $1600, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 1648(%rsp), %rsi
leaq 48(%rsp), %rdi
movq %rbx, %rdx
call _Z4showPA20_iS0_S0_
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
leaq _ZSt4cout(%rip), %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
movq 4856(%rsp), %rax
subq %fs:40, %rax
jne .L32
movl $0, %eax
addq $4872, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z32__device_stub__Z3addPA20_iS0_S0_PA20_iS0_S0_
jmp .L28
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.1
.LC7:
.string "_Z3addPA20_iS0_S0_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _Z3addPA20_iS0_S0_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <cuda_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
cudaMalloc((void**)&d_a, (N*N)*sizeof(int));
cudaMalloc((void**)&d_b, (N*N)*sizeof(int));
cudaMalloc((void**)&d_c, (N*N)*sizeof(int));
cudaMemcpy(d_a, a, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
cudaMemcpy(c, d_c, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
show(a, b, c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cout<< endl;
return 0;
} | #include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
hipMalloc((void**)&d_a, (N*N)*sizeof(int));
hipMalloc((void**)&d_b, (N*N)*sizeof(int));
hipMalloc((void**)&d_c, (N*N)*sizeof(int));
hipMemcpy(d_a, a, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
show(a, b, c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cout<< endl;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
hipMalloc((void**)&d_a, (N*N)*sizeof(int));
hipMalloc((void**)&d_b, (N*N)*sizeof(int));
hipMalloc((void**)&d_c, (N*N)*sizeof(int));
hipMemcpy(d_a, a, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
show(a, b, c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cout<< endl;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPA20_iS0_S0_
.globl _Z3addPA20_iS0_S0_
.p2align 8
.type _Z3addPA20_iS0_S0_,@function
_Z3addPA20_iS0_S0_:
s_load_b128 s[4:7], s[0:1], 0x0
v_and_b32_e32 v1, 0x3ff, v0
v_lshrrev_b32_e32 v0, 8, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_u32_u24_e32 v4, 0x50, v1
v_mul_hi_u32_u24_e32 v5, 0x50, v1
v_and_b32_e32 v6, 0xffc, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, v0, v6
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, v2, v6
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
global_load_b32 v0, v[0:1], off
global_load_b32 v1, v[2:3], off
v_add_co_u32 v2, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v1, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, v2, v6
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPA20_iS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPA20_iS0_S0_, .Lfunc_end0-_Z3addPA20_iS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPA20_iS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z3addPA20_iS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
#define N 20
#define BLOCK_DIM 10
void random_inits(int a[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
}
}
}
__global__ void add(int a[N][N], int b[N][N], int c[N][N]){
int i = threadIdx.x;
int j = threadIdx.y;
c[i][j] = a[i][j] + b[i][j];
}
void show(int a[N][N], int b[N][N], int c[N][N]){
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
cout << "matrix[" << i << "][" << j << "]" << " = " << a[i][j] << " + " << b[i][j] << "=" << c[i][j] <<"\t";
}
cout << endl;
}
}
int main(void){
int a[N][N];
int b[N][N];
int c[N][N];
random_inits(a);
random_inits(b);
int (*d_a)[N], (*d_b)[N], (*d_c)[N];
hipMalloc((void**)&d_a, (N*N)*sizeof(int));
hipMalloc((void**)&d_b, (N*N)*sizeof(int));
hipMalloc((void**)&d_c, (N*N)*sizeof(int));
hipMemcpy(d_a, a, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
add<<<numBlocks,threadsPerBlock>>>(d_a,d_b,d_c);
hipMemcpy(c, d_c, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
show(a, b, c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cout<< endl;
return 0;
} | .text
.file "add-2dArrays.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z12random_initsPA20_i # -- Begin function _Z12random_initsPA20_i
.p2align 4, 0x90
.type _Z12random_initsPA20_i,@function
_Z12random_initsPA20_i: # @_Z12random_initsPA20_i
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB0_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB0_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # Parent Loop BB0_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB0_2
# %bb.3: # in Loop: Header=BB0_1 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB0_1
# %bb.4:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z12random_initsPA20_i, .Lfunc_end0-_Z12random_initsPA20_i
.cfi_endproc
# -- End function
.globl _Z18__device_stub__addPA20_iS0_S0_ # -- Begin function _Z18__device_stub__addPA20_iS0_S0_
.p2align 4, 0x90
.type _Z18__device_stub__addPA20_iS0_S0_,@function
_Z18__device_stub__addPA20_iS0_S0_: # @_Z18__device_stub__addPA20_iS0_S0_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z3addPA20_iS0_S0_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z18__device_stub__addPA20_iS0_S0_, .Lfunc_end1-_Z18__device_stub__addPA20_iS0_S0_
.cfi_endproc
# -- End function
.globl _Z4showPA20_iS0_S0_ # -- Begin function _Z4showPA20_iS0_S0_
.p2align 4, 0x90
.type _Z4showPA20_iS0_S0_,@function
_Z4showPA20_iS0_S0_: # @_Z4showPA20_iS0_S0_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
pushq %rax
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, %rbx
movq %rsi, %r14
movq %rdi, %r15
xorl %r12d, %r12d
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
movq %r13, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r13), %rax
movq %r13, %rdi
movl $10, %esi
callq *48(%rax)
.LBB2_7: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_1 Depth=1
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r12
addq $80, %rbx
addq $80, %r14
addq $80, %r15
cmpq $20, %r12
je .LBB2_8
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $7, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $_ZSt4cout, %edi
movl %r12d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.1, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq %rbp, %rdi
movl %r13d, %esi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.2, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl $.L.str.3, %esi
movl $3, %edx
movq %rbp, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r15,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.4, %esi
movl $3, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%r14,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movq %rax, %rbp
movl $.L.str.5, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movl (%rbx,%r13,4), %esi
movq %rbp, %rdi
callq _ZNSolsEi
movl $.L.str.6, %esi
movl $1, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
incq %r13
cmpq $20, %r13
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r13
testq %r13, %r13
je .LBB2_9
# %bb.4: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_1 Depth=1
cmpb $0, 56(%r13)
je .LBB2_6
# %bb.5: # in Loop: Header=BB2_1 Depth=1
movzbl 67(%r13), %eax
jmp .LBB2_7
.LBB2_8:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_9:
.cfi_def_cfa_offset 64
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size _Z4showPA20_iS0_S0_, .Lfunc_end2-_Z4showPA20_iS0_S0_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $4928, %rsp # imm = 0x1340
.cfi_def_cfa_offset 4960
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
leaq 1728(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_1: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB3_1
# %bb.4: # %.preheader.i5.preheader
leaq 128(%rsp), %rbx
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB3_5: # %.preheader.i5
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $34, %rcx
addl %edx, %ecx
addl %ecx, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq $20, %r15
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r14
addq $80, %rbx
cmpq $20, %r14
jne .LBB3_5
# %bb.8: # %_Z12random_initsPA20_i.exit12
leaq 16(%rsp), %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
leaq 8(%rsp), %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
movq %rsp, %rdi
movl $1600, %esi # imm = 0x640
callq hipMalloc
movq 16(%rsp), %rdi
leaq 1728(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 3328(%rsp), %rsi
movl $1600, %edx # imm = 0x640
movl $1, %ecx
callq hipMemcpy
movabsq $4294967297, %rdi # imm = 0x100000001
movabsq $85899345940, %rdx # imm = 0x1400000014
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB3_10
# %bb.9:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq %rax, 88(%rsp)
movq %rcx, 80(%rsp)
movq %rdx, 72(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z3addPA20_iS0_S0_, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB3_10:
movq (%rsp), %rsi
leaq 3328(%rsp), %rbx
movl $1600, %edx # imm = 0x640
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 1728(%rsp), %rdi
leaq 128(%rsp), %rsi
movq %rbx, %rdx
callq _Z4showPA20_iS0_S0_
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %rbx
testq %rbx, %rbx
je .LBB3_15
# %bb.11: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%rbx)
je .LBB3_13
# %bb.12:
movzbl 67(%rbx), %eax
jmp .LBB3_14
.LBB3_13:
movq %rbx, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%rbx), %rax
movq %rbx, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_14: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
xorl %eax, %eax
addq $4928, %rsp # imm = 0x1340
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_15:
.cfi_def_cfa_offset 4960
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3addPA20_iS0_S0_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z3addPA20_iS0_S0_,@object # @_Z3addPA20_iS0_S0_
.section .rodata,"a",@progbits
.globl _Z3addPA20_iS0_S0_
.p2align 3, 0x0
_Z3addPA20_iS0_S0_:
.quad _Z18__device_stub__addPA20_iS0_S0_
.size _Z3addPA20_iS0_S0_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "matrix["
.size .L.str, 8
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "]["
.size .L.str.1, 3
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "]"
.size .L.str.2, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz " = "
.size .L.str.3, 4
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz " + "
.size .L.str.4, 4
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "="
.size .L.str.5, 2
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\t"
.size .L.str.6, 2
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3addPA20_iS0_S0_"
.size .L__unnamed_1, 19
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__addPA20_iS0_S0_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z3addPA20_iS0_S0_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z3addPA20_iS0_S0_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R6, SR_TID.X ; /* 0x0000000000067919 */
/* 0x000e220000002100 */
/*0020*/ HFMA2.MMA R7, -RZ, RZ, 0, 4.76837158203125e-06 ; /* 0x00000050ff077435 */
/* 0x000fe200000001ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R9, SR_TID.Y ; /* 0x0000000000097919 */
/* 0x000e6e0000002200 */
/*0050*/ IMAD.WIDE R2, R6, R7, c[0x0][0x160] ; /* 0x0000580006027625 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R4, R6, R7, c[0x0][0x168] ; /* 0x00005a0006047625 */
/* 0x000fc800078e0207 */
/*0070*/ IMAD.WIDE R2, R9, 0x4, R2 ; /* 0x0000000409027825 */
/* 0x002fc800078e0202 */
/*0080*/ IMAD.WIDE R4, R9, 0x4, R4 ; /* 0x0000000409047825 */
/* 0x000fe400078e0204 */
/*0090*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea8000c1e1900 */
/*00a0*/ LDG.E R5, [R4.64] ; /* 0x0000000404057981 */
/* 0x000ea2000c1e1900 */
/*00b0*/ IMAD.WIDE R6, R6, R7, c[0x0][0x170] ; /* 0x00005c0006067625 */
/* 0x000fcc00078e0207 */
/*00c0*/ IMAD.WIDE R6, R9, 0x4, R6 ; /* 0x0000000409067825 */
/* 0x000fe200078e0206 */
/*00d0*/ IADD3 R9, R2, R5, RZ ; /* 0x0000000502097210 */
/* 0x004fca0007ffe0ff */
/*00e0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z3addPA20_iS0_S0_
.globl _Z3addPA20_iS0_S0_
.p2align 8
.type _Z3addPA20_iS0_S0_,@function
_Z3addPA20_iS0_S0_:
s_load_b128 s[4:7], s[0:1], 0x0
v_and_b32_e32 v1, 0x3ff, v0
v_lshrrev_b32_e32 v0, 8, v0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_mul_u32_u24_e32 v4, 0x50, v1
v_mul_hi_u32_u24_e32 v5, 0x50, v1
v_and_b32_e32 v6, 0xffc, v0
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s4, v4
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v5, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v5, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v0, vcc_lo, v0, v6
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v2, vcc_lo, v2, v6
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
global_load_b32 v0, v[0:1], off
global_load_b32 v1, v[2:3], off
v_add_co_u32 v2, vcc_lo, s0, v4
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v5, vcc_lo
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v4, v1, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, v2, v6
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
global_store_b32 v[0:1], v4, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z3addPA20_iS0_S0_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 8
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z3addPA20_iS0_S0_, .Lfunc_end0-_Z3addPA20_iS0_S0_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z3addPA20_iS0_S0_
.private_segment_fixed_size: 0
.sgpr_count: 10
.sgpr_spill_count: 0
.symbol: _Z3addPA20_iS0_S0_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.