// NOTE(review): the following provenance metadata was fused onto the file
// by a dataset-extraction step; preserved here as a comment so the file
// remains assemblable.
//   repo_id:   al3xtjames/Clover
//   size:      45,313 bytes
//   file_path: Library/OpensslLib/openssl-1.0.1e/crypto/bn/asm/ia64.S
.explicit
.text
.ident "ia64.S, Version 2.1"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
//
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project.
//
// Rights for redistribution and usage in source and binary forms are
// granted according to the OpenSSL license. Warranty of any kind is
// disclaimed.
// ====================================================================
//
// Version 2.x is Itanium2 re-tune. Few words about how Itanum2 is
// different from Itanium to this module viewpoint. Most notably, is it
// "wider" than Itanium? Can you experience loop scalability as
// discussed in commentary sections? Not really:-( Itanium2 has 6
// integer ALU ports, i.e. it's 2 ports wider, but it's not enough to
// spin twice as fast, as I need 8 IALU ports. Amount of floating point
// ports is the same, i.e. 2, while I need 4. In other words, to this
// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
// essentially different in respect to this module, and a re-tune was
// required. Well, because some instruction latencies have changed. Most
// noticeably those intensively used:
//
// Itanium Itanium2
// ldf8 9 6 L2 hit
// ld8 2 1 L1 hit
// getf 2 5
// xma[->getf] 7[+1] 4[+0]
// add[->st8] 1[+1] 1[+0]
//
// What does it mean? You might ratiocinate that the original code
// should run just faster... Because sum of latencies is smaller...
// Wrong! Note that getf latency increased. This means that if a loop is
// scheduled for lower latency (as they were), then it will suffer from
// stall condition and the code will therefore turn anti-scalable, e.g.
// original bn_mul_words spun at 5*n or 2.5 times slower than expected
// on Itanium2! What to do? Reschedule loops for Itanium2? But then
// Itanium would exhibit anti-scalability. So I've chosen to reschedule
// for worst latency for every instruction aiming for best *all-round*
// performance.
// Q. How much faster does it get?
// A. Here is the output from 'openssl speed rsa dsa' for vanilla
// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
// Linux 7.1 2.96-81):
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2
// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1
// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9
// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1
// sign verify sign/s verify/s
// dsa 512 bits 0.0035s 0.0043s 288.3 234.8
// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2
//
// And here is similar output but for this assembler
// implementation:-)
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5
// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1
// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3
// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5
// sign verify sign/s verify/s
// dsa 512 bits 0.0012s 0.0013s 891.9 756.6
// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2
//
// Yes, you may argue that it's not fair comparison as it's
// possible to craft the C implementation with BN_UMULT_HIGH
// inline assembler macro. But of course! Here is the output
// with the macro:
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0
// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7
// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3
// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7
// sign verify sign/s verify/s
// dsa 512 bits 0.0016s 0.0020s 613.1 510.5
// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9
//
// My code is still way faster, huh:-) And I believe that even
// higher performance can be achieved. Note that as keys get
// longer, performance gain is larger. Why? According to the
// profiler there is another player in the field, namely
// BN_from_montgomery consuming larger and larger portion of CPU
// time as keysize decreases. I therefore consider putting effort
// to assembler implementation of the following routine:
//
// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
// {
// int i,j;
// BN_ULONG v;
//
// for (i=0; i<nl; i++)
// {
// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
// nrp++;
// rp++;
// if (((nrp[-1]+=v)&BN_MASK2) < v)
// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
// }
// }
//
// It might as well be beneficial to implement even combaX
// variants, as it appears as it can literally unleash the
// performance (see comment section to bn_mul_comba8 below).
//
// And finally for your reference the output for 0.9.6a compiled
// with SGIcc version 0.01.0-12 (keep in mind that for the moment
// of this writing it's not possible to convince SGIcc to use
// BN_UMULT_HIGH inline assembler macro, yet the code is fast,
// i.e. for a compiler generated one:-):
//
// sign verify sign/s verify/s
// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3
// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9
// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2
// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5
// sign verify sign/s verify/s
// dsa 512 bits 0.0018s 0.0022s 547.3 459.6
// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3
//
// Oh! Benchmarks were performed on 733MHz Lion-class Itanium
// system running Redhat Linux 7.1 (very special thanks to Ray
// McCaffity of Williams Communications for providing an account).
//
// Q. What's the heck with 'rum 1<<5' at the end of every function?
// A. Well, by clearing the "upper FP registers written" bit of the
// User Mask I want to excuse the kernel from preserving upper
// (f32-f128) FP register bank over process context switch, thus
// minimizing bus bandwidth consumption during the switch (i.e.
// after PKI operation completes and the program is off doing
// something else like bulk symmetric encryption). Having said
// this, I also want to point out that it might be good idea
// to compile the whole toolkit (as well as majority of the
// programs for that matter) with -mfixed-range=f32-f127 command
// line option. No, it doesn't prevent the compiler from writing
// to upper bank, but at least discourages to do so. If you don't
// like the idea you have the option to compile the module with
// -Drum=nop.m in command line.
//
#if defined(_HPUX_SOURCE) && !defined(_LP64)
#define ADDP addp4
#else
#define ADDP add
#endif
#if 1
//
// bn_[add|sub]_words routines.
//
// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
// data reside in L1 cache, i.e. 2 ticks away). It's possible to
// compress the epilogue and get down to 2*n+6, but at the cost of
// scalability (the neat feature of this implementation is that it
// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
// I consider that the epilogue is short enough as it is to trade tiny
// performance loss on Itanium for scalability.
//
// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
.global bn_add_words#
.proc bn_add_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_add_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0 // return value
(p6) br.ret.spnt.many b0 };;
{ .mib; sub r10=r35,r0,1
.save ar.lc,r3
mov r3=ar.lc
brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
}
{ .mib; ADDP r14=0,r32 // rp
.save pr,r9
mov r9=pr };;
.body
{ .mii; ADDP r15=0,r33 // ap
mov ar.lc=r10
mov ar.ec=6 }
{ .mib; ADDP r16=0,r34 // bp
mov pr.rot=1<<16 };;
.L_bn_add_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
(p18) add r39=r37,r34
(p19) cmp.ltu.unc p56,p0=r40,r38 }
{ .mfb; (p0) nop.m 0x0
(p0) nop.f 0x0
(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
(p58) cmp.eq.or p57,p0=-1,r41 // (p20)
(p58) add r41=1,r41 } // (p20)
{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r
(p0) nop.f 0x0
br.ctop.sptk .L_bn_add_words_ctop };;
.L_bn_add_words_cend:
{ .mii;
(p59) add r8=1,r8 // return value
mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
.endp bn_add_words#
//
// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
//
.global bn_sub_words#
.proc bn_sub_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_sub_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,4,12,0,16
cmp4.le p6,p0=r35,r0 };;
{ .mfb; mov r8=r0 // return value
(p6) br.ret.spnt.many b0 };;
{ .mib; sub r10=r35,r0,1
.save ar.lc,r3
mov r3=ar.lc
brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
}
{ .mib; ADDP r14=0,r32 // rp
.save pr,r9
mov r9=pr };;
.body
{ .mii; ADDP r15=0,r33 // ap
mov ar.lc=r10
mov ar.ec=6 }
{ .mib; ADDP r16=0,r34 // bp
mov pr.rot=1<<16 };;
.L_bn_sub_words_ctop:
{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
(p18) sub r39=r37,r34
(p19) cmp.gtu.unc p56,p0=r40,r38 }
{ .mfb; (p0) nop.m 0x0
(p0) nop.f 0x0
(p0) nop.b 0x0 }
{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
(p58) cmp.eq.or p57,p0=0,r41 // (p20)
(p58) add r41=-1,r41 } // (p20)
{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r
(p0) nop.b 0x0
br.ctop.sptk .L_bn_sub_words_ctop };;
.L_bn_sub_words_cend:
{ .mii;
(p59) add r8=1,r8 // return value
mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mbb; nop.b 0x0
br.ret.sptk.many b0 };;
.endp bn_sub_words#
#endif
#if 0
#define XMA_TEMPTATION
#endif
#if 1
//
// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
//
.global bn_mul_words#
.proc bn_mul_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_mul_words:
.prologue
.save ar.pfs,r2
#ifdef XMA_TEMPTATION
{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
#else
{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
#endif
{ .mib; mov r8=r0 // return value
cmp4.le p6,p0=r34,r0
(p6) br.ret.spnt.many b0 };;
{ .mii; sub r10=r34,r0,1
.save ar.lc,r3
mov r3=ar.lc
.save pr,r9
mov r9=pr };;
.body
{ .mib; setf.sig f8=r35 // w
mov pr.rot=0x800001<<16
// ------^----- serves as (p50) at first (p27)
brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
}
#ifndef XMA_TEMPTATION
{ .mmi; ADDP r14=0,r32 // rp
ADDP r15=0,r33 // ap
mov ar.lc=r10 }
{ .mmi; mov r40=0 // serves as r35 at first (p27)
mov ar.ec=13 };;
// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
// bypass L1 cache and L2 latency is actually best-case scenario for
// ldf8. The loop is not scalable and shall run in 2*(n+12) even on
// "wider" IA-64 implementations. It's a trade-off here. n+24 loop
// would give us ~5% in *overall* performance improvement on "wider"
// IA-64, but would hurt Itanium for about same because of longer
// epilogue. As it's a matter of few percents in either case I've
// chosen to trade the scalability for development time (you can see
// this very instruction sequence in bn_mul_add_words loop which in
// turn is scalable).
.L_bn_mul_words_ctop:
{ .mfi; (p25) getf.sig r36=f52 // low
(p21) xmpy.lu f48=f37,f8
(p28) cmp.ltu p54,p50=r41,r39 }
{ .mfi; (p16) ldf8 f32=[r15],8
(p21) xmpy.hu f40=f37,f8
(p0) nop.i 0x0 };;
{ .mii; (p25) getf.sig r32=f44 // high
.pred.rel "mutex",p50,p54
(p50) add r40=r38,r35 // (p27)
(p54) add r40=r38,r35,1 } // (p27)
{ .mfb; (p28) st8 [r14]=r41,8
(p0) nop.f 0x0
br.ctop.sptk .L_bn_mul_words_ctop };;
.L_bn_mul_words_cend:
{ .mii; nop.m 0x0
.pred.rel "mutex",p51,p55
(p51) add r8=r36,r0
(p55) add r8=r36,r0,1 }
{ .mfb; nop.m 0x0
nop.f 0x0
nop.b 0x0 }
#else // XMA_TEMPTATION
setf.sig f37=r0 // serves as carry at (p18) tick
mov ar.lc=r10
mov ar.ec=5;;
// Most of you examining this code very likely wonder why in the name
// of Intel the following loop is commented out? Indeed, it looks so
// neat that you find it hard to believe that it's something wrong
// with it, right? The catch is that every iteration depends on the
// result from previous one and the latter isn't available instantly.
// The loop therefore spins at the latency of xma minus 1, or in other
// words at 6*(n+4) ticks:-( Compare to the "production" loop above
// that runs in 2*(n+11) where the low latency problem is worked around
// by moving the dependency to one-tick latent interger ALU. Note that
// "distance" between ldf8 and xma is not latency of ldf8, but the
// *difference* between xma and ldf8 latencies.
.L_bn_mul_words_ctop:
{ .mfi; (p16) ldf8 f32=[r33],8
(p18) xma.hu f38=f34,f8,f39 }
{ .mfb; (p20) stf8 [r32]=f37,8
(p18) xma.lu f35=f34,f8,f39
br.ctop.sptk .L_bn_mul_words_ctop };;
.L_bn_mul_words_cend:
getf.sig r8=f41 // the return value
#endif // XMA_TEMPTATION
{ .mii; nop.m 0x0
mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
br.ret.sptk.many b0 };;
.endp bn_mul_words#
#endif
#if 1
//
// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
//
.global bn_mul_add_words#
.proc bn_mul_add_words#
.align 64
.skip 48 // makes the loop body aligned at 64-byte boundary
bn_mul_add_words:
.prologue
.save ar.pfs,r2
{ .mmi; alloc r2=ar.pfs,4,4,0,8
cmp4.le p6,p0=r34,r0
.save ar.lc,r3
mov r3=ar.lc };;
{ .mib; mov r8=r0 // return value
sub r10=r34,r0,1
(p6) br.ret.spnt.many b0 };;
{ .mib; setf.sig f8=r35 // w
.save pr,r9
mov r9=pr
brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
}
.body
{ .mmi; ADDP r14=0,r32 // rp
ADDP r15=0,r33 // ap
mov ar.lc=r10 }
{ .mii; ADDP r16=0,r32 // rp copy
mov pr.rot=0x2001<<16
// ------^----- serves as (p40) at first (p27)
mov ar.ec=11 };;
// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
// Itanium 2. Yes, unlike previous versions it scales:-) Previous
// version was peforming *all* additions in IALU and was starving
// for those even on Itanium 2. In this version one addition is
// moved to FPU and is folded with multiplication. This is at cost
// of propogating the result from previous call to this subroutine
// to L2 cache... In other words negligible even for shorter keys.
// *Overall* performance improvement [over previous version] varies
// from 11 to 22 percent depending on key length.
.L_bn_mul_add_words_ctop:
.pred.rel "mutex",p40,p42
{ .mfi; (p23) getf.sig r36=f45 // low
(p20) xma.lu f42=f36,f8,f50 // low
(p40) add r39=r39,r35 } // (p27)
{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
(p20) xma.hu f36=f36,f8,f50 // high
(p42) add r39=r39,r35,1 };; // (p27)
{ .mmi; (p24) getf.sig r32=f40 // high
(p16) ldf8 f46=[r16],8 // *(rp1++)
(p40) cmp.ltu p41,p39=r39,r35 } // (p27)
{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
(p42) cmp.leu p41,p39=r39,r35 // (p27)
br.ctop.sptk .L_bn_mul_add_words_ctop};;
.L_bn_mul_add_words_cend:
{ .mmi; .pred.rel "mutex",p40,p42
(p40) add r8=r35,r0
(p42) add r8=r35,r0,1
mov pr=r9,0x1ffff }
{ .mib; rum 1<<5 // clear um.mfh
mov ar.lc=r3
br.ret.sptk.many b0 };;
.endp bn_mul_add_words#
#endif
#if 1
//
// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
//
.global bn_sqr_words#
.proc bn_sqr_words#
.align 64
.skip 32 // makes the loop body aligned at 64-byte boundary
bn_sqr_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,3,0,0,0
sxt4 r34=r34 };;
{ .mii; cmp.le p6,p0=r34,r0
mov r8=r0 } // return value
{ .mfb; ADDP r32=0,r32
nop.f 0x0
(p6) br.ret.spnt.many b0 };;
{ .mii; sub r10=r34,r0,1
.save ar.lc,r3
mov r3=ar.lc
.save pr,r9
mov r9=pr };;
.body
{ .mib; ADDP r33=0,r33
mov pr.rot=1<<16
brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
}
{ .mii; add r34=8,r32
mov ar.lc=r10
mov ar.ec=18 };;
// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
// possible to compress the epilogue (I'm getting tired to write this
// comment over and over) and get down to 2*n+16 at the cost of
// scalability. The decision will very likely be reconsidered after the
// benchmark program is profiled. I.e. if perfomance gain on Itanium
// will appear larger than loss on "wider" IA-64, then the loop should
// be explicitely split and the epilogue compressed.
.L_bn_sqr_words_ctop:
{ .mfi; (p16) ldf8 f32=[r33],8
(p25) xmpy.lu f42=f41,f41
(p0) nop.i 0x0 }
{ .mib; (p33) stf8 [r32]=f50,16
(p0) nop.i 0x0
(p0) nop.b 0x0 }
{ .mfi; (p0) nop.m 0x0
(p25) xmpy.hu f52=f41,f41
(p0) nop.i 0x0 }
{ .mib; (p33) stf8 [r34]=f60,16
(p0) nop.i 0x0
br.ctop.sptk .L_bn_sqr_words_ctop };;
.L_bn_sqr_words_cend:
{ .mii; nop.m 0x0
mov pr=r9,0x1ffff
mov ar.lc=r3 }
{ .mfb; rum 1<<5 // clear um.mfh
nop.f 0x0
br.ret.sptk.many b0 };;
.endp bn_sqr_words#
#endif
#if 1
// Apparently we win nothing by implementing special bn_sqr_comba8.
// Yes, it is possible to reduce the number of multiplications by
// almost factor of two, but then the amount of additions would
// increase by factor of two (as we would have to perform those
// otherwise performed by xma ourselves). Normally we would trade
// anyway as multiplications are way more expensive, but not this
// time... Multiplication kernel is fully pipelined and as we drain
// one 128-bit multiplication result per clock cycle multiplications
// are effectively as inexpensive as additions. Special implementation
// might become of interest for "wider" IA-64 implementation as you'll
// be able to get through the multiplication phase faster (there won't
// be any stall issues as discussed in the commentary section below and
// you therefore will be able to employ all 4 FP units)... But these
// Itanium days it's simply too hard to justify the effort so I just
// drop down to bn_mul_comba8 code:-)
//
// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
//
.global bn_sqr_comba8#
.proc bn_sqr_comba8#
.align 64
bn_sqr_comba8:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,2,1,0,0
addp4 r33=0,r33
addp4 r32=0,r32 };;
{ .mii;
#else
{ .mii; alloc r2=ar.pfs,2,1,0,0
#endif
mov r34=r33
add r14=8,r33 };;
.body
{ .mii; add r17=8,r34
add r15=16,r33
add r18=16,r34 }
{ .mfb; add r16=24,r33
br .L_cheat_entry_point8 };;
.endp bn_sqr_comba8#
#endif
#if 1
// I've estimated this routine to run in ~120 ticks, but in reality
// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
// cycles consumed for instructions fetch? Or did I misinterpret some
// clause in Itanium -architecture manual? Comments are welcomed and
// highly appreciated.
//
// On Itanium 2 it takes ~190 ticks. This is because of stalls on
// result from getf.sig. I do nothing about it at this point for
// reasons depicted below.
//
// However! It should be noted that even 160 ticks is darn good result
// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the
// C version (compiled with gcc with inline assembler). I really
// kicked compiler's butt here, didn't I? Yeah! This brings us to the
// following statement. It's damn shame that this routine isn't called
// very often nowadays! According to the profiler most CPU time is
// consumed by bn_mul_add_words called from BN_from_montgomery. In
// order to estimate what we're missing, I've compared the performance
// of this routine against "traditional" implementation, i.e. against
// following routine:
//
// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
// }
//
// The one below is over 8 times faster than the one above:-( Even
// more reasons to "combafy" bn_mul_add_mont...
//
// And yes, this routine really made me wish there were an optimizing
// assembler! It also feels like it deserves a dedication.
//
// To my wife for being there and to my kids...
//
// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
//
#define carry1 r14
#define carry2 r15
#define carry3 r34
.global bn_mul_comba8#
.proc bn_mul_comba8#
.align 64
bn_mul_comba8:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,3,0,0,0
addp4 r33=0,r33
addp4 r34=0,r34 };;
{ .mii; addp4 r32=0,r32
#else
{ .mii; alloc r2=ar.pfs,3,0,0,0
#endif
add r14=8,r33
add r17=8,r34 }
.body
{ .mii; add r15=16,r33
add r18=16,r34
add r16=24,r33 }
.L_cheat_entry_point8:
{ .mmi; add r19=24,r34
ldf8 f32=[r33],32 };;
{ .mmi; ldf8 f120=[r34],32
ldf8 f121=[r17],32 }
{ .mmi; ldf8 f122=[r18],32
ldf8 f123=[r19],32 };;
{ .mmi; ldf8 f124=[r34]
ldf8 f125=[r17] }
{ .mmi; ldf8 f126=[r18]
ldf8 f127=[r19] }
{ .mmi; ldf8 f33=[r14],32
ldf8 f34=[r15],32 }
{ .mmi; ldf8 f35=[r16],32;;
ldf8 f36=[r33] }
{ .mmi; ldf8 f37=[r14]
ldf8 f38=[r15] }
{ .mfi; ldf8 f39=[r16]
// -------\ Entering multiplier's heaven /-------
// ------------\ /------------
// -----------------\ /-----------------
// ----------------------\/----------------------
xma.hu f41=f32,f120,f0 }
{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
{ .mfi; xma.hu f51=f32,f121,f0 }
{ .mfi; xma.lu f50=f32,f121,f0 };;
{ .mfi; xma.hu f61=f32,f122,f0 }
{ .mfi; xma.lu f60=f32,f122,f0 };;
{ .mfi; xma.hu f71=f32,f123,f0 }
{ .mfi; xma.lu f70=f32,f123,f0 };;
{ .mfi; xma.hu f81=f32,f124,f0 }
{ .mfi; xma.lu f80=f32,f124,f0 };;
{ .mfi; xma.hu f91=f32,f125,f0 }
{ .mfi; xma.lu f90=f32,f125,f0 };;
{ .mfi; xma.hu f101=f32,f126,f0 }
{ .mfi; xma.lu f100=f32,f126,f0 };;
{ .mfi; xma.hu f111=f32,f127,f0 }
{ .mfi; xma.lu f110=f32,f127,f0 };;//
// (*) You can argue that splitting at every second bundle would
// prevent "wider" IA-64 implementations from achieving the peak
// performance. Well, not really... The catch is that if you
// intend to keep 4 FP units busy by splitting at every fourth
// bundle and thus perform these 16 multiplications in 4 ticks,
// the first bundle *below* would stall because the result from
// the first xma bundle *above* won't be available for another 3
// ticks (if not more, being an optimist, I assume that "wider"
// implementation will have same latency:-). This stall will hold
// you back and the performance would be as if every second bundle
// were split *anyway*...
{ .mfi; getf.sig r16=f40
xma.hu f42=f33,f120,f41
add r33=8,r32 }
{ .mfi; xma.lu f41=f33,f120,f41 };;
{ .mfi; getf.sig r24=f50
xma.hu f52=f33,f121,f51 }
{ .mfi; xma.lu f51=f33,f121,f51 };;
{ .mfi; st8 [r32]=r16,16
xma.hu f62=f33,f122,f61 }
{ .mfi; xma.lu f61=f33,f122,f61 };;
{ .mfi; xma.hu f72=f33,f123,f71 }
{ .mfi; xma.lu f71=f33,f123,f71 };;
{ .mfi; xma.hu f82=f33,f124,f81 }
{ .mfi; xma.lu f81=f33,f124,f81 };;
{ .mfi; xma.hu f92=f33,f125,f91 }
{ .mfi; xma.lu f91=f33,f125,f91 };;
{ .mfi; xma.hu f102=f33,f126,f101 }
{ .mfi; xma.lu f101=f33,f126,f101 };;
{ .mfi; xma.hu f112=f33,f127,f111 }
{ .mfi; xma.lu f111=f33,f127,f111 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r25=f41
xma.hu f43=f34,f120,f42 }
{ .mfi; xma.lu f42=f34,f120,f42 };;
{ .mfi; getf.sig r16=f60
xma.hu f53=f34,f121,f52 }
{ .mfi; xma.lu f52=f34,f121,f52 };;
{ .mfi; getf.sig r17=f51
xma.hu f63=f34,f122,f62
add r25=r25,r24 }
{ .mfi; xma.lu f62=f34,f122,f62
mov carry1=0 };;
{ .mfi; cmp.ltu p6,p0=r25,r24
xma.hu f73=f34,f123,f72 }
{ .mfi; xma.lu f72=f34,f123,f72 };;
{ .mfi; st8 [r33]=r25,16
xma.hu f83=f34,f124,f82
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f82=f34,f124,f82 };;
{ .mfi; xma.hu f93=f34,f125,f92 }
{ .mfi; xma.lu f92=f34,f125,f92 };;
{ .mfi; xma.hu f103=f34,f126,f102 }
{ .mfi; xma.lu f102=f34,f126,f102 };;
{ .mfi; xma.hu f113=f34,f127,f112 }
{ .mfi; xma.lu f112=f34,f127,f112 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r18=f42
xma.hu f44=f35,f120,f43
add r17=r17,r16 }
{ .mfi; xma.lu f43=f35,f120,f43 };;
{ .mfi; getf.sig r24=f70
xma.hu f54=f35,f121,f53 }
{ .mfi; mov carry2=0
xma.lu f53=f35,f121,f53 };;
{ .mfi; getf.sig r25=f61
xma.hu f64=f35,f122,f63
cmp.ltu p7,p0=r17,r16 }
{ .mfi; add r18=r18,r17
xma.lu f63=f35,f122,f63 };;
{ .mfi; getf.sig r26=f52
xma.hu f74=f35,f123,f73
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f73=f35,f123,f73
add r18=r18,carry1 };;
{ .mfi;
xma.hu f84=f35,f124,f83
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,carry1
xma.lu f83=f35,f124,f83 };;
{ .mfi; st8 [r32]=r18,16
xma.hu f94=f35,f125,f93
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f93=f35,f125,f93 };;
{ .mfi; xma.hu f104=f35,f126,f103 }
{ .mfi; xma.lu f103=f35,f126,f103 };;
{ .mfi; xma.hu f114=f35,f127,f113 }
{ .mfi; mov carry1=0
xma.lu f113=f35,f127,f113
add r25=r25,r24 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r27=f43
xma.hu f45=f36,f120,f44
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f44=f36,f120,f44
add r26=r26,r25 };;
{ .mfi; getf.sig r16=f80
xma.hu f55=f36,f121,f54
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f54=f36,f121,f54 };;
{ .mfi; getf.sig r17=f71
xma.hu f65=f36,f122,f64
cmp.ltu p6,p0=r26,r25 }
{ .mfi; xma.lu f64=f36,f122,f64
add r27=r27,r26 };;
{ .mfi; getf.sig r18=f62
xma.hu f75=f36,f123,f74
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r27,r26
xma.lu f74=f36,f123,f74
add r27=r27,carry2 };;
{ .mfi; getf.sig r19=f53
xma.hu f85=f36,f124,f84
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f84=f36,f124,f84
cmp.ltu p6,p0=r27,carry2 };;
{ .mfi; st8 [r33]=r27,16
xma.hu f95=f36,f125,f94
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f94=f36,f125,f94 };;
{ .mfi; xma.hu f105=f36,f126,f104 }
{ .mfi; mov carry2=0
xma.lu f104=f36,f126,f104
add r17=r17,r16 };;
{ .mfi; xma.hu f115=f36,f127,f114
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f114=f36,f127,f114
add r18=r18,r17 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r20=f44
xma.hu f46=f37,f120,f45
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f45=f37,f120,f45
add r19=r19,r18 };;
{ .mfi; getf.sig r24=f90
xma.hu f56=f37,f121,f55 }
{ .mfi; xma.lu f55=f37,f121,f55 };;
{ .mfi; getf.sig r25=f81
xma.hu f66=f37,f122,f65
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r19,r18
xma.lu f65=f37,f122,f65
add r20=r20,r19 };;
{ .mfi; getf.sig r26=f72
xma.hu f76=f37,f123,f75
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r20,r19
xma.lu f75=f37,f123,f75
add r20=r20,carry1 };;
{ .mfi; getf.sig r27=f63
xma.hu f86=f37,f124,f85
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f85=f37,f124,f85
cmp.ltu p7,p0=r20,carry1 };;
{ .mfi; getf.sig r28=f54
xma.hu f96=f37,f125,f95
(p7) add carry2=1,carry2 }
{ .mfi; st8 [r32]=r20,16
xma.lu f95=f37,f125,f95 };;
{ .mfi; xma.hu f106=f37,f126,f105 }
{ .mfi; mov carry1=0
xma.lu f105=f37,f126,f105
add r25=r25,r24 };;
{ .mfi; xma.hu f116=f37,f127,f115
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f115=f37,f127,f115
add r26=r26,r25 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r29=f45
xma.hu f47=f38,f120,f46
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r26,r25
xma.lu f46=f38,f120,f46
add r27=r27,r26 };;
{ .mfi; getf.sig r16=f100
xma.hu f57=f38,f121,f56
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r27,r26
xma.lu f56=f38,f121,f56
add r28=r28,r27 };;
{ .mfi; getf.sig r17=f91
xma.hu f67=f38,f122,f66
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r28,r27
xma.lu f66=f38,f122,f66
add r29=r29,r28 };;
{ .mfi; getf.sig r18=f82
xma.hu f77=f38,f123,f76
(p6) add carry1=1,carry1 }
{ .mfi; cmp.ltu p6,p0=r29,r28
xma.lu f76=f38,f123,f76
add r29=r29,carry2 };;
{ .mfi; getf.sig r19=f73
xma.hu f87=f38,f124,f86
(p6) add carry1=1,carry1 }
{ .mfi; xma.lu f86=f38,f124,f86
cmp.ltu p6,p0=r29,carry2 };;
{ .mfi; getf.sig r20=f64
xma.hu f97=f38,f125,f96
(p6) add carry1=1,carry1 }
{ .mfi; st8 [r33]=r29,16
xma.lu f96=f38,f125,f96 };;
{ .mfi; getf.sig r21=f55
xma.hu f107=f38,f126,f106 }
{ .mfi; mov carry2=0
xma.lu f106=f38,f126,f106
add r17=r17,r16 };;
{ .mfi; xma.hu f117=f38,f127,f116
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f116=f38,f127,f116
add r18=r18,r17 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r22=f46
xma.hu f48=f39,f120,f47
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r18,r17
xma.lu f47=f39,f120,f47
add r19=r19,r18 };;
{ .mfi; getf.sig r24=f110
xma.hu f58=f39,f121,f57
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r19,r18
xma.lu f57=f39,f121,f57
add r20=r20,r19 };;
{ .mfi; getf.sig r25=f101
xma.hu f68=f39,f122,f67
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r20,r19
xma.lu f67=f39,f122,f67
add r21=r21,r20 };;
{ .mfi; getf.sig r26=f92
xma.hu f78=f39,f123,f77
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r21,r20
xma.lu f77=f39,f123,f77
add r22=r22,r21 };;
{ .mfi; getf.sig r27=f83
xma.hu f88=f39,f124,f87
(p7) add carry2=1,carry2 }
{ .mfi; cmp.ltu p7,p0=r22,r21
xma.lu f87=f39,f124,f87
add r22=r22,carry1 };;
{ .mfi; getf.sig r28=f74
xma.hu f98=f39,f125,f97
(p7) add carry2=1,carry2 }
{ .mfi; xma.lu f97=f39,f125,f97
cmp.ltu p7,p0=r22,carry1 };;
{ .mfi; getf.sig r29=f65
xma.hu f108=f39,f126,f107
(p7) add carry2=1,carry2 }
{ .mfi; st8 [r32]=r22,16
xma.lu f107=f39,f126,f107 };;
{ .mfi; getf.sig r30=f56
xma.hu f118=f39,f127,f117 }
{ .mfi; xma.lu f117=f39,f127,f117 };;//
//-------------------------------------------------//
// Leaving muliplier's heaven... Quite a ride, huh?
{ .mii; getf.sig r31=f47
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r16=f111
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r17=f102 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mii; getf.sig r18=f93
add r17=r17,r16
mov carry3=0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r29=r29,r28 };;
{ .mii; getf.sig r19=f84
cmp.ltu p7,p0=r17,r16 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r29,r28
add r30=r30,r29 };;
{ .mii; getf.sig r20=f75
add r18=r18,r17 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,r29
add r31=r31,r30 };;
{ .mfb; getf.sig r21=f66 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 }
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r31,r30
add r31=r31,carry2 };;
{ .mfb; getf.sig r22=f57 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r19,r18
add r20=r20,r19 }
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r31,carry2 };;
{ .mfb; getf.sig r23=f48 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r20,r19
add r21=r21,r20 }
{ .mii;
(p6) add carry1=1,carry1 }
{ .mfb; st8 [r33]=r31,16 };;
{ .mfb; getf.sig r24=f112 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r21,r20
add r22=r22,r21 };;
{ .mfb; getf.sig r25=f103 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r22,r21
add r23=r23,r22 };;
{ .mfb; getf.sig r26=f94 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r23,r22
add r23=r23,carry1 };;
{ .mfb; getf.sig r27=f85 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p8=r23,carry1};;
{ .mii; getf.sig r28=f76
add r25=r25,r24
mov carry1=0 }
{ .mii; st8 [r32]=r23,16
(p7) add carry2=1,carry3
(p8) add carry2=0,carry3 };;
{ .mfb; nop.m 0x0 }
{ .mii; getf.sig r29=f67
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r30=f58 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; getf.sig r16=f113 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mfb; getf.sig r17=f104 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r29=r29,r28 };;
{ .mfb; getf.sig r18=f95 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r29,r28
add r30=r30,r29 };;
{ .mii; getf.sig r19=f86
add r17=r17,r16
mov carry3=0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,r29
add r30=r30,carry2 };;
{ .mii; getf.sig r20=f77
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r30,carry2 };;
{ .mfb; getf.sig r21=f68 }
{ .mii; st8 [r33]=r30,16
(p6) add carry1=1,carry1 };;
{ .mfb; getf.sig r24=f114 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mfb; getf.sig r25=f105 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r19,r18
add r20=r20,r19 };;
{ .mfb; getf.sig r26=f96 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r20,r19
add r21=r21,r20 };;
{ .mfb; getf.sig r27=f87 }
{ .mii; (p7) add carry3=1,carry3
cmp.ltu p7,p0=r21,r20
add r21=r21,carry1 };;
{ .mib; getf.sig r28=f78
add r25=r25,r24 }
{ .mib; (p7) add carry3=1,carry3
cmp.ltu p7,p8=r21,carry1};;
{ .mii; st8 [r32]=r21,16
(p7) add carry2=1,carry3
(p8) add carry2=0,carry3 }
{ .mii; mov carry1=0
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r16=f115 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mfb; getf.sig r17=f106 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r28=r28,r27 };;
{ .mfb; getf.sig r18=f97 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,r27
add r28=r28,carry2 };;
{ .mib; getf.sig r19=f88
add r17=r17,r16 }
{ .mib;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r28,carry2 };;
{ .mii; st8 [r33]=r28,16
(p6) add carry1=1,carry1 }
{ .mii; mov carry2=0
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 };;
{ .mfb; getf.sig r24=f116 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mfb; getf.sig r25=f107 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,r18
add r19=r19,carry1 };;
{ .mfb; getf.sig r26=f98 }
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,carry1};;
{ .mii; st8 [r32]=r19,16
(p7) add carry2=1,carry2 }
{ .mfb; add r25=r25,r24 };;
{ .mfb; getf.sig r16=f117 }
{ .mii; mov carry1=0
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mfb; getf.sig r17=f108 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r26=r26,carry2 };;
{ .mfb; nop.m 0x0 }
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,carry2 };;
{ .mii; st8 [r33]=r26,16
(p6) add carry1=1,carry1 }
{ .mfb; add r17=r17,r16 };;
{ .mfb; getf.sig r24=f118 }
{ .mii; mov carry2=0
cmp.ltu p7,p0=r17,r16
add r17=r17,carry1 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r17,carry1};;
{ .mii; st8 [r32]=r17
(p7) add carry2=1,carry2 };;
{ .mfb; add r24=r24,carry2 };;
{ .mib; st8 [r33]=r24 }
{ .mib; rum 1<<5 // clear um.mfh
br.ret.sptk.many b0 };;
.endp bn_mul_comba8#
#undef carry3
#undef carry2
#undef carry1
#endif
#if 1
// It's possible to make it faster (see comment to bn_sqr_comba8), but
// I reckon it isn't worth the effort. Basically because the routine
// (actually both of them) is practically never called... So I just play
// the same trick as with bn_sqr_comba8.
//
// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
//
.global bn_sqr_comba4#
.proc bn_sqr_comba4#
.align 64
bn_sqr_comba4:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,2,1,0,0
addp4 r32=0,r32
addp4 r33=0,r33 };;
{ .mii;
#else
{ .mii; alloc r2=ar.pfs,2,1,0,0
#endif
mov r34=r33
add r14=8,r33 };;
.body
{ .mii; add r17=8,r34
add r15=16,r33
add r18=16,r34 }
{ .mfb; add r16=24,r33
br .L_cheat_entry_point4 };;
.endp bn_sqr_comba4#
#endif
#if 1
// Runs in ~115 cycles and ~4.5 times faster than C. Well, whatever...
//
// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
//
#define carry1 r14
#define carry2 r15
.global bn_mul_comba4#
.proc bn_mul_comba4#
.align 64
bn_mul_comba4:
.prologue
.save ar.pfs,r2
#if defined(_HPUX_SOURCE) && !defined(_LP64)
{ .mii; alloc r2=ar.pfs,3,0,0,0
addp4 r33=0,r33
addp4 r34=0,r34 };;
{ .mii; addp4 r32=0,r32
#else
{ .mii; alloc r2=ar.pfs,3,0,0,0
#endif
add r14=8,r33
add r17=8,r34 }
.body
{ .mii; add r15=16,r33
add r18=16,r34
add r16=24,r33 };;
.L_cheat_entry_point4:
{ .mmi; add r19=24,r34
ldf8 f32=[r33] }
{ .mmi; ldf8 f120=[r34]
ldf8 f121=[r17] };;
{ .mmi; ldf8 f122=[r18]
ldf8 f123=[r19] }
{ .mmi; ldf8 f33=[r14]
ldf8 f34=[r15] }
{ .mfi; ldf8 f35=[r16]
xma.hu f41=f32,f120,f0 }
{ .mfi; xma.lu f40=f32,f120,f0 };;
{ .mfi; xma.hu f51=f32,f121,f0 }
{ .mfi; xma.lu f50=f32,f121,f0 };;
{ .mfi; xma.hu f61=f32,f122,f0 }
{ .mfi; xma.lu f60=f32,f122,f0 };;
{ .mfi; xma.hu f71=f32,f123,f0 }
{ .mfi; xma.lu f70=f32,f123,f0 };;//
// Major stall takes place here, and 3 more places below. Result from
// first xma is not available for another 3 ticks.
{ .mfi; getf.sig r16=f40
xma.hu f42=f33,f120,f41
add r33=8,r32 }
{ .mfi; xma.lu f41=f33,f120,f41 };;
{ .mfi; getf.sig r24=f50
xma.hu f52=f33,f121,f51 }
{ .mfi; xma.lu f51=f33,f121,f51 };;
{ .mfi; st8 [r32]=r16,16
xma.hu f62=f33,f122,f61 }
{ .mfi; xma.lu f61=f33,f122,f61 };;
{ .mfi; xma.hu f72=f33,f123,f71 }
{ .mfi; xma.lu f71=f33,f123,f71 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r25=f41
xma.hu f43=f34,f120,f42 }
{ .mfi; xma.lu f42=f34,f120,f42 };;
{ .mfi; getf.sig r16=f60
xma.hu f53=f34,f121,f52 }
{ .mfi; xma.lu f52=f34,f121,f52 };;
{ .mfi; getf.sig r17=f51
xma.hu f63=f34,f122,f62
add r25=r25,r24 }
{ .mfi; mov carry1=0
xma.lu f62=f34,f122,f62 };;
{ .mfi; st8 [r33]=r25,16
xma.hu f73=f34,f123,f72
cmp.ltu p6,p0=r25,r24 }
{ .mfi; xma.lu f72=f34,f123,f72 };;//
//-------------------------------------------------//
{ .mfi; getf.sig r18=f42
xma.hu f44=f35,f120,f43
(p6) add carry1=1,carry1 }
{ .mfi; add r17=r17,r16
xma.lu f43=f35,f120,f43
mov carry2=0 };;
{ .mfi; getf.sig r24=f70
xma.hu f54=f35,f121,f53
cmp.ltu p7,p0=r17,r16 }
{ .mfi; xma.lu f53=f35,f121,f53 };;
{ .mfi; getf.sig r25=f61
xma.hu f64=f35,f122,f63
add r18=r18,r17 }
{ .mfi; xma.lu f63=f35,f122,f63
(p7) add carry2=1,carry2 };;
{ .mfi; getf.sig r26=f52
xma.hu f74=f35,f123,f73
cmp.ltu p7,p0=r18,r17 }
{ .mfi; xma.lu f73=f35,f123,f73
add r18=r18,carry1 };;
//-------------------------------------------------//
{ .mii; st8 [r32]=r18,16
(p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,carry1 };;
{ .mfi; getf.sig r27=f43 // last major stall
(p7) add carry2=1,carry2 };;
{ .mii; getf.sig r16=f71
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r17=f62
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r27=r27,r26 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,r26
add r27=r27,carry2 };;
{ .mii; getf.sig r18=f53
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r27,carry2 };;
{ .mfi; st8 [r33]=r27,16
(p6) add carry1=1,carry1 }
{ .mii; getf.sig r19=f44
add r17=r17,r16
mov carry2=0 };;
{ .mii; getf.sig r24=f72
cmp.ltu p7,p0=r17,r16
add r18=r18,r17 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r18,r17
add r19=r19,r18 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,r18
add r19=r19,carry1 };;
{ .mii; getf.sig r25=f63
(p7) add carry2=1,carry2
cmp.ltu p7,p0=r19,carry1};;
{ .mii; st8 [r32]=r19,16
(p7) add carry2=1,carry2 }
{ .mii; getf.sig r26=f54
add r25=r25,r24
mov carry1=0 };;
{ .mii; getf.sig r16=f73
cmp.ltu p6,p0=r25,r24
add r26=r26,r25 };;
{ .mii;
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,r25
add r26=r26,carry2 };;
{ .mii; getf.sig r17=f64
(p6) add carry1=1,carry1
cmp.ltu p6,p0=r26,carry2 };;
{ .mii; st8 [r33]=r26,16
(p6) add carry1=1,carry1 }
{ .mii; getf.sig r24=f74
add r17=r17,r16
mov carry2=0 };;
{ .mii; cmp.ltu p7,p0=r17,r16
add r17=r17,carry1 };;
{ .mii; (p7) add carry2=1,carry2
cmp.ltu p7,p0=r17,carry1};;
{ .mii; st8 [r32]=r17,16
(p7) add carry2=1,carry2 };;
{ .mii; add r24=r24,carry2 };;
{ .mii; st8 [r33]=r24 }
{ .mib; rum 1<<5 // clear um.mfh
br.ret.sptk.many b0 };;
.endp bn_mul_comba4#
#undef carry2
#undef carry1
#endif
#if 1
//
// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
//
// In the nutshell it's a port of my MIPS III/IV implementation.
//
#define AT r14
#define H r16
#define HH r20
#define L r17
#define D r18
#define DH r22
#define I r21
#if 0
// Some preprocessors (most notably HP-UX) appear to be allergic to
// macros enclosed to parenthesis [as these three were].
#define cont p16
#define break p0 // p20
#define equ p24
#else
cont=p16
break=p0
equ=p24
#endif
.global abort#
.global bn_div_words#
.proc bn_div_words#
.align 64
bn_div_words:
.prologue
.save ar.pfs,r2
{ .mii; alloc r2=ar.pfs,3,5,0,8
.save b0,r3
mov r3=b0
.save pr,r10
mov r10=pr };;
{ .mmb; cmp.eq p6,p0=r34,r0
mov r8=-1
(p6) br.ret.spnt.many b0 };;
.body
{ .mii; mov H=r32 // save h
mov ar.ec=0 // don't rotate at exit
mov pr.rot=0 }
{ .mii; mov L=r33 // save l
mov r36=r0 };;
.L_divw_shift: // -vv- note signed comparison
{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
(p0) shladd r33=r34,1,r0 }
{ .mfb; (p0) add r35=1,r36
(p0) nop.f 0x0
(p16) br.wtop.dpnt .L_divw_shift };;
{ .mii; mov D=r34
shr.u DH=r34,32
sub r35=64,r36 };;
{ .mii; setf.sig f7=DH
shr.u AT=H,r35
mov I=r36 };;
{ .mib; cmp.ne p6,p0=r0,AT
shl H=H,r36
(p6) br.call.spnt.clr b0=abort };; // overflow, die...
{ .mfi; fcvt.xuf.s1 f7=f7
shr.u AT=L,r35 };;
{ .mii; shl L=L,r36
or H=H,AT };;
{ .mii; nop.m 0x0
cmp.leu p6,p0=D,H;;
(p6) sub H=H,D }
{ .mlx; setf.sig f14=D
movl AT=0xffffffff };;
///////////////////////////////////////////////////////////
{ .mii; setf.sig f6=H
shr.u HH=H,32;;
cmp.eq p6,p7=HH,DH };;
{ .mfb;
(p6) setf.sig f8=AT
(p7) fcvt.xuf.s1 f6=f6
(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
{ .mfi; getf.sig r33=f8 // q
xmpy.lu f9=f8,f14 }
{ .mfi; xmpy.hu f10=f8,f14
shrp H=H,L,32 };;
{ .mmi; getf.sig r35=f9 // tl
getf.sig r31=f10 };; // th
.L_divw_1st_iter:
{ .mii; (p0) add r32=-1,r33
(p0) cmp.eq equ,cont=HH,r31 };;
{ .mii; (p0) cmp.ltu p8,p0=r35,D
(p0) sub r34=r35,D
(equ) cmp.leu break,cont=r35,H };;
{ .mib; (cont) cmp.leu cont,break=HH,r31
(p8) add r31=-1,r31
(cont) br.wtop.spnt .L_divw_1st_iter };;
///////////////////////////////////////////////////////////
{ .mii; sub H=H,r35
shl r8=r33,32
shl L=L,32 };;
///////////////////////////////////////////////////////////
{ .mii; setf.sig f6=H
shr.u HH=H,32;;
cmp.eq p6,p7=HH,DH };;
{ .mfb;
(p6) setf.sig f8=AT
(p7) fcvt.xuf.s1 f6=f6
(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
{ .mfi; getf.sig r33=f8 // q
xmpy.lu f9=f8,f14 }
{ .mfi; xmpy.hu f10=f8,f14
shrp H=H,L,32 };;
{ .mmi; getf.sig r35=f9 // tl
getf.sig r31=f10 };; // th
.L_divw_2nd_iter:
{ .mii; (p0) add r32=-1,r33
(p0) cmp.eq equ,cont=HH,r31 };;
{ .mii; (p0) cmp.ltu p8,p0=r35,D
(p0) sub r34=r35,D
(equ) cmp.leu break,cont=r35,H };;
{ .mib; (cont) cmp.leu cont,break=HH,r31
(p8) add r31=-1,r31
(cont) br.wtop.spnt .L_divw_2nd_iter };;
///////////////////////////////////////////////////////////
{ .mii; sub H=H,r35
or r8=r8,r33
mov ar.pfs=r2 };;
{ .mii; shr.u r9=H,I // remainder if anybody wants it
mov pr=r10,0x1ffff }
{ .mfb; br.ret.sptk.many b0 };;
// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
// procedure.
//
// inputs: f6 = (double)a, f7 = (double)b
// output: f8 = (int)(a/b)
// clobbered: f8,f9,f10,f11,pred
pred=p15
// One can argue that this snippet is copyrighted to Intel
// Corporation, as it's essentially identical to one of those
// found in "Divide, Square Root and Remainder" section at
// http://www.intel.com/software/products/opensource/libraries/num.htm.
// Yes, I admit that the referred code was used as template,
// but after I realized that there hardly is any other instruction
// sequence which would perform this operation. I mean I figure that
// any independent attempt to implement high-performance division
// will result in code virtually identical to the Intel code. It
// should be noted though that below division kernel is 1 cycle
// faster than Intel one (note commented splits:-), not to mention
// original prologue (rather lack of one) and epilogue.
.align 32
.skip 16
.L_udiv64_32_b6:
frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
br.ret.sptk.many b6;;
.endp bn_div_words#
#endif
; ============================================================================
; NOTE: everything below this line is a separate source file that was
; appended to the IA-64 module above:
;   repo: al3xtjames/Clover  (46674 bytes)
;   path: Library/OpensslLib/openssl-1.0.1e/crypto/bn/asm/pa-risc2W.s
; ============================================================================
;
; PA-RISC 64-bit implementation of bn_asm code
;
; This code is approximately 2x faster than the C version
; for RSA/DSA.
;
; See http://devresource.hp.com/ for more details on the PA-RISC
; architecture. Also see the book "PA-RISC 2.0 Architecture"
; by Gerry Kane for information on the instruction set architecture.
;
; Code written by Chris Ruemmler (with some help from the HP C
; compiler).
;
; The code compiles with HP's assembler
;
.level 2.0W
.space $TEXT$
.subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
;
; Global Register definitions used for the routines.
;
; Some information about HP's runtime architecture for 64-bits.
;
; "Caller save" means the calling function must save the register
; if it wants the register to be preserved.
; "Callee save" means if a function uses the register, it must save
; the value before using it.
;
; For the floating point registers
;
; "caller save" registers: fr4-fr11, fr22-fr31
; "callee save" registers: fr12-fr21
; "special" registers: fr0-fr3 (status and exception registers)
;
; For the integer registers
; value zero : r0
; "caller save" registers: r1,r19-r26
; "callee save" registers: r3-r18
; return register : r2 (rp)
; return values ; r28 (ret0,ret1)
; Stack pointer ; r30 (sp)
; global data pointer ; r27 (dp)
; argument pointer ; r29 (ap)
; millicode return ptr ; r31 (also a caller save register)
;
; Arguments to the routines
;
r_ptr .reg %r26
a_ptr .reg %r25
b_ptr .reg %r24
num .reg %r24
w .reg %r23
n .reg %r23
;
; Globals used in some routines
;
top_overflow .reg %r29
high_mask .reg %r22 ; value 0xffffffff80000000L
;------------------------------------------------------------------------------
;
; bn_mul_add_words
;
;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
; int num, BN_ULONG w)
;
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = num
; arg3 = w
;
; Local register definitions
;
fm1 .reg %fr22
fm .reg %fr23
ht_temp .reg %fr24
ht_temp_1 .reg %fr25
lt_temp .reg %fr26
lt_temp_1 .reg %fr27
fm1_1 .reg %fr28
fm_1 .reg %fr29
fw_h .reg %fr7L
fw_l .reg %fr7R
fw .reg %fr7
fht_0 .reg %fr8L
flt_0 .reg %fr8R
t_float_0 .reg %fr8
fht_1 .reg %fr9L
flt_1 .reg %fr9R
t_float_1 .reg %fr9
tmp_0 .reg %r31
tmp_1 .reg %r21
m_0 .reg %r20
m_1 .reg %r19
ht_0 .reg %r1
ht_1 .reg %r3
lt_0 .reg %r4
lt_1 .reg %r5
m1_0 .reg %r6
m1_1 .reg %r7
rp_val .reg %r8
rp_val_1 .reg %r9
bn_mul_add_words
.export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
.proc
.callinfo frame=128
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP ; Needed to make the loop 16-byte aligned
NOP ; Needed to make the loop 16-byte aligned
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
STD %r8,40(%sp) ; save r8
STD %r9,48(%sp) ; save r9
COPY %r0,%ret0 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
STD w,56(%sp) ; store w on stack
CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
LDO 128(%sp),%sp ; bump stack
;
; The loop is unrolled twice, so if there is only 1 number
; then go straight to the cleanup code.
;
CMPIB,= 1,num,bn_mul_add_words_single_top
FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit mutiplies can be issued per cycle.
;
bn_mul_add_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDD 8(r_ptr),rp_val_1 ; rp[1]
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m[0]
FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
LDD -8(%sp),m_0 ; m[0]
LDD -40(%sp),m_1 ; m[1]
LDD -16(%sp),m1_0 ; m1[0]
LDD -48(%sp),m1_1 ; m1[1]
LDD -24(%sp),ht_0 ; ht[0]
LDD -56(%sp),ht_1 ; ht[1]
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
LDO -2(num),num ; num = num - 2;
ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
ADD,DC ht_1,%r0,%ret0 ; ht[1]++
LDO 16(a_ptr),a_ptr ; a_ptr += 2
STD lt_1,8(r_ptr) ; rp[1] = lt[1]
CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
LDO 16(r_ptr),r_ptr ; r_ptr += 2
CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_add_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDO 8(a_ptr),a_ptr ; a_ptr++
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0 ; m1 = temp1
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
ADD,DC ht_0,%r0,%ret0 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_add_words_exit
.EXIT
LDD -80(%sp),%r9 ; restore r9
LDD -88(%sp),%r8 ; restore r8
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
;
; arg0 = rp
; arg1 = ap
; arg2 = num
; arg3 = w
bn_mul_words
.proc
.callinfo frame=128
.entry
.EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
COPY %r0,%ret0 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
STD w,56(%sp) ; w on stack
CMPIB,>= 0,num,bn_mul_words_exit
LDO 128(%sp),%sp ; bump stack
;
; See if only 1 word to do, thus just do cleanup
;
CMPIB,= 1,num,bn_mul_words_single_top
FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit mutiplies can be issued per cycle.
;
bn_mul_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
FSTD fm1_1,-48(%sp) ; -48(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
FSTD fm_1,-40(%sp) ; -40(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
LDD -8(%sp),m_0
LDD -40(%sp),m_1
LDD -16(%sp),m1_0
LDD -48(%sp),m1_1
LDD -24(%sp),ht_0
LDD -56(%sp),ht_1
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
EXTRD,U tmp_1,31,32,m_1 ; m>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
ADD,DC ht_1,%r0,ht_1 ; ht++
ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
ADD,DC ht_1,%r0,ht_1 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD lt_1,8(r_ptr) ; rp[1] = lt
COPY ht_1,%ret0 ; carry = ht
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_mul_words_unroll2
LDO 16(r_ptr),r_ptr ; rp++
CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret0,lt_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
COPY ht_0,%ret0 ; copy carry
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_words_exit
.EXIT
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;
; arg0 = rp
; arg1 = ap
; arg2 = num
;
bn_sqr_words
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP
STD %r5,16(%sp) ; save r5
CMPIB,>= 0,num,bn_sqr_words_exit
LDO 128(%sp),%sp ; bump stack
;
; If only 1, the goto straight to cleanup
;
CMPIB,= 1,num,bn_sqr_words_single_top
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sqr_words_unroll2
FLDD 0(a_ptr),t_float_0 ; a[0]
FLDD 8(a_ptr),t_float_1 ; a[1]
XMPYU fht_0,flt_0,fm ; m[0]
XMPYU fht_1,flt_1,fm_1 ; m[1]
FSTD fm,-24(%sp) ; store m[0]
FSTD fm_1,-56(%sp) ; store m[1]
XMPYU flt_0,flt_0,lt_temp ; lt[0]
XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
FSTD lt_temp,-16(%sp) ; store lt[0]
FSTD lt_temp_1,-48(%sp) ; store lt[1]
XMPYU fht_0,fht_0,ht_temp ; ht[0]
XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
FSTD ht_temp,-8(%sp) ; store ht[0]
FSTD ht_temp_1,-40(%sp) ; store ht[1]
LDD -24(%sp),m_0
LDD -56(%sp),m_1
AND m_0,high_mask,tmp_0 ; m[0] & Mask
AND m_1,high_mask,tmp_1 ; m[1] & Mask
DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
LDD -16(%sp),lt_0
LDD -48(%sp),lt_1
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
LDD -8(%sp),ht_0
LDD -40(%sp),ht_1
ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
ADD lt_0,m_0,lt_0 ; lt = lt+m
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
STD ht_0,8(r_ptr) ; rp[1] = ht[1]
ADD lt_1,m_1,lt_1 ; lt = lt+m
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_1,16(r_ptr) ; rp[2] = lt[1]
STD ht_1,24(r_ptr) ; rp[3] = ht[1]
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_sqr_words_unroll2
LDO 32(r_ptr),r_ptr ; rp += 4
CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_sqr_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,flt_0,fm ; m
FSTD fm,-24(%sp) ; store m
XMPYU flt_0,flt_0,lt_temp ; lt
FSTD lt_temp,-16(%sp) ; store lt
XMPYU fht_0,fht_0,ht_temp ; ht
FSTD ht_temp,-8(%sp) ; store ht
LDD -24(%sp),m_0 ; load m
AND m_0,high_mask,tmp_0 ; m & Mask
DEPD,Z m_0,30,31,m_0 ; m << 32+1
LDD -16(%sp),lt_0 ; lt
LDD -8(%sp),ht_0 ; ht
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
ADD m_0,lt_0,lt_0 ; lt = lt+m
ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
ADD,DC ht_0,%r0,ht_0 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD ht_0,8(r_ptr) ; rp[1] = ht
bn_sqr_words_exit
.EXIT
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t .reg %r22
b .reg %r21
l .reg %r20
bn_add_words
.proc
.entry
.callinfo
.EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
CMPIB,>= 0,n,bn_add_words_exit
COPY %r0,%ret0 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_add_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_add_words_unroll2
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret0,t ; t = t+c;
ADD,DC %r0,%r0,%ret0 ; set c to carry
ADD t,b,l ; l = t + b[0]
ADD,DC %ret0,%r0,%ret0 ; c+= carry
STD l,0(r_ptr)
LDD 8(a_ptr),t
LDD 8(b_ptr),b
ADD t,%ret0,t ; t = t+c;
ADD,DC %r0,%r0,%ret0 ; set c to carry
ADD t,b,l ; l = t + b[0]
ADD,DC %ret0,%r0,%ret0 ; c+= carry
STD l,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_add_words_unroll2
LDO 16(r_ptr),r_ptr
CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
bn_add_words_single_top
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret0,t ; t = t+c;
ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
ADD t,b,l ; l = t + b[0]
ADD,DC %ret0,%r0,%ret0 ; c+= carry
STD l,0(r_ptr)
bn_add_words_exit
.EXIT
BVE (%rp)
NOP
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t1 .reg %r22
t2 .reg %r21
sub_tmp1 .reg %r20
sub_tmp2 .reg %r19
bn_sub_words
.proc
.callinfo
.EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
CMPIB,>= 0,n,bn_sub_words_exit
COPY %r0,%ret0 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_sub_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sub_words_unroll2
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0
COPY sub_tmp2,%ret0
STD sub_tmp1,0(r_ptr)
LDD 8(a_ptr),t1
LDD 8(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0
COPY sub_tmp2,%ret0
STD sub_tmp1,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_sub_words_unroll2
LDO 16(r_ptr),r_ptr
CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
bn_sub_words_single_top
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0
COPY sub_tmp2,%ret0
STD sub_tmp1,0(r_ptr)
bn_sub_words_exit
.EXIT
BVE (%rp)
NOP
.PROCEND ;in=23,24,25,26,29;out=28;
;------------------------------------------------------------------------------
;
; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
;
; arg0 = h
; arg1 = l
; arg2 = d
;
; This is mainly just modified assembly from the compiler, thus the
; lack of variable names.
;
;------------------------------------------------------------------------------
bn_div_words
.proc
.callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.IMPORT BN_num_bits_word,CODE,NO_RELOCATION
.IMPORT __iob,DATA
.IMPORT fprintf,CODE,NO_RELOCATION
.IMPORT abort,CODE,NO_RELOCATION
.IMPORT $$div2U,MILLICODE
.entry
STD %r2,-16(%r30)
STD,MA %r3,352(%r30)
STD %r4,-344(%r30)
STD %r5,-336(%r30)
STD %r6,-328(%r30)
STD %r7,-320(%r30)
STD %r8,-312(%r30)
STD %r9,-304(%r30)
STD %r10,-296(%r30)
STD %r27,-288(%r30) ; save gp
COPY %r24,%r3 ; save d
COPY %r26,%r4 ; save h (high 64-bits)
LDO -1(%r0),%ret0 ; return -1 by default
CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
COPY %r25,%r5 ; save l (low 64-bits)
LDO -48(%r30),%r29 ; create ap
.CALL ;in=26,29;out=28;
B,L BN_num_bits_word,%r2
COPY %r3,%r26
LDD -288(%r30),%r27 ; restore gp
LDI 64,%r21
CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
COPY %ret0,%r24 ; i
MTSARCM %r24
DEPDI,Z -1,%sar,1,%r29
CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
$00000012
SUBI 64,%r24,%r31 ; i = 64 - i;
CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
SUB %r4,%r3,%r4 ; h -= d
CMPB,= %r31,%r0,$0000001A ; if (i)
COPY %r0,%r10 ; ret = 0
MTSARCM %r31 ; i to shift
DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
SUBI 64,%r31,%r19 ; 64 - i; redundent
MTSAR %r19 ; (64 -i) to shift
SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
MTSARCM %r31 ; i to shift
DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
$0000001A
DEPDI,Z -1,31,32,%r19
EXTRD,U %r3,31,32,%r6 ; dh=(d&0xfff)>>32
EXTRD,U %r3,63,32,%r8 ; dl = d&0xffffff
LDO 2(%r0),%r9
STD %r3,-280(%r30) ; "d" to stack
$0000001C
DEPDI,Z -1,63,32,%r29 ;
EXTRD,U %r4,31,32,%r31 ; h >> 32
CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
COPY %r4,%r26
EXTRD,U %r4,31,32,%r25
COPY %r6,%r24
.CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
B,L $$div2U,%r2
EXTRD,U %r6,31,32,%r23
DEPD %r28,31,32,%r29
$D2
STD %r29,-272(%r30) ; q
AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
EXTRD,U %r24,31,32,%r24 ; ???
FLDD -272(%r30),%fr7 ; q
FLDD -280(%r30),%fr8 ; d
XMPYU %fr8L,%fr7L,%fr10
FSTD %fr10,-256(%r30)
XMPYU %fr8L,%fr7R,%fr22
FSTD %fr22,-264(%r30)
XMPYU %fr8R,%fr7L,%fr11
XMPYU %fr8R,%fr7R,%fr23
FSTD %fr11,-232(%r30)
FSTD %fr23,-240(%r30)
LDD -256(%r30),%r28
DEPD,Z %r28,31,32,%r2
LDD -264(%r30),%r20
ADD,L %r20,%r2,%r31
LDD -232(%r30),%r22
DEPD,Z %r22,31,32,%r22
LDD -240(%r30),%r21
B $00000024 ; enter loop
ADD,L %r21,%r22,%r23
$0000002A
LDO -1(%r29),%r29
SUB %r23,%r8,%r23
$00000024
SUB %r4,%r31,%r25
AND %r25,%r19,%r26
CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
DEPD,Z %r25,31,32,%r20
OR %r20,%r24,%r21
CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
SUB %r31,%r6,%r31
;-------------Break path---------------------
$00000046
DEPD,Z %r23,31,32,%r25 ;tl
EXTRD,U %r23,31,32,%r26 ;t
AND %r25,%r19,%r24 ;tl = (tl<<32)&0xfffffff0000000L
ADD,L %r31,%r26,%r31 ;th += t;
CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
LDO 1(%r31),%r31 ; th++;
CMPB,*<<=,N %r31,%r4,$00000036 ;if (n < th) (forward)
LDO -1(%r29),%r29 ;q--;
ADD,L %r4,%r3,%r4 ;h += d;
$00000036
ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
SUB %r5,%r24,%r28 ; l -= tl;
SUB %r4,%r31,%r24 ; h -= th;
SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
DEPD,Z %r29,31,32,%r10 ; ret = q<<32
b $0000001C
DEPD,Z %r28,31,32,%r5 ; l = l << 32
$D1
OR %r10,%r29,%r28 ; ret |= q
$D3
LDD -368(%r30),%r2
$D0
LDD -296(%r30),%r10
LDD -304(%r30),%r9
LDD -312(%r30),%r8
LDD -320(%r30),%r7
LDD -328(%r30),%r6
LDD -336(%r30),%r5
LDD -344(%r30),%r4
BVE (%r2)
.EXIT
LDD,MB -352(%r30),%r3
bn_div_err_case
MFIA %r6
ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
LDO R'bn_div_words-bn_div_err_case(%r1),%r6
ADDIL LT'__iob,%r27,%r1
LDD RT'__iob(%r1),%r26
ADDIL L'C$4-bn_div_words,%r6,%r1
LDO R'C$4-bn_div_words(%r1),%r25
LDO 64(%r26),%r26
.CALL ;in=24,25,26,29;out=28;
B,L fprintf,%r2
LDO -48(%r30),%r29
LDD -288(%r30),%r27
.CALL ;in=29;
B,L abort,%r2
LDO -48(%r30),%r29
LDD -288(%r30),%r27
B $D0
LDD -368(%r30),%r2
.PROCEND ;in=24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
; Registers to hold 64-bit values to manipulate. The "L" part
; of the register corresponds to the upper 32-bits, while the "R"
; part corresponds to the lower 32-bits
;
; Note, that when using b6 and b7, the code must save these before
; using them because they are callee save registers
;
;
; Floating point registers to use to save values that
; are manipulated. These don't collide with ftemp1-6 and
; are all caller save registers
;
a0 .reg %fr22
a0L .reg %fr22L
a0R .reg %fr22R
a1 .reg %fr23
a1L .reg %fr23L
a1R .reg %fr23R
a2 .reg %fr24
a2L .reg %fr24L
a2R .reg %fr24R
a3 .reg %fr25
a3L .reg %fr25L
a3R .reg %fr25R
a4 .reg %fr26
a4L .reg %fr26L
a4R .reg %fr26R
a5 .reg %fr27
a5L .reg %fr27L
a5R .reg %fr27R
a6 .reg %fr28
a6L .reg %fr28L
a6R .reg %fr28R
a7 .reg %fr29
a7L .reg %fr29L
a7R .reg %fr29R
b0 .reg %fr30
b0L .reg %fr30L
b0R .reg %fr30R
b1 .reg %fr31
b1L .reg %fr31L
b1R .reg %fr31R
;
; Temporary floating point variables, these are all caller save
; registers
;
ftemp1 .reg %fr4
ftemp2 .reg %fr5
ftemp3 .reg %fr6
ftemp4 .reg %fr7
;
; The B set of registers when used.
;
b2 .reg %fr8
b2L .reg %fr8L
b2R .reg %fr8R
b3 .reg %fr9
b3L .reg %fr9L
b3R .reg %fr9R
b4 .reg %fr10
b4L .reg %fr10L
b4R .reg %fr10R
b5 .reg %fr11
b5L .reg %fr11L
b5R .reg %fr11R
b6 .reg %fr12
b6L .reg %fr12L
b6R .reg %fr12R
b7 .reg %fr13
b7L .reg %fr13L
b7R .reg %fr13R
c1 .reg %r21 ; only reg
temp1 .reg %r20 ; only reg
temp2 .reg %r19 ; only reg
temp3 .reg %r31 ; only reg
m1 .reg %r28
c2 .reg %r23
high_one .reg %r1
ht .reg %r6
lt .reg %r5
m .reg %r4
c3 .reg %r3
;----------------------------------------------------------------------------
; SQR_ADD_C: square one 64-bit word and accumulate the 128-bit result into
; the running carry chain C1 (low word), C2 (high word), C3 (overflow word).
;   A0L/A0R = high/low 32-bit halves of a[i] (left/right FP register halves)
; In a square, the cross product m = A0L*A0R occurs twice, so 2*m is folded
; in as (m << 33) into the low word plus ((m & high_mask) >> 31) into the
; high word.  FP->GR transfers go through the stack at -8..-24(%sp).
; Clobbers caller-save scratch: m, lt, ht, temp1-temp3, ftemp1-ftemp3.
;----------------------------------------------------------------------------
SQR_ADD_C .macro A0L,A0R,C1,C2,C3
XMPYU A0L,A0R,ftemp1 ; m = high*low (cross product)
FSTD ftemp1,-24(%sp) ; store m
XMPYU A0R,A0R,ftemp2 ; lt = low*low
FSTD ftemp2,-16(%sp) ; store lt
XMPYU A0L,A0L,ftemp3 ; ht = high*high
FSTD ftemp3,-8(%sp) ; store ht
LDD -24(%sp),m ; load m
AND m,high_mask,temp2 ; m & Mask (bits of m that shift into the high word)
DEPD,Z m,30,31,temp3 ; m << 32+1 (doubled cross product, low part)
LDD -16(%sp),lt ; lt
LDD -8(%sp),ht ; ht
EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1 (doubled cross, high part)
ADD temp3,lt,lt ; lt = lt + (2*m << 32)
ADD,L ht,temp1,ht ; ht += temp1
ADD,DC ht,%r0,ht ; ht++ (carry out of lt)
ADD C1,lt,C1 ; c1 = c1 + lt
ADD,DC ht,%r0,ht ; ht++ (carry out of c1)
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; c3++ (carry out of c2)
.endm
;----------------------------------------------------------------------------
; SQR_ADD_C2: compute a[i]*a[j] (i != j) as a 128-bit product, double it
; (each off-diagonal term appears twice in a square), and accumulate into
; the carry chain C1 (low), C2 (high), C3 (overflow).
;   A0L/A0R, A1L/A1R = high/low 32-bit halves of the two operands.
; The two cross products are summed first; a wrap in that sum contributes
; high_one (1<<32) to the high word.  Doubling is done by self-addition of
; both halves with carry propagation into C3.
; Clobbers caller-save scratch: m, m1, lt, ht, temp1, temp3, ftemp1-ftemp4.
;----------------------------------------------------------------------------
SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,A1L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; r21 = m
LDD -16(%sp),m1 ; r19 = m1
ADD,L m,m1,m ; m = m+m1 (sum of cross products)
DEPD,Z m,31,32,temp3 ; (m+m1)<<32 (low part of middle term)
LDD -24(%sp),ht ; r24 = ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1) the sum above wrapped, so
ADD,L ht,high_one,ht ; ht += high_one (1<<32)
EXTRD,U m,31,32,temp1 ; m >> 32 (high part of middle term)
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht += m>>32
ADD lt,temp3,lt ; lt += (m+m1)<<32
ADD,DC ht,%r0,ht ; ht++ (carry out of lt)
ADD ht,ht,ht ; ht = 2*ht (double the product)
ADD,DC C3,%r0,C3 ; add in carry (c3++)
ADD lt,lt,lt ; lt = 2*lt (double the product)
ADD,DC ht,%r0,ht ; add in carry (ht++)
ADD C1,lt,C1 ; c1 = c1 + lt
ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
LDO 1(C3),C3 ; bump c3 if overflow, nullify otherwise
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
;----------------------------------------------------------------------------
; bn_sqr_comba8: full comba-style square of an 8-word (512-bit) number.
;   r_ptr (%r26) = r, receives 16 result words
;   a_ptr (%r25) = a, 8 input words
; All 8 input words are loaded into FP registers up front so each 32x32
; partial product can be issued with XMPYU; carries flow c1->c2->c3 through
; the SQR_ADD_C / SQR_ADD_C2 macros.  Saves/restores callee-save r3-r6,
; which the macros use for scratch (c3, m, lt, ht).
;----------------------------------------------------------------------------
bn_sqr_comba8
.PROC
.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.ENTRY
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
STD c2,56(r_ptr) ; r[7] = c2;
COPY %r0,c2
SQR_ADD_C a4L,a4R,c3,c1,c2
SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
STD c3,64(r_ptr) ; r[8] = c3;
COPY %r0,c3
SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
STD c1,72(r_ptr) ; r[9] = c1;
COPY %r0,c1
SQR_ADD_C a5L,a5R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
STD c2,80(r_ptr) ; r[10] = c2;
COPY %r0,c2
SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
STD c3,88(r_ptr) ; r[11] = c3;
COPY %r0,c3
SQR_ADD_C a6L,a6R,c1,c2,c3
SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
STD c1,96(r_ptr) ; r[12] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
STD c2,104(r_ptr) ; r[13] = c2;
COPY %r0,c2
SQR_ADD_C a7L,a7R,c3,c1,c2
STD c3, 112(r_ptr) ; r[14] = c3
STD c1, 120(r_ptr) ; r[15] = c1
.EXIT
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
;----------------------------------------------------------------------------
; bn_sqr_comba4: comba-style square of a 4-word (256-bit) number.
;   r_ptr (%r26) = r, receives 8 result words
;   a_ptr (%r25) = a, 4 input words
; Saves/restores callee-save r3-r6, which the SQR_ADD_C/SQR_ADD_C2 macros
; use for scratch (c3, m, lt, ht).
;----------------------------------------------------------------------------
bn_sqr_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load the four input words.  (Only a[0]..a[3] exist for comba4; the
; previous version also issued FLDDs for a[4]..a[7], reading 32 bytes
; past the end of the caller's array.  Those loads were dead -- nothing
; below references a4..a7 -- so they are removed to eliminate the
; out-of-bounds reads.)
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
STD c2,56(r_ptr) ; r[7] = c2;
.EXIT
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;---------------------------------------------------------------------------
;----------------------------------------------------------------------------
; MUL_ADD_C: compute one 64x64->128 partial product a[i]*b[j] from 32-bit
; halves and accumulate it (once -- no doubling) into the carry chain
; C1 (low), C2 (high), C3 (overflow).  Same cross-product technique as
; SQR_ADD_C2: the two 32x32 cross terms are summed, a wrap contributes
; high_one (1<<32) to the high word.
; Clobbers caller-save scratch: m, m1, lt, ht, temp1, temp3, ftemp1-ftemp4.
;----------------------------------------------------------------------------
MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,B0L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; r21 = m
LDD -16(%sp),m1 ; r19 = m1
ADD,L m,m1,m ; m = m+m1 (sum of cross products)
DEPD,Z m,31,32,temp3 ; (m+m1)<<32 (low part of middle term)
LDD -24(%sp),ht ; r24 = ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1) the sum above wrapped, so
ADD,L ht,high_one,ht ; ht += high_one (1<<32)
EXTRD,U m,31,32,temp1 ; m >> 32 (high part of middle term)
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht += m>>32
ADD lt,temp3,lt ; lt += (m+m1)<<32
ADD,DC ht,%r0,ht ; ht++ (carry out of lt)
ADD C1,lt,C1 ; c1 = c1 + lt
ADD,DC ht,%r0,ht ; ht++ (carry out of c1)
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
;----------------------------------------------------------------------------
; bn_mul_comba8: comba-style 8x8-word (512x512-bit) multiply, r = a * b.
;   r_ptr (%r26) = r, receives 16 result words
;   a_ptr (%r25) = a, 8 input words
;   b_ptr (%r24) = b, 8 input words
; All 16 input words are held in FP registers; b6/b7 live in fr12/fr13,
; which are callee-save, hence the FSTD/FLDD save/restore pair.  Carries
; flow c1->c2->c3 through MUL_ADD_C.
;----------------------------------------------------------------------------
bn_mul_comba8
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12 (b6) -- callee-save FP reg
FSTD %fr13,40(%sp) ; save fr13 (b7) -- callee-save FP reg
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
FLDD 32(b_ptr),b4
FLDD 40(b_ptr),b5
FLDD 48(b_ptr),b6
FLDD 56(b_ptr),b7
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr)
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr)
COPY %r0,c1
MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
STD c2,32(r_ptr)
COPY %r0,c2
MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
STD c3,40(r_ptr)
COPY %r0,c3
MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
STD c1,48(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
STD c2,56(r_ptr)
COPY %r0,c2
MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
STD c3,64(r_ptr)
COPY %r0,c3
MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
STD c1,72(r_ptr)
COPY %r0,c1
MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
STD c2,80(r_ptr)
COPY %r0,c2
MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
STD c3,88(r_ptr)
COPY %r0,c3
MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
STD c1,96(r_ptr)
COPY %r0,c1
MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
STD c2,104(r_ptr)
COPY %r0,c2
MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
STD c3,112(r_ptr)
STD c1,120(r_ptr)
.EXIT
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
;----------------------------------------------------------------------------
; bn_mul_comba4: comba-style 4x4-word (256x256-bit) multiply, r = a * b.
;   r_ptr (%r26) = r, receives 8 result words
;   a_ptr (%r25) = a, 4 input words
;   b_ptr (%r24) = b, 4 input words
; NOTE(review): fr12/fr13 (b6/b7) are saved and restored here although the
; comba4 body never uses them -- harmless, but dead work inherited from the
; comba8 routine.
;----------------------------------------------------------------------------
bn_mul_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12 (callee-save FP reg)
FSTD %fr13,40(%sp) ; save fr13 (callee-save FP reg)
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr)
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr)
COPY %r0,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
STD c2,32(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
STD c3,40(r_ptr)
COPY %r0,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
STD c1,48(r_ptr)
STD c2,56(r_ptr)
.EXIT
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
.SPACE $TEXT$
.SUBSPA $CODE$
.SPACE $PRIVATE$,SORT=16
.IMPORT $global$,DATA
.SPACE $TEXT$
.SUBSPA $CODE$
.SUBSPA $LIT$,ACCESS=0x2c
; C$4: printf format string used by the (non-PIC) bn_div_words overflow path.
C$4
.ALIGN 8
.STRINGZ "Division would overflow (%d)\n"
.END
;============================================================================
; (file boundary) -- the remainder of this chunk is
; Library/OpensslLib/openssl-1.0.1e/crypto/bn/asm/pa-risc2.s
;============================================================================
;
; PA-RISC 2.0 implementation of bn_asm code, based on the
; 64-bit version of the code. This code is effectively the
; same as the 64-bit version except the register model is
; slightly different given all values must be 32-bit between
; function calls. Thus the 64-bit return values are returned
; in %ret0 and %ret1 vs just %ret0 as is done in 64-bit
;
;
; This code is approximately 2x faster than the C version
; for RSA/DSA.
;
; See http://devresource.hp.com/ for more details on the PA-RISC
; architecture. Also see the book "PA-RISC 2.0 Architecture"
; by Gerry Kane for information on the instruction set architecture.
;
; Code written by Chris Ruemmler (with some help from the HP C
; compiler).
;
; The code compiles with HP's assembler
;
.level 2.0N
.space $TEXT$
.subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
;
; Global Register definitions used for the routines.
;
; Some information about HP's runtime architecture for 32-bits.
;
; "Caller save" means the calling function must save the register
; if it wants the register to be preserved.
; "Callee save" means if a function uses the register, it must save
; the value before using it.
;
; For the floating point registers
;
; "caller save" registers: fr4-fr11, fr22-fr31
; "callee save" registers: fr12-fr21
; "special" registers: fr0-fr3 (status and exception registers)
;
; For the integer registers
; value zero : r0
; "caller save" registers: r1,r19-r26
; "callee save" registers: r3-r18
; return register : r2 (rp)
; return values ; r28,r29 (ret0,ret1)
; Stack pointer ; r30 (sp)
; millicode return ptr ; r31 (also a caller save register)
;
; Arguments to the routines
;
r_ptr .reg %r26
a_ptr .reg %r25
b_ptr .reg %r24
num .reg %r24
n .reg %r23
;
; Note that the "w" argument for bn_mul_add_words and bn_mul_words
; is passed on the stack at a delta of -56 from the top of stack
; as the routine is entered.
;
;
; Globals used in some routines
;
top_overflow .reg %r23
high_mask .reg %r22 ; value 0xffffffff80000000L
;------------------------------------------------------------------------------
;
; bn_mul_add_words
;
;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
; int num, BN_ULONG w)
;
; arg0 = r_ptr
; arg1 = a_ptr
; arg3 = num
; -56(sp) = w
;
; Local register definitions
;
fm1 .reg %fr22
fm .reg %fr23
ht_temp .reg %fr24
ht_temp_1 .reg %fr25
lt_temp .reg %fr26
lt_temp_1 .reg %fr27
fm1_1 .reg %fr28
fm_1 .reg %fr29
fw_h .reg %fr7L
fw_l .reg %fr7R
fw .reg %fr7
fht_0 .reg %fr8L
flt_0 .reg %fr8R
t_float_0 .reg %fr8
fht_1 .reg %fr9L
flt_1 .reg %fr9R
t_float_1 .reg %fr9
tmp_0 .reg %r31
tmp_1 .reg %r21
m_0 .reg %r20
m_1 .reg %r19
ht_0 .reg %r1
ht_1 .reg %r3
lt_0 .reg %r4
lt_1 .reg %r5
m1_0 .reg %r6
m1_1 .reg %r7
rp_val .reg %r8
rp_val_1 .reg %r9
;----------------------------------------------------------------------------
; bn_mul_add_words: rp[i] += ap[i] * w for i in [0,num); returns the final
; carry word.  32-bit runtime: the 64-bit return value comes back split in
; %ret0 (high 32) / %ret1 (low 32); w is fetched from the caller's frame at
; -56(sp) on entry (-184 after the 128-byte stack bump).
; The main loop is unrolled twice; an odd trailing element falls through to
; the single-iteration tail.  Callee-save r3-r9 are spilled in the prologue.
;----------------------------------------------------------------------------
bn_mul_add_words
.export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
.proc
.callinfo frame=128
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP ; Needed to make the loop 16-byte aligned
NOP ; needed to make the loop 16-byte aligned
STD %r5,16(%sp) ; save r5
NOP
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
STD %r8,40(%sp) ; save r8
STD %r9,48(%sp) ; save r9
COPY %r0,%ret1 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
LDO 128(%sp),%sp ; bump stack
;
; The loop is unrolled twice, so if there is only 1 number
; then go straight to the cleanup code.
;
CMPIB,= 1,num,bn_mul_add_words_single_top
FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit multiplies can be issued per cycle.
;
bn_mul_add_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDD 8(r_ptr),rp_val_1 ; rp[1]
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m[0]
FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp_1 = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
LDD -8(%sp),m_0 ; m[0]
LDD -40(%sp),m_1 ; m[1]
LDD -16(%sp),m1_0 ; m1[0]
LDD -48(%sp),m1_1 ; m1[1]
LDD -24(%sp),ht_0 ; ht[0]
LDD -56(%sp),ht_1 ; ht[1]
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
LDO -2(num),num ; num = num - 2;
ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
ADD,DC ht_1,%r0,%ret1 ; ht[1]++; carry out becomes running c
LDO 16(a_ptr),a_ptr ; a_ptr += 2
STD lt_1,8(r_ptr) ; rp[1] = lt[1]
CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
LDO 16(r_ptr),r_ptr ; r_ptr += 2
CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_add_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
LDD 0(r_ptr),rp_val ; rp[0]
LDO 8(a_ptr),a_ptr ; a_ptr++
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0 ; m1 = temp1
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
ADD,DC ht_0,%r0,%ret1 ; ht++; final carry is the return value
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_add_words_exit
.EXIT
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
LDD -80(%sp),%r9 ; restore r9
LDD -88(%sp),%r8 ; restore r8
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
;
; arg0 = rp
; arg1 = ap
; arg3 = num
; w on stack at -56(sp)
;----------------------------------------------------------------------------
; bn_mul_words: rp[i] = ap[i] * w for i in [0,num); returns the final carry
; word.  32-bit runtime: 64-bit return split in %ret0 (high) / %ret1 (low);
; w is fetched from the caller's frame at -56(sp) (-184 after the bump).
; Same twice-unrolled structure as bn_mul_add_words, minus the rp[] read.
;----------------------------------------------------------------------------
bn_mul_words
.proc
.callinfo frame=128
.entry
.EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
STD %r7,32(%sp) ; save r7
COPY %r0,%ret1 ; return 0 by default
DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
CMPIB,>= 0,num,bn_mul_words_exit
LDO 128(%sp),%sp ; bump stack
;
; See if only 1 word to do, thus just do cleanup
;
CMPIB,= 1,num,bn_mul_words_single_top
FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
; two 32-bit multiplies can be issued per cycle.
;
bn_mul_words_unroll2
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
FSTD fm1_1,-48(%sp) ; -48(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
FSTD fm_1,-40(%sp) ; -40(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
LDD -8(%sp),m_0
LDD -40(%sp),m_1
LDD -16(%sp),m1_0
LDD -48(%sp),m1_1
LDD -24(%sp),ht_0
LDD -56(%sp),ht_1
ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
LDD -32(%sp),lt_0
LDD -64(%sp),lt_1
CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
EXTRD,U tmp_1,31,32,m_1 ; m>>32
DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
ADD,DC ht_1,%r0,ht_1 ; ht++
ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
ADD,DC ht_1,%r0,ht_1 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD lt_1,8(r_ptr) ; rp[1] = lt
COPY ht_1,%ret1 ; carry = ht
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_mul_words_unroll2
LDO 16(r_ptr),r_ptr ; rp++
CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_mul_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
FSTD fm1,-16(%sp) ; -16(sp) = m1
XMPYU flt_0,fw_h,fm ; m = lt*fw_h
FSTD fm,-8(%sp) ; -8(sp) = m
XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
FSTD ht_temp,-24(%sp) ; -24(sp) = ht
XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
FSTD lt_temp,-32(%sp) ; -32(sp) = lt
LDD -8(%sp),m_0
LDD -16(%sp),m1_0
ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
LDD -24(%sp),ht_0
LDD -32(%sp),lt_0
CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
EXTRD,U tmp_0,31,32,m_0 ; m>>32
DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
ADD,DC ht_0,%r0,ht_0 ; ht++
ADD %ret1,lt_0,lt_0 ; lt = lt + c;
ADD,DC ht_0,%r0,ht_0 ; ht++
COPY ht_0,%ret1 ; copy carry
STD lt_0,0(r_ptr) ; rp[0] = lt
bn_mul_words_exit
.EXIT
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
LDD -96(%sp),%r7 ; restore r7
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3 ; restore r3
.PROCEND
;----------------------------------------------------------------------------
;
;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
;
; arg0 = rp
; arg1 = ap
; arg2 = num
;
;----------------------------------------------------------------------------
; bn_sqr_words: rp[2i], rp[2i+1] = low/high word of ap[i]^2 for i in [0,num).
; Each 64-bit square is built from 32x32 XMPYU partial products; the cross
; product m appears twice, folded in as (m << 33) plus ((m & high_mask) >>
; 31).  Loop unrolled twice; odd element handled by the single tail.
;----------------------------------------------------------------------------
bn_sqr_words
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
NOP
STD %r5,16(%sp) ; save r5
CMPIB,>= 0,num,bn_sqr_words_exit
LDO 128(%sp),%sp ; bump stack
;
; If only 1, then go straight to cleanup
;
CMPIB,= 1,num,bn_sqr_words_single_top
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sqr_words_unroll2
FLDD 0(a_ptr),t_float_0 ; a[0]
FLDD 8(a_ptr),t_float_1 ; a[1]
XMPYU fht_0,flt_0,fm ; m[0]
XMPYU fht_1,flt_1,fm_1 ; m[1]
FSTD fm,-24(%sp) ; store m[0]
FSTD fm_1,-56(%sp) ; store m[1]
XMPYU flt_0,flt_0,lt_temp ; lt[0]
XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
FSTD lt_temp,-16(%sp) ; store lt[0]
FSTD lt_temp_1,-48(%sp) ; store lt[1]
XMPYU fht_0,fht_0,ht_temp ; ht[0]
XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
FSTD ht_temp,-8(%sp) ; store ht[0]
FSTD ht_temp_1,-40(%sp) ; store ht[1]
LDD -24(%sp),m_0
LDD -56(%sp),m_1
AND m_0,high_mask,tmp_0 ; m[0] & Mask
AND m_1,high_mask,tmp_1 ; m[1] & Mask
DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
LDD -16(%sp),lt_0
LDD -48(%sp),lt_1
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
LDD -8(%sp),ht_0
LDD -40(%sp),ht_1
ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
ADD lt_0,m_0,lt_0 ; lt = lt+m
ADD,DC ht_0,%r0,ht_0 ; ht[0]++
STD lt_0,0(r_ptr) ; rp[0] = lt[0]
STD ht_0,8(r_ptr) ; rp[1] = ht[0]
ADD lt_1,m_1,lt_1 ; lt = lt+m
ADD,DC ht_1,%r0,ht_1 ; ht[1]++
STD lt_1,16(r_ptr) ; rp[2] = lt[1]
STD ht_1,24(r_ptr) ; rp[3] = ht[1]
LDO -2(num),num ; num = num - 2;
LDO 16(a_ptr),a_ptr ; ap += 2
CMPIB,<= 2,num,bn_sqr_words_unroll2
LDO 32(r_ptr),r_ptr ; rp += 4
CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
;
; Top of loop aligned on 64-byte boundary
;
bn_sqr_words_single_top
FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
XMPYU fht_0,flt_0,fm ; m
FSTD fm,-24(%sp) ; store m
XMPYU flt_0,flt_0,lt_temp ; lt
FSTD lt_temp,-16(%sp) ; store lt
XMPYU fht_0,fht_0,ht_temp ; ht
FSTD ht_temp,-8(%sp) ; store ht
LDD -24(%sp),m_0 ; load m
AND m_0,high_mask,tmp_0 ; m & Mask
DEPD,Z m_0,30,31,m_0 ; m << 32+1
LDD -16(%sp),lt_0 ; lt
LDD -8(%sp),ht_0 ; ht
EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
ADD m_0,lt_0,lt_0 ; lt = lt+m
ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
ADD,DC ht_0,%r0,ht_0 ; ht++
STD lt_0,0(r_ptr) ; rp[0] = lt
STD ht_0,8(r_ptr) ; rp[1] = ht
bn_sqr_words_exit
.EXIT
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t .reg %r22
b .reg %r21
l .reg %r20
;----------------------------------------------------------------------------
; bn_add_words: r[i] = a[i] + b[i] + carry for i in [0,n); returns the final
; carry (0 or 1) split across %ret0/%ret1 for the 32-bit runtime.
; Leaf routine, no frame; carry is threaded through %ret1 via ADD,DC.
; Loop unrolled twice; odd element handled by the single tail.
;----------------------------------------------------------------------------
bn_add_words
.proc
.entry
.callinfo
.EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.align 64
CMPIB,>= 0,n,bn_add_words_exit
COPY %r0,%ret1 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_add_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_add_words_unroll2
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry
ADD t,b,l ; l = t + b[0]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,0(r_ptr)
LDD 8(a_ptr),t
LDD 8(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry
ADD t,b,l ; l = t + b[1]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_add_words_unroll2
LDO 16(r_ptr),r_ptr
CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
bn_add_words_single_top
LDD 0(a_ptr),t
LDD 0(b_ptr),b
ADD t,%ret1,t ; t = t+c;
ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
ADD t,b,l ; l = t + b[0]
ADD,DC %ret1,%r0,%ret1 ; c+= carry
STD l,0(r_ptr)
bn_add_words_exit
.EXIT
BVE (%rp)
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
.PROCEND ;in=23,24,25,26,29;out=28;
;----------------------------------------------------------------------------
;
;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
;
; arg0 = rp
; arg1 = ap
; arg2 = bp
; arg3 = n
t1 .reg %r22
t2 .reg %r21
sub_tmp1 .reg %r20
sub_tmp2 .reg %r19
;----------------------------------------------------------------------------
; bn_sub_words: r[i] = a[i] - b[i] - borrow for i in [0,n); returns the
; final borrow (0 or 1) split across %ret0/%ret1 for the 32-bit runtime.
; Borrow is recomputed each step: set to 1 when t1 < t2, left unchanged
; when t1 == t2 (the CMPCLR pair nullifies the COPY in those cases),
; cleared when t1 > t2.  Loop unrolled twice; odd element in the tail.
;----------------------------------------------------------------------------
bn_sub_words
.proc
.callinfo
.EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
CMPIB,>= 0,n,bn_sub_words_exit
COPY %r0,%ret1 ; return 0 by default
;
; If 2 or more numbers do the loop
;
CMPIB,= 1,n,bn_sub_words_single_top
NOP
;
; This loop is unrolled 2 times (64-byte aligned as well)
;
bn_sub_words_unroll2
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,0(r_ptr)
LDD 8(a_ptr),t1
LDD 8(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,8(r_ptr)
LDO -2(n),n
LDO 16(a_ptr),a_ptr
LDO 16(b_ptr),b_ptr
CMPIB,<= 2,n,bn_sub_words_unroll2
LDO 16(r_ptr),r_ptr
CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
bn_sub_words_single_top
LDD 0(a_ptr),t1
LDD 0(b_ptr),t2
SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
LDO 1(%r0),sub_tmp2
CMPCLR,*= t1,t2,%r0 ; if t1 == t2, keep previous borrow
COPY sub_tmp2,%ret1
STD sub_tmp1,0(r_ptr)
bn_sub_words_exit
.EXIT
BVE (%rp)
EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
.PROCEND ;in=23,24,25,26,29;out=28;
;------------------------------------------------------------------------------
;
; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
;
; arg0 = h
; arg1 = l
; arg2 = d
;
; This is mainly just output from the HP C compiler.
;
;------------------------------------------------------------------------------
bn_div_words
.PROC
.EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
.IMPORT BN_num_bits_word,CODE
;--- not PIC .IMPORT __iob,DATA
;--- not PIC .IMPORT fprintf,CODE
.IMPORT abort,CODE
.IMPORT $$div2U,MILLICODE
.CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
.ENTRY
; --- prologue: save return pointer and callee-saved registers, then pack
; --- the incoming 32-bit argument halves into 64-bit working registers
STW %r2,-20(%r30) ;offset 0x8ec
STW,MA %r3,192(%r30) ;offset 0x8f0
STW %r4,-188(%r30) ;offset 0x8f4
DEPD %r5,31,32,%r6 ;offset 0x8f8
STD %r6,-184(%r30) ;offset 0x8fc
DEPD %r7,31,32,%r8 ;offset 0x900
STD %r8,-176(%r30) ;offset 0x904
STW %r9,-168(%r30) ;offset 0x908
LDD -248(%r30),%r3 ;offset 0x90c
COPY %r26,%r4 ;offset 0x910
COPY %r24,%r5 ;offset 0x914
DEPD %r25,31,32,%r4 ;offset 0x918
CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
DEPD %r23,31,32,%r5 ;offset 0x920
MOVIB,TR -1,%r29,$00060002 ;offset 0x924
EXTRD,U %r29,31,32,%r28 ;offset 0x928
$0006002A
LDO -1(%r29),%r29 ;offset 0x92c
SUB %r23,%r7,%r23 ;offset 0x930
$00060024
SUB %r4,%r31,%r25 ;offset 0x934
AND %r25,%r19,%r26 ;offset 0x938
CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
DEPD,Z %r25,31,32,%r20 ;offset 0x940
OR %r20,%r24,%r21 ;offset 0x944
CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
SUB %r31,%r2,%r31 ;offset 0x94c
$00060046
$0006002E
DEPD,Z %r23,31,32,%r25 ;offset 0x950
EXTRD,U %r23,31,32,%r26 ;offset 0x954
AND %r25,%r19,%r24 ;offset 0x958
ADD,L %r31,%r26,%r31 ;offset 0x95c
CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
LDO 1(%r31),%r31 ;offset 0x964
$00060032
CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
LDO -1(%r29),%r29 ;offset 0x96c
ADD,L %r4,%r3,%r4 ;offset 0x970
$00060036
ADDIB,=,N -1,%r8,$D0 ;offset 0x974
SUB %r5,%r24,%r28 ;offset 0x978
$0006003A
SUB %r4,%r31,%r24 ;offset 0x97c
SHRPD %r24,%r28,32,%r4 ;offset 0x980
DEPD,Z %r29,31,32,%r9 ;offset 0x984
DEPD,Z %r28,31,32,%r5 ;offset 0x988
$0006001C
EXTRD,U %r4,31,32,%r31 ;offset 0x98c
CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
MOVB,TR %r6,%r29,$D1 ;offset 0x994
STD %r29,-152(%r30) ;offset 0x998
; --- main entry: d != 0.  Determine bit length of d and normalise h/l/d
; --- so the divisor's top bit position is known before the long division
$0006000C
EXTRD,U %r3,31,32,%r25 ;offset 0x99c
COPY %r3,%r26 ;offset 0x9a0
EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
.CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
B,L BN_num_bits_word,%r2 ;offset 0x9ac
EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
LDI 64,%r20 ;offset 0x9b4
DEPD %r7,31,32,%r5 ;offset 0x9b8
DEPD %r8,31,32,%r4 ;offset 0x9bc
DEPD %r9,31,32,%r3 ;offset 0x9c0
CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
COPY %r28,%r24 ;offset 0x9c8
MTSARCM %r24 ;offset 0x9cc
DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
$00060012
SUBI 64,%r24,%r31 ;offset 0x9d8
CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
SUB %r4,%r3,%r4 ;offset 0x9e0
$00060016
CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
COPY %r0,%r9 ;offset 0x9e8
MTSARCM %r31 ;offset 0x9ec
DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
SUBI 64,%r31,%r26 ;offset 0x9f4
MTSAR %r26 ;offset 0x9f8
SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
MTSARCM %r31 ;offset 0xa00
DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
$0006001A
DEPDI,Z -1,31,32,%r19 ;offset 0xa08
AND %r3,%r19,%r29 ;offset 0xa0c
EXTRD,U %r29,31,32,%r2 ;offset 0xa10
DEPDI,Z -1,63,32,%r6 ;offset 0xa14
MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
; --- overflow path: h >= d, quotient would not fit in one word -> abort
$D2
;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
.CALL ;
B,L abort,%r2 ;offset 0xa34
NOP ;offset 0xa38
B $D3 ;offset 0xa3c
LDW -212(%r30),%r2 ;offset 0xa40
$00060020
COPY %r4,%r26 ;offset 0xa44
EXTRD,U %r4,31,32,%r25 ;offset 0xa48
COPY %r2,%r24 ;offset 0xa4c
.CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
B,L $$div2U,%r31 ;offset 0xa50
EXTRD,U %r2,31,32,%r23 ;offset 0xa54
DEPD %r28,31,32,%r29 ;offset 0xa58
$00060022
STD %r29,-152(%r30) ;offset 0xa5c
$D1
AND %r5,%r19,%r24 ;offset 0xa60
EXTRD,U %r24,31,32,%r24 ;offset 0xa64
STW %r2,-160(%r30) ;offset 0xa68
STW %r7,-128(%r30) ;offset 0xa6c
FLDD -152(%r30),%fr4 ;offset 0xa70
FLDD -152(%r30),%fr7 ;offset 0xa74
FLDW -160(%r30),%fr8L ;offset 0xa78
FLDW -128(%r30),%fr5L ;offset 0xa7c
XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
FSTD %fr10,-136(%r30) ;offset 0xa84
XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
FSTD %fr22,-144(%r30) ;offset 0xa8c
XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
FSTD %fr11,-112(%r30) ;offset 0xa98
FSTD %fr23,-120(%r30) ;offset 0xa9c
LDD -136(%r30),%r28 ;offset 0xaa0
DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
LDD -144(%r30),%r20 ;offset 0xaa8
ADD,L %r20,%r31,%r31 ;offset 0xaac
LDD -112(%r30),%r22 ;offset 0xab0
DEPD,Z %r22,31,32,%r22 ;offset 0xab4
LDD -120(%r30),%r21 ;offset 0xab8
B $00060024 ;offset 0xabc
ADD,L %r21,%r22,%r23 ;offset 0xac0
$D0
OR %r9,%r29,%r29 ;offset 0xac4
$00060040
EXTRD,U %r29,31,32,%r28 ;offset 0xac8
$00060002
$L2
LDW -212(%r30),%r2 ;offset 0xacc
; --- epilogue: restore callee-saved registers; quotient returned in
; --- %ret0/%ret1 (32-bit halves)
$D3
LDW -168(%r30),%r9 ;offset 0xad0
LDD -176(%r30),%r8 ;offset 0xad4
EXTRD,U %r8,31,32,%r7 ;offset 0xad8
LDD -184(%r30),%r6 ;offset 0xadc
EXTRD,U %r6,31,32,%r5 ;offset 0xae0
LDW -188(%r30),%r4 ;offset 0xae4
BVE (%r2) ;offset 0xae8
.EXIT
LDW,MB -192(%r30),%r3 ;offset 0xaec
.PROCEND ;in=23,25;out=28,29;fpin=105,107;
;----------------------------------------------------------------------------
;
; Registers to hold 64-bit values to manipulate. The "L" part
; of the register corresponds to the upper 32-bits, while the "R"
; part corresponds to the lower 32-bits
;
; Note, that when using b6 and b7, the code must save these before
; using them because they are callee save registers
;
;
; Floating point registers to use to save values that
; are manipulated. These don't collide with ftemp1-6 and
; are all caller save registers
;
a0 .reg %fr22
a0L .reg %fr22L
a0R .reg %fr22R
a1 .reg %fr23
a1L .reg %fr23L
a1R .reg %fr23R
a2 .reg %fr24
a2L .reg %fr24L
a2R .reg %fr24R
a3 .reg %fr25
a3L .reg %fr25L
a3R .reg %fr25R
a4 .reg %fr26
a4L .reg %fr26L
a4R .reg %fr26R
a5 .reg %fr27
a5L .reg %fr27L
a5R .reg %fr27R
a6 .reg %fr28
a6L .reg %fr28L
a6R .reg %fr28R
a7 .reg %fr29
a7L .reg %fr29L
a7R .reg %fr29R
b0 .reg %fr30
b0L .reg %fr30L
b0R .reg %fr30R
b1 .reg %fr31
b1L .reg %fr31L
b1R .reg %fr31R
;
; Temporary floating point variables, these are all caller save
; registers
;
ftemp1 .reg %fr4
ftemp2 .reg %fr5
ftemp3 .reg %fr6
ftemp4 .reg %fr7
;
; The B set of registers when used.
;
b2 .reg %fr8
b2L .reg %fr8L
b2R .reg %fr8R
b3 .reg %fr9
b3L .reg %fr9L
b3R .reg %fr9R
b4 .reg %fr10
b4L .reg %fr10L
b4R .reg %fr10R
b5 .reg %fr11
b5L .reg %fr11L
b5R .reg %fr11R
b6 .reg %fr12
b6L .reg %fr12L
b6R .reg %fr12R
b7 .reg %fr13
b7L .reg %fr13L
b7R .reg %fr13R
c1 .reg %r21 ; only reg
temp1 .reg %r20 ; only reg
temp2 .reg %r19 ; only reg
temp3 .reg %r31 ; only reg
m1 .reg %r28
c2 .reg %r23
high_one .reg %r1
ht .reg %r6
lt .reg %r5
m .reg %r4
c3 .reg %r3
;
; SQR_ADD_C(A0L,A0R,C1,C2,C3): (C3,C2,C1) += A0^2, where A0 is a 64-bit
; operand held in an FP register with 32-bit halves A0L (high) and A0R
; (low).  Three XMPYU 32x32->64 multiplies form the full 128-bit square;
; the cross product m is folded in twice (hence the shift-by-33/extract
; combination below).  Clobbers ftemp1-3, m, lt, ht, temp1-3 and the
; scratch area at -8..-24(%sp).
;
SQR_ADD_C .macro A0L,A0R,C1,C2,C3
XMPYU A0L,A0R,ftemp1 ; m
FSTD ftemp1,-24(%sp) ; store m
XMPYU A0R,A0R,ftemp2 ; lt
FSTD ftemp2,-16(%sp) ; store lt
XMPYU A0L,A0L,ftemp3 ; ht
FSTD ftemp3,-8(%sp) ; store ht
LDD -24(%sp),m ; load m
AND m,high_mask,temp2 ; m & Mask
DEPD,Z m,30,31,temp3 ; m << 32+1
LDD -16(%sp),lt ; lt
LDD -8(%sp),ht ; ht
EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
ADD temp3,lt,lt ; lt = lt+m
ADD,L ht,temp1,ht ; ht += temp1
ADD,DC ht,%r0,ht ; ht++
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC ht,%r0,ht ; ht++
ADD C2,ht,C2 ; c2=c2+ht
ADD,DC C3,%r0,C3 ; c3++
.endm
;
; SQR_ADD_C2(A0L,A0R,A1L,A1R,C1,C2,C3): (C3,C2,C1) += 2*A0*A1.
; Off-diagonal term of comba squaring: the 128-bit product A0*A1 is
; assembled from four XMPYU partial products and then doubled in place
; (ht=ht+ht / lt=lt+lt with carry propagation) before being added to the
; column accumulator.  Clobbers ftemp1-4, m, m1, lt, ht, temp1, temp3
; and the scratch area at -8..-32(%sp).
;
SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,A1L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; r21 = m
LDD -16(%sp),m1 ; r19 = m1
ADD,L m,m1,m ; m+m1
DEPD,Z m,31,32,temp3 ; (m+m1<<32)
LDD -24(%sp),ht ; r24 = ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
ADD,L ht,high_one,ht ; ht+=high_one
EXTRD,U m,31,32,temp1 ; m >> 32
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht+= m>>32
ADD lt,temp3,lt ; lt = lt+m1
ADD,DC ht,%r0,ht ; ht++
ADD ht,ht,ht ; ht=ht+ht;
ADD,DC C3,%r0,C3 ; add in carry (c3++)
ADD lt,lt,lt ; lt=lt+lt;
ADD,DC ht,%r0,ht ; add in carry (ht++)
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
bn_sqr_comba8
.PROC
.CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.ENTRY
.align 64
; Comba 8x8 squaring: r[0..15] = a[0..7]^2.  c1/c2/c3 form a rolling
; three-word column accumulator; SQR_ADD_C adds a diagonal term a[i]^2,
; SQR_ADD_C2 adds an off-diagonal term 2*a[i]*a[j].
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
STD c2,56(r_ptr) ; r[7] = c2;
COPY %r0,c2
SQR_ADD_C a4L,a4R,c3,c1,c2
SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
STD c3,64(r_ptr) ; r[8] = c3;
COPY %r0,c3
SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
STD c1,72(r_ptr) ; r[9] = c1;
COPY %r0,c1
SQR_ADD_C a5L,a5R,c2,c3,c1
SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
STD c2,80(r_ptr) ; r[10] = c2;
COPY %r0,c2
SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
STD c3,88(r_ptr) ; r[11] = c3;
COPY %r0,c3
SQR_ADD_C a6L,a6R,c1,c2,c3
SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
STD c1,96(r_ptr) ; r[12] = c1;
COPY %r0,c1
SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
STD c2,104(r_ptr) ; r[13] = c2;
COPY %r0,c2
SQR_ADD_C a7L,a7R,c3,c1,c2
STD c3, 112(r_ptr) ; r[14] = c3
STD c1, 120(r_ptr) ; r[15] = c1
.EXIT
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
; arg0 = r_ptr
; arg1 = a_ptr
;
bn_sqr_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
; Comba 4x4 squaring: r[0..7] = a[0..3]^2, using the same c1/c2/c3
; column accumulator scheme as bn_sqr_comba8 above.
;
; Fix: the original also loaded a[4]..a[7] (FLDD 32..56(a_ptr)) into
; a4..a7, reading 32 bytes past the end of the 4-word input array even
; though those registers are never used by the 4-word square.  Such a
; read can fault when the array ends at a page boundary, so the dead
; out-of-bounds loads are removed.
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use (only a[0..3])
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
SQR_ADD_C a0L,a0R,c1,c2,c3
STD c1,0(r_ptr) ; r[0] = c1;
COPY %r0,c1
SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
STD c2,8(r_ptr) ; r[1] = c2;
COPY %r0,c2
SQR_ADD_C a1L,a1R,c3,c1,c2
SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
STD c3,16(r_ptr) ; r[2] = c3;
COPY %r0,c3
SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
STD c1,24(r_ptr) ; r[3] = c1;
COPY %r0,c1
SQR_ADD_C a2L,a2R,c2,c3,c1
SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
STD c2,32(r_ptr) ; r[4] = c2;
COPY %r0,c2
SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
STD c3,40(r_ptr) ; r[5] = c3;
COPY %r0,c3
SQR_ADD_C a3L,a3R,c1,c2,c3
STD c1,48(r_ptr) ; r[6] = c1;
STD c2,56(r_ptr) ; r[7] = c2;
.EXIT
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;---------------------------------------------------------------------------
;
; MUL_ADD_C(A0L,A0R,B0L,B0R,C1,C2,C3): (C3,C2,C1) += A0*B0.
; Same four-partial-product 32x32 XMPYU decomposition as SQR_ADD_C2,
; but without the doubling step.  Clobbers ftemp1-4, m, m1, lt, ht,
; temp1, temp3 and the scratch area at -8..-32(%sp).
;
MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
FSTD ftemp1,-16(%sp) ;
XMPYU A0R,B0L,ftemp2 ; m = bh*lt
FSTD ftemp2,-8(%sp) ;
XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
FSTD ftemp3,-32(%sp)
XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
FSTD ftemp4,-24(%sp) ;
LDD -8(%sp),m ; r21 = m
LDD -16(%sp),m1 ; r19 = m1
ADD,L m,m1,m ; m+m1
DEPD,Z m,31,32,temp3 ; (m+m1<<32)
LDD -24(%sp),ht ; r24 = ht
CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
ADD,L ht,high_one,ht ; ht+=high_one
EXTRD,U m,31,32,temp1 ; m >> 32
LDD -32(%sp),lt ; lt
ADD,L ht,temp1,ht ; ht+= m>>32
ADD lt,temp3,lt ; lt = lt+m1
ADD,DC ht,%r0,ht ; ht++
ADD C1,lt,C1 ; c1=c1+lt
ADD,DC ht,%r0,ht ; ht++ (propagate carry from c1 into ht)
ADD C2,ht,C2 ; c2 = c2 + ht
ADD,DC C3,%r0,C3 ; add in carry (c3++)
.endm
;
;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
bn_mul_comba8
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
; Comba 8x8 multiplication: r[0..15] = a[0..7] * b[0..7].
; b2..b7 live in fr8-fr13; fr12/fr13 are callee-saved FP registers and
; must be preserved across the call.
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12 (b6)
FSTD %fr13,40(%sp) ; save fr13 (b7)
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 32(a_ptr),a4
FLDD 40(a_ptr),a5
FLDD 48(a_ptr),a6
FLDD 56(a_ptr),a7
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
FLDD 32(b_ptr),b4
FLDD 40(b_ptr),b5
FLDD 48(b_ptr),b6
FLDD 56(b_ptr),b7
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr)
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr)
COPY %r0,c1
MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
STD c2,32(r_ptr)
COPY %r0,c2
MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
STD c3,40(r_ptr)
COPY %r0,c3
MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
STD c1,48(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
STD c2,56(r_ptr)
COPY %r0,c2
MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
STD c3,64(r_ptr)
COPY %r0,c3
MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
STD c1,72(r_ptr)
COPY %r0,c1
MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
STD c2,80(r_ptr)
COPY %r0,c2
MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
STD c3,88(r_ptr)
COPY %r0,c3
MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
STD c1,96(r_ptr)
COPY %r0,c1
MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
STD c2,104(r_ptr)
COPY %r0,c2
MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
STD c3,112(r_ptr)
STD c1,120(r_ptr)
.EXIT
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;-----------------------------------------------------------------------------
;
;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
; arg0 = r_ptr
; arg1 = a_ptr
; arg2 = b_ptr
;
bn_mul_comba4
.proc
.callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
.EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
.entry
.align 64
; Comba 4x4 multiplication: r[0..7] = a[0..3] * b[0..3].
; fr12/fr13 are saved/restored for uniformity with bn_mul_comba8 even
; though b6/b7 are not used here.
STD %r3,0(%sp) ; save r3
STD %r4,8(%sp) ; save r4
STD %r5,16(%sp) ; save r5
STD %r6,24(%sp) ; save r6
FSTD %fr12,32(%sp) ; save fr12
FSTD %fr13,40(%sp) ; save fr13
;
; Zero out carries
;
COPY %r0,c1
COPY %r0,c2
COPY %r0,c3
LDO 128(%sp),%sp ; bump stack
DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
;
; Load up all of the values we are going to use
;
FLDD 0(a_ptr),a0
FLDD 8(a_ptr),a1
FLDD 16(a_ptr),a2
FLDD 24(a_ptr),a3
FLDD 0(b_ptr),b0
FLDD 8(b_ptr),b1
FLDD 16(b_ptr),b2
FLDD 24(b_ptr),b3
MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
STD c1,0(r_ptr)
COPY %r0,c1
MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
STD c2,8(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
STD c3,16(r_ptr)
COPY %r0,c3
MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
STD c1,24(r_ptr)
COPY %r0,c1
MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
STD c2,32(r_ptr)
COPY %r0,c2
MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
STD c3,40(r_ptr)
COPY %r0,c3
MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
STD c1,48(r_ptr)
STD c2,56(r_ptr)
.EXIT
FLDD -88(%sp),%fr13
FLDD -96(%sp),%fr12
LDD -104(%sp),%r6 ; restore r6
LDD -112(%sp),%r5 ; restore r5
LDD -120(%sp),%r4 ; restore r4
BVE (%rp)
LDD,MB -128(%sp),%r3
.PROCEND
;--- not PIC .SPACE $TEXT$
;--- not PIC .SUBSPA $CODE$
;--- not PIC .SPACE $PRIVATE$,SORT=16
;--- not PIC .IMPORT $global$,DATA
;--- not PIC .SPACE $TEXT$
;--- not PIC .SUBSPA $CODE$
;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
;--- not PIC C$7
;--- not PIC .ALIGN 8
;--- not PIC .STRINGZ "Division would overflow (%d)\n"
.END
/* ==== Second module: crypto/bn/asm/mips3.s begins here ==== */
.rdata
.asciiz "mips3.s, Version 1.1"
.asciiz "MIPS III/IV ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
/*
* ====================================================================
* Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
* ====================================================================
*/
/*
 * This is my modest contribution to the OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
*
 * The module is designed to work with either of the "new" MIPS ABI(5),
 * namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
 * IRIX 5.x not only because it doesn't support new ABIs but also
* because 5.x kernels put R4x00 CPU into 32-bit mode and all those
* 64-bit instructions (daddu, dmultu, etc.) found below gonna only
* cause illegal instruction exception:-(
*
* In addition the code depends on preprocessor flags set up by MIPSpro
* compiler driver (either as or cc) and therefore (probably?) can't be
* compiled by the GNU assembler. GNU C driver manages fine though...
* I mean as long as -mmips-as is specified or is the default option,
* because then it simply invokes /usr/bin/as which in turn takes
* perfect care of the preprocessor definitions. Another neat feature
* offered by the MIPSpro assembler is an optimization pass. This gave
* me the opportunity to have the code looking more regular as all those
* architecture dependent instruction rescheduling details were left to
* the assembler. Cool, huh?
*
* Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
* goes way over 3 times faster!
*
* <appro@fy.chalmers.se>
*/
#include <asm.h>
#include <regdef.h>
#if _MIPS_ISA>=4
#define MOVNZ(cond,dst,src) \
movn dst,src,cond
#else
#define MOVNZ(cond,dst,src) \
.set noreorder; \
bnezl cond,.+8; \
move dst,src; \
.set reorder
#endif
.text
.set noat
.set reorder
#define MINUS4 v1
.align 5
LEAF(bn_mul_add_words)
/*
 * BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
 *                           BN_ULONG w)
 * a0=rp, a1=ap, a2=num, a3=w; returns the final carry in v0.
 * rp[i] += ap[i]*w.  The main loop is unrolled 4-way; a tail handles
 * the remaining 0..3 words.  Branch-likely forms (bgtzl/bnezl) annul
 * the delay-slot load when the branch falls through.
 */
.set noreorder
bgtzl a2,.L_bn_mul_add_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_mul_add_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_mul_add_words_tail
.L_bn_mul_add_words_loop:
dmultu t0,a3
ld t1,0(a0)
ld t2,8(a1)
ld t3,8(a0)
ld ta0,16(a1)
ld ta1,16(a0)
daddu t1,v0
sltu v0,t1,v0 /* All manuals say it "compares 32-bit
* values", but it seems to work fine
* even on 64-bit registers. */
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,0(a0)
daddu v0,AT
dmultu t2,a3
ld ta2,24(a1)
ld ta3,24(a0)
daddu t3,v0
sltu v0,t3,v0
mflo AT
mfhi t2
daddu t3,AT
daddu v0,t2
sltu AT,t3,AT
sd t3,8(a0)
daddu v0,AT
dmultu ta0,a3
subu a2,4
PTR_ADD a0,32
PTR_ADD a1,32
daddu ta1,v0
sltu v0,ta1,v0
mflo AT
mfhi ta0
daddu ta1,AT
daddu v0,ta0
sltu AT,ta1,AT
sd ta1,-16(a0)
daddu v0,AT
dmultu ta2,a3
and ta0,a2,MINUS4
daddu ta3,v0
sltu v0,ta3,v0
mflo AT
mfhi ta2
daddu ta3,AT
daddu v0,ta2
sltu AT,ta3,AT
sd ta3,-8(a0)
daddu v0,AT
.set noreorder
bgtzl ta0,.L_bn_mul_add_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_mul_add_words_tail
ld t0,0(a1)
.set reorder
.L_bn_mul_add_words_return:
jr ra
.L_bn_mul_add_words_tail:
/* up to three leftover words, one dmultu each */
dmultu t0,a3
ld t1,0(a0)
subu a2,1
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,0(a0)
daddu v0,AT
beqz a2,.L_bn_mul_add_words_return
ld t0,8(a1)
dmultu t0,a3
ld t1,8(a0)
subu a2,1
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,8(a0)
daddu v0,AT
beqz a2,.L_bn_mul_add_words_return
ld t0,16(a1)
dmultu t0,a3
ld t1,16(a0)
daddu t1,v0
sltu v0,t1,v0
mflo AT
mfhi t0
daddu t1,AT
daddu v0,t0
sltu AT,t1,AT
sd t1,16(a0)
daddu v0,AT
jr ra
END(bn_mul_add_words)
.align 5
LEAF(bn_mul_words)
/*
 * BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
 *                       BN_ULONG w)
 * a0=rp, a1=ap, a2=num, a3=w; returns the final carry in v0.
 * rp[i] = ap[i]*w + carry (no read of rp).  4-way unrolled main loop
 * plus a 0..3-word tail, same structure as bn_mul_add_words.
 */
.set noreorder
bgtzl a2,.L_bn_mul_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_mul_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_mul_words_tail
.L_bn_mul_words_loop:
dmultu t0,a3
ld t2,8(a1)
ld ta0,16(a1)
ld ta2,24(a1)
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,0(a0)
daddu v0,t1,t0
dmultu t2,a3
subu a2,4
PTR_ADD a0,32
PTR_ADD a1,32
mflo AT
mfhi t2
daddu v0,AT
sltu t3,v0,AT
sd v0,-24(a0)
daddu v0,t3,t2
dmultu ta0,a3
mflo AT
mfhi ta0
daddu v0,AT
sltu ta1,v0,AT
sd v0,-16(a0)
daddu v0,ta1,ta0
dmultu ta2,a3
and ta0,a2,MINUS4
mflo AT
mfhi ta2
daddu v0,AT
sltu ta3,v0,AT
sd v0,-8(a0)
daddu v0,ta3,ta2
.set noreorder
bgtzl ta0,.L_bn_mul_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_mul_words_tail
ld t0,0(a1)
.set reorder
.L_bn_mul_words_return:
jr ra
.L_bn_mul_words_tail:
/* up to three leftover words */
dmultu t0,a3
subu a2,1
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,0(a0)
daddu v0,t1,t0
beqz a2,.L_bn_mul_words_return
ld t0,8(a1)
dmultu t0,a3
subu a2,1
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,8(a0)
daddu v0,t1,t0
beqz a2,.L_bn_mul_words_return
ld t0,16(a1)
dmultu t0,a3
mflo AT
mfhi t0
daddu v0,AT
sltu t1,v0,AT
sd v0,16(a0)
daddu v0,t1,t0
jr ra
END(bn_mul_words)
.align 5
LEAF(bn_sqr_words)
/*
 * void bn_sqr_words(BN_ULONG *rp, const BN_ULONG *ap, int num)
 * a0=rp, a1=ap, a2=num; returns 0 in v0.
 * rp[2i], rp[2i+1] = low/high words of ap[i]^2 (r advances twice as
 * fast as a).  4-way unrolled with a 0..3-word tail; no carry chain is
 * needed since the squares are independent.
 */
.set noreorder
bgtzl a2,.L_bn_sqr_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_sqr_words_proceed:
li MINUS4,-4
and ta0,a2,MINUS4
move v0,zero
beqz ta0,.L_bn_sqr_words_tail
.L_bn_sqr_words_loop:
dmultu t0,t0
ld t2,8(a1)
ld ta0,16(a1)
ld ta2,24(a1)
mflo t1
mfhi t0
sd t1,0(a0)
sd t0,8(a0)
dmultu t2,t2
subu a2,4
PTR_ADD a0,64
PTR_ADD a1,32
mflo t3
mfhi t2
sd t3,-48(a0)
sd t2,-40(a0)
dmultu ta0,ta0
mflo ta1
mfhi ta0
sd ta1,-32(a0)
sd ta0,-24(a0)
dmultu ta2,ta2
and ta0,a2,MINUS4
mflo ta3
mfhi ta2
sd ta3,-16(a0)
sd ta2,-8(a0)
.set noreorder
bgtzl ta0,.L_bn_sqr_words_loop
ld t0,0(a1)
bnezl a2,.L_bn_sqr_words_tail
ld t0,0(a1)
.set reorder
.L_bn_sqr_words_return:
move v0,zero
jr ra
.L_bn_sqr_words_tail:
/* up to three leftover words */
dmultu t0,t0
subu a2,1
mflo t1
mfhi t0
sd t1,0(a0)
sd t0,8(a0)
beqz a2,.L_bn_sqr_words_return
ld t0,8(a1)
dmultu t0,t0
subu a2,1
mflo t1
mfhi t0
sd t1,16(a0)
sd t0,24(a0)
beqz a2,.L_bn_sqr_words_return
ld t0,16(a1)
dmultu t0,t0
mflo t1
mfhi t0
sd t1,32(a0)
sd t0,40(a0)
jr ra
END(bn_sqr_words)
.align 5
LEAF(bn_add_words)
/*
 * BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap,
 *                       const BN_ULONG *bp, int num)
 * a0=rp, a1=ap, a2=bp, a3=num; returns the final carry (0/1) in v0.
 * rp[i] = ap[i] + bp[i] + carry.  Each word does two adds (operand sum,
 * then incoming carry) and collects both carry-outs into v0.
 */
.set noreorder
bgtzl a3,.L_bn_add_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_add_words_proceed:
li MINUS4,-4
and AT,a3,MINUS4
move v0,zero
beqz AT,.L_bn_add_words_tail
.L_bn_add_words_loop:
ld ta0,0(a2)
subu a3,4
ld t1,8(a1)
and AT,a3,MINUS4
ld t2,16(a1)
PTR_ADD a2,32
ld t3,24(a1)
PTR_ADD a0,32
ld ta1,-24(a2)
PTR_ADD a1,32
ld ta2,-16(a2)
ld ta3,-8(a2)
daddu ta0,t0
sltu t8,ta0,t0
daddu t0,ta0,v0
sltu v0,t0,ta0
sd t0,-32(a0)
daddu v0,t8
daddu ta1,t1
sltu t9,ta1,t1
daddu t1,ta1,v0
sltu v0,t1,ta1
sd t1,-24(a0)
daddu v0,t9
daddu ta2,t2
sltu t8,ta2,t2
daddu t2,ta2,v0
sltu v0,t2,ta2
sd t2,-16(a0)
daddu v0,t8
daddu ta3,t3
sltu t9,ta3,t3
daddu t3,ta3,v0
sltu v0,t3,ta3
sd t3,-8(a0)
daddu v0,t9
.set noreorder
bgtzl AT,.L_bn_add_words_loop
ld t0,0(a1)
bnezl a3,.L_bn_add_words_tail
ld t0,0(a1)
.set reorder
.L_bn_add_words_return:
jr ra
.L_bn_add_words_tail:
/* up to three leftover words */
ld ta0,0(a2)
daddu ta0,t0
subu a3,1
sltu t8,ta0,t0
daddu t0,ta0,v0
sltu v0,t0,ta0
sd t0,0(a0)
daddu v0,t8
beqz a3,.L_bn_add_words_return
ld t1,8(a1)
ld ta1,8(a2)
daddu ta1,t1
subu a3,1
sltu t9,ta1,t1
daddu t1,ta1,v0
sltu v0,t1,ta1
sd t1,8(a0)
daddu v0,t9
beqz a3,.L_bn_add_words_return
ld t2,16(a1)
ld ta2,16(a2)
daddu ta2,t2
sltu t8,ta2,t2
daddu t2,ta2,v0
sltu v0,t2,ta2
sd t2,16(a0)
daddu v0,t8
jr ra
END(bn_add_words)
.align 5
LEAF(bn_sub_words)
/*
 * BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap,
 *                       const BN_ULONG *bp, int num)
 * a0=rp, a1=ap, a2=bp, a3=num; returns the final borrow (0/1) in v0.
 * rp[i] = ap[i] - bp[i] - borrow.  MOVNZ(diff,v0,borrow) updates v0
 * only when the word difference is nonzero; when it is zero, the
 * incoming borrow simply propagates unchanged.
 */
.set noreorder
bgtzl a3,.L_bn_sub_words_proceed
ld t0,0(a1)
jr ra
move v0,zero
.set reorder
.L_bn_sub_words_proceed:
li MINUS4,-4
and AT,a3,MINUS4
move v0,zero
beqz AT,.L_bn_sub_words_tail
.L_bn_sub_words_loop:
ld ta0,0(a2)
subu a3,4
ld t1,8(a1)
and AT,a3,MINUS4
ld t2,16(a1)
PTR_ADD a2,32
ld t3,24(a1)
PTR_ADD a0,32
ld ta1,-24(a2)
PTR_ADD a1,32
ld ta2,-16(a2)
ld ta3,-8(a2)
sltu t8,t0,ta0
dsubu t0,ta0
dsubu ta0,t0,v0
sd ta0,-32(a0)
MOVNZ (t0,v0,t8)
sltu t9,t1,ta1
dsubu t1,ta1
dsubu ta1,t1,v0
sd ta1,-24(a0)
MOVNZ (t1,v0,t9)
sltu t8,t2,ta2
dsubu t2,ta2
dsubu ta2,t2,v0
sd ta2,-16(a0)
MOVNZ (t2,v0,t8)
sltu t9,t3,ta3
dsubu t3,ta3
dsubu ta3,t3,v0
sd ta3,-8(a0)
MOVNZ (t3,v0,t9)
.set noreorder
bgtzl AT,.L_bn_sub_words_loop
ld t0,0(a1)
bnezl a3,.L_bn_sub_words_tail
ld t0,0(a1)
.set reorder
.L_bn_sub_words_return:
jr ra
.L_bn_sub_words_tail:
/* up to three leftover words */
ld ta0,0(a2)
subu a3,1
sltu t8,t0,ta0
dsubu t0,ta0
dsubu ta0,t0,v0
MOVNZ (t0,v0,t8)
sd ta0,0(a0)
beqz a3,.L_bn_sub_words_return
ld t1,8(a1)
subu a3,1
ld ta1,8(a2)
sltu t9,t1,ta1
dsubu t1,ta1
dsubu ta1,t1,v0
MOVNZ (t1,v0,t9)
sd ta1,8(a0)
beqz a3,.L_bn_sub_words_return
ld t2,16(a1)
ld ta2,16(a2)
sltu t8,t2,ta2
dsubu t2,ta2
dsubu ta2,t2,v0
MOVNZ (t2,v0,t8)
sd ta2,16(a0)
jr ra
END(bn_sub_words)
#undef MINUS4
.align 5
LEAF(bn_div_3_words)
/*
 * One-word quotient estimate for BN division.
 * NOTE(review): argument roles inferred from the code — confirm against
 * the callers: a0 points at the top of a 3-word dividend (words at
 * (a0), -8(a0), -16(a0)); a1 and a2 are divisor words.  Returns the
 * quotient estimate in v0, or -1 (all ones) when the top dividend word
 * equals the top divisor word.  Calls bn_div_words via bal, keeping the
 * extra arguments and return address in registers it does not touch.
 */
.set reorder
move a3,a0 /* we know that bn_div_words doesn't
* touch a3, ta2, ta3 and preserves a2
* so that we can save two arguments
* and return address in registers
* instead of stack:-)
*/
ld a0,(a3)
move ta2,a1
ld a1,-8(a3)
bne a0,a2,.L_bn_div_3_words_proceed
li v0,-1
jr ra
.L_bn_div_3_words_proceed:
move ta3,ra
bal bn_div_words
move ra,ta3
dmultu ta2,v0
ld t2,-16(a3)
move ta0,zero
mfhi t1
mflo t0
sltu t8,t1,v1
.L_bn_div_3_words_inner_loop:
/* decrement the estimate while it is still too large */
bnez t8,.L_bn_div_3_words_inner_loop_done
sgeu AT,t2,t0
seq t9,t1,v1
and AT,t9
sltu t3,t0,ta2
daddu v1,a2
dsubu t1,t3
dsubu t0,ta2
sltu t8,t1,v1
sltu ta0,v1,a2
or t8,ta0
.set noreorder
beqzl AT,.L_bn_div_3_words_inner_loop
dsubu v0,1
.set reorder
.L_bn_div_3_words_inner_loop_done:
jr ra
END(bn_div_3_words)
.align 5
LEAF(bn_div_words)
/*
 * BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
 * a0=h, a1=l, a2=d; returns the quotient of the 128-bit value (h,l)
 * divided by d in v0, remainder in v1.  d==0 returns -1; (h,l) too
 * large for a one-word quotient triggers 'break 6'.
 * Algorithm: normalise d so its top bit is set (shift count in t9),
 * then perform two 32-bit quotient-digit steps, each seeded by a
 * hardware ddivu of the high half and corrected by a subtract loop.
 */
.set noreorder
bnezl a2,.L_bn_div_words_proceed
move v1,zero
jr ra
li v0,-1 /* I'd rather signal div-by-zero
* which can be done with 'break 7' */
.L_bn_div_words_proceed:
bltz a2,.L_bn_div_words_body
move t9,v1
/* count leading zeros of d into t9 by shifting until the sign bit set */
dsll a2,1
bgtz a2,.-4
addu t9,1
.set reorder
negu t1,t9
li t2,-1
dsll t2,t1
and t2,a0
dsrl AT,a1,t1
.set noreorder
bnezl t2,.+8
break 6 /* signal overflow */
.set reorder
dsll a0,t9
dsll a1,t9
or a0,AT
#define QT ta0
#define HH ta1
#define DH v1
.L_bn_div_words_body:
dsrl DH,a2,32
sgeu AT,a0,a2
.set noreorder
bnezl AT,.+8
dsubu a0,a2
.set reorder
li QT,-1
dsrl HH,a0,32
dsrl QT,32 /* q=0xffffffff */
beq DH,HH,.L_bn_div_words_skip_div1
ddivu zero,a0,DH
mflo QT
.L_bn_div_words_skip_div1:
dmultu a2,QT
dsll t3,a0,32
dsrl AT,a1,32
or t3,AT
mflo t0
mfhi t1
.L_bn_div_words_inner_loop1:
/* correct the first quotient digit estimate downward */
sltu t2,t3,t0
seq t8,HH,t1
sltu AT,HH,t1
and t2,t8
sltu v0,t0,a2
or AT,t2
.set noreorder
beqz AT,.L_bn_div_words_inner_loop1_done
dsubu t1,v0
dsubu t0,a2
b .L_bn_div_words_inner_loop1
dsubu QT,1
.set reorder
.L_bn_div_words_inner_loop1_done:
dsll a1,32
dsubu a0,t3,t0
dsll v0,QT,32
li QT,-1
dsrl HH,a0,32
dsrl QT,32 /* q=0xffffffff */
beq DH,HH,.L_bn_div_words_skip_div2
ddivu zero,a0,DH
mflo QT
.L_bn_div_words_skip_div2:
#undef DH
dmultu a2,QT
dsll t3,a0,32
dsrl AT,a1,32
or t3,AT
mflo t0
mfhi t1
.L_bn_div_words_inner_loop2:
/* correct the second quotient digit estimate downward */
sltu t2,t3,t0
seq t8,HH,t1
sltu AT,HH,t1
and t2,t8
sltu v1,t0,a2
or AT,t2
.set noreorder
beqz AT,.L_bn_div_words_inner_loop2_done
dsubu t1,v1
dsubu t0,a2
b .L_bn_div_words_inner_loop2
dsubu QT,1
.set reorder
.L_bn_div_words_inner_loop2_done:
#undef HH
dsubu a0,t3,t0
or v0,QT
dsrl v1,a0,t9 /* v1 contains remainder if anybody wants it */
dsrl a2,t9 /* restore a2 */
jr ra
#undef QT
END(bn_div_words)
/*
 * Register assignment for the Comba bn_{mul,sqr}_comba{4,8} routines below.
 * a_0..a_3 / b_0..b_3 live in caller-saved temporaries; a_4..a_6 / b_4..b_6
 * need callee-saved s-registers (spilled in bn_mul_comba8's prologue), and
 * the last words reuse the argument registers once the pointers are dead.
 * t_1/t_2 receive each 128-bit product (lo/hi); c_1/c_2/c_3 rotate as the
 * three-word column accumulator of the Comba scheme.
 */
#define a_0 t0
#define a_1 t1
#define a_2 t2
#define a_3 t3
#define b_0 ta0
#define b_1 ta1
#define b_2 ta2
#define b_3 ta3
#define a_4 s0
#define a_5 s2
#define a_6 s4
#define a_7 a1 /* once we load a[7] we don't need a anymore */
#define b_4 s1
#define b_5 s3
#define b_6 s5
#define b_7 a2 /* once we load b[7] we don't need b anymore */
#define t_1 t8
#define t_2 t9
#define c_1 v0
#define c_2 v1
#define c_3 a3
/* stack bytes needed to spill s0-s5 (6 x 8) */
#define FRAME_SIZE 48
.align 5
/*
 * void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 *   a0 = r (16 words out), a1 = a (8 words), a2 = b (8 words)
 * Comba 8x8 multiplication: r[0..15] = a * b, computed column by column.
 * Each dmultu/mflo/mfhi group is one expanded mul_add_c(a[i],b[j],...):
 * the 128-bit product lands in t_1 (lo) / t_2 (hi) and is folded into the
 * rotating three-word accumulator c_1/c_2/c_3 with sltu carry propagation.
 * s0-s5 are spilled because they hold a[4..6]/b[4..6]; a1/a2 themselves
 * are recycled for a[7]/b[7] once all loads are done.
 * NOTE(review): .frame claims 64 bytes while FRAME_SIZE is 48 -- this only
 * affects debug/unwind info, but worth confirming against upstream.
 */
LEAF(bn_mul_comba8)
.set noreorder
PTR_SUB sp,FRAME_SIZE
.frame sp,64,ra
.set reorder
ld a_0,0(a1) /* If compiled with -mips3 option on
 * R5000 box assembler barks on this
 * line with "shouldn't have mult/div
 * as last instruction in bb (R10K
 * bug)" warning. If anybody out there
 * has a clue about how to circumvent
 * this do send me a note.
 * <appro@fy.chalmers.se>
 */
ld b_0,0(a2)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
ld b_1,8(a2)
ld b_2,16(a2)
ld b_3,24(a2)
dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
/* spill callee-saved registers while the first multiply runs */
sd s0,0(sp)
sd s1,8(sp)
sd s2,16(sp)
sd s3,24(sp)
sd s4,32(sp)
sd s5,40(sp)
mflo c_1
mfhi c_2
dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
/* remaining input words overlap the multiply latency */
ld a_4,32(a1)
ld a_5,40(a1)
ld a_6,48(a1)
ld a_7,56(a1)
ld b_4,32(a2)
ld b_5,40(a2)
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
ld b_6,48(a2)
ld b_7,56(a2)
sd c_1,0(a0) /* r[0]=c1; */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
sd c_2,8(a0) /* r[1]=c2; */
dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0) /* r[2]=c3; */
dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0) /* r[3]=c1; */
dmultu a_4,b_0 /* mul_add_c(a[4],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_0,b_4 /* mul_add_c(a[0],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0) /* r[4]=c2; */
dmultu a_0,b_5 /* mul_add_c(a[0],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_1,b_4 /* mul_add_c(a[1],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,b_1 /* mul_add_c(a[4],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,b_0 /* mul_add_c(a[5],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0) /* r[5]=c3; */
dmultu a_6,b_0 /* mul_add_c(a[6],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_5,b_1 /* mul_add_c(a[5],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,b_2 /* mul_add_c(a[4],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_4 /* mul_add_c(a[2],b[4],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,b_5 /* mul_add_c(a[1],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_0,b_6 /* mul_add_c(a[0],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,48(a0) /* r[6]=c1; */
dmultu a_0,b_7 /* mul_add_c(a[0],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_1,b_6 /* mul_add_c(a[1],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,b_5 /* mul_add_c(a[2],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,b_4 /* mul_add_c(a[3],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_4,b_3 /* mul_add_c(a[4],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,b_2 /* mul_add_c(a[5],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_6,b_1 /* mul_add_c(a[6],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_7,b_0 /* mul_add_c(a[7],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,56(a0) /* r[7]=c2; */
dmultu a_7,b_1 /* mul_add_c(a[7],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_6,b_2 /* mul_add_c(a[6],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,b_3 /* mul_add_c(a[5],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,b_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_3,b_5 /* mul_add_c(a[3],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,b_6 /* mul_add_c(a[2],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,b_7 /* mul_add_c(a[1],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,64(a0) /* r[8]=c3; */
dmultu a_2,b_7 /* mul_add_c(a[2],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_3,b_6 /* mul_add_c(a[3],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,b_5 /* mul_add_c(a[4],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,b_4 /* mul_add_c(a[5],b[4],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_6,b_3 /* mul_add_c(a[6],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_7,b_2 /* mul_add_c(a[7],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,72(a0) /* r[9]=c1; */
dmultu a_7,b_3 /* mul_add_c(a[7],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_6,b_4 /* mul_add_c(a[6],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,b_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_4,b_6 /* mul_add_c(a[4],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,b_7 /* mul_add_c(a[3],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,80(a0) /* r[10]=c2; */
dmultu a_4,b_7 /* mul_add_c(a[4],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_5,b_6 /* mul_add_c(a[5],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_6,b_5 /* mul_add_c(a[6],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_7,b_4 /* mul_add_c(a[7],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,88(a0) /* r[11]=c3; */
dmultu a_7,b_5 /* mul_add_c(a[7],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_6,b_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,b_7 /* mul_add_c(a[5],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,96(a0) /* r[12]=c1; */
dmultu a_6,b_7 /* mul_add_c(a[6],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_7,b_6 /* mul_add_c(a[7],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,104(a0) /* r[13]=c2; */
dmultu a_7,b_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
/* restore callee-saved registers while the final multiply runs */
ld s0,0(sp)
ld s1,8(sp)
ld s2,16(sp)
ld s3,24(sp)
ld s4,32(sp)
ld s5,40(sp)
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sd c_3,112(a0) /* r[14]=c3; */
sd c_1,120(a0) /* r[15]=c1; */
PTR_ADD sp,FRAME_SIZE
jr ra
END(bn_mul_comba8)
.align 5
/*
 * void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
 *   a0 = r (8 words out), a1 = a (4 words), a2 = b (4 words)
 * Comba 4x4 multiplication: r[0..7] = a * b. Same expanded mul_add_c()
 * pattern as bn_mul_comba8, but all operands fit in caller-saved
 * registers, so no stack frame is needed.
 */
LEAF(bn_mul_comba4)
.set reorder
ld a_0,0(a1)
ld b_0,0(a2)
ld a_1,8(a1)
ld a_2,16(a1)
dmultu a_0,b_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
ld a_3,24(a1)
ld b_1,8(a2)
ld b_2,16(a2)
ld b_3,24(a2)
mflo c_1
mfhi c_2
sd c_1,0(a0)
dmultu a_0,b_1 /* mul_add_c(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
dmultu a_1,b_0 /* mul_add_c(a[1],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
sd c_2,8(a0)
dmultu a_2,b_0 /* mul_add_c(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
dmultu a_1,b_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_0,b_2 /* mul_add_c(a[0],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0)
dmultu a_0,b_3 /* mul_add_c(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu c_3,c_2,t_2
dmultu a_1,b_2 /* mul_add_c(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_2,b_1 /* mul_add_c(a[2],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,b_0 /* mul_add_c(a[3],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0)
dmultu a_3,b_1 /* mul_add_c(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu c_1,c_3,t_2
dmultu a_2,b_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,b_3 /* mul_add_c(a[1],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0)
dmultu a_2,b_3 /* mul_add_c(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu c_2,c_1,t_2
dmultu a_3,b_2 /* mul_add_c(a[3],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0)
dmultu a_3,b_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sd c_1,48(a0)
sd c_2,56(a0)
jr ra
END(bn_mul_comba4)
/*
 * For the squaring routines only one input vector exists, so a[4..7] can
 * reuse the registers that held b[0..3] in the multiply routines; the
 * s-registers are then untouched and no stack frame is required.
 */
#undef a_4
#undef a_5
#undef a_6
#undef a_7
#define a_4 b_0
#define a_5 b_1
#define a_6 b_2
#define a_7 b_3
.align 5
/*
 * void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
 *   a0 = r (16 words out), a1 = a (8 words)
 * Comba squaring: r = a^2. Off-diagonal products a[i]*a[j] (i!=j) occur
 * twice in the square, so each mul_add_c2 group doubles the 128-bit
 * product before accumulating:
 *   slt x,t,zero   captures the top bit (carry-out of the doubling),
 *   dsll t,1       doubles lo/hi words,
 * and the captured bits are folded into the next column. Diagonal terms
 * a[i]*a[i] use the plain (undoubled) mul_add_c pattern.
 * Register a2 (the unused second scratch, since this function takes only
 * two arguments) serves as a temporary for the lo-word doubling carry.
 */
LEAF(bn_sqr_comba8)
.set reorder
ld a_0,0(a1)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
ld a_4,32(a1)
ld a_5,40(a1)
ld a_6,48(a1)
ld a_7,56(a1)
mflo c_1
mfhi c_2
sd c_1,0(a0)
dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
sd c_2,8(a0)
dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0)
dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0)
dmultu a_4,a_0 /* mul_add_c2(a[4],b[0],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0)
dmultu a_0,a_5 /* mul_add_c2(a[0],b[5],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_4 /* mul_add_c2(a[1],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0)
dmultu a_6,a_0 /* mul_add_c2(a[6],b[0],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_5,a_1 /* mul_add_c2(a[5],b[1],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,a_2 /* mul_add_c2(a[4],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,48(a0)
dmultu a_0,a_7 /* mul_add_c2(a[0],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_1,a_6 /* mul_add_c2(a[1],b[6],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_5 /* mul_add_c2(a[2],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_3,a_4 /* mul_add_c2(a[3],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,56(a0)
dmultu a_7,a_1 /* mul_add_c2(a[7],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_6,a_2 /* mul_add_c2(a[6],b[2],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,a_3 /* mul_add_c2(a[5],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_4,a_4 /* mul_add_c(a[4],b[4],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,64(a0)
dmultu a_2,a_7 /* mul_add_c2(a[2],b[7],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_3,a_6 /* mul_add_c2(a[3],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_4,a_5 /* mul_add_c2(a[4],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,72(a0)
dmultu a_7,a_3 /* mul_add_c2(a[7],b[3],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_6,a_4 /* mul_add_c2(a[6],b[4],c2,c3,c1); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_1,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_5,a_5 /* mul_add_c(a[5],b[5],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,80(a0)
dmultu a_4,a_7 /* mul_add_c2(a[4],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_5,a_6 /* mul_add_c2(a[5],b[6],c3,c1,c2); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_2,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,88(a0)
dmultu a_7,a_5 /* mul_add_c2(a[7],b[5],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_6,a_6 /* mul_add_c(a[6],b[6],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,96(a0)
dmultu a_6,a_7 /* mul_add_c2(a[6],b[7],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,104(a0)
dmultu a_7,a_7 /* mul_add_c(a[7],b[7],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sd c_3,112(a0)
sd c_1,120(a0)
jr ra
END(bn_sqr_comba8)
.align 5
/*
 * void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
 *   a0 = r (8 words out), a1 = a (4 words)
 * Comba squaring: r = a^2. Off-diagonal products are doubled via the
 * slt/dsll carry-capturing sequence (see bn_sqr_comba8); diagonal terms
 * use the plain mul_add_c pattern. a2 is free scratch here since the
 * function takes only two arguments.
 */
LEAF(bn_sqr_comba4)
.set reorder
ld a_0,0(a1)
ld a_1,8(a1)
ld a_2,16(a1)
ld a_3,24(a1)
dmultu a_0,a_0 /* mul_add_c(a[0],b[0],c1,c2,c3); */
mflo c_1
mfhi c_2
sd c_1,0(a0)
dmultu a_0,a_1 /* mul_add_c2(a[0],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu c_3,t_2,AT
sd c_2,8(a0)
dmultu a_2,a_0 /* mul_add_c2(a[2],b[0],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
dmultu a_1,a_1 /* mul_add_c(a[1],b[1],c3,c1,c2); */
mflo t_1
mfhi t_2
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,16(a0)
dmultu a_0,a_3 /* mul_add_c2(a[0],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
slt c_3,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
dmultu a_1,a_2 /* mul_add_c2(a[1],b[2],c1,c2,c3); */
mflo t_1
mfhi t_2
slt AT,t_2,zero
daddu c_3,AT
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sltu AT,c_2,t_2
daddu c_3,AT
sd c_1,24(a0)
dmultu a_3,a_1 /* mul_add_c2(a[3],b[1],c2,c3,c1); */
mflo t_1
mfhi t_2
slt c_1,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
dmultu a_2,a_2 /* mul_add_c(a[2],b[2],c2,c3,c1); */
mflo t_1
mfhi t_2
daddu c_2,t_1
sltu AT,c_2,t_1
daddu t_2,AT
daddu c_3,t_2
sltu AT,c_3,t_2
daddu c_1,AT
sd c_2,32(a0)
dmultu a_2,a_3 /* mul_add_c2(a[2],b[3],c3,c1,c2); */
mflo t_1
mfhi t_2
slt c_2,t_2,zero
dsll t_2,1
slt a2,t_1,zero
daddu t_2,a2
dsll t_1,1
daddu c_3,t_1
sltu AT,c_3,t_1
daddu t_2,AT
daddu c_1,t_2
sltu AT,c_1,t_2
daddu c_2,AT
sd c_3,40(a0)
dmultu a_3,a_3 /* mul_add_c(a[3],b[3],c1,c2,c3); */
mflo t_1
mfhi t_2
daddu c_1,t_1
sltu AT,c_1,t_1
daddu t_2,AT
daddu c_2,t_2
sd c_1,48(a0)
sd c_2,56(a0)
jr ra
END(bn_sqr_comba4)
/* ==================================================================== */
/* Begin separate source file:                                          */
/* Library/OpensslLib/openssl-1.0.1e/crypto/bn/asm/sparcv8plus.S        */
/* (repository-join metadata removed)                                   */
/* ==================================================================== */
.ident "sparcv8plus.s, Version 1.4"
.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
/*
* ====================================================================
* Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
* project.
*
* Rights for redistribution and usage in source and binary forms are
* granted according to the OpenSSL license. Warranty of any kind is
* disclaimed.
* ====================================================================
*/
/*
* This is my modest contribution to OpenSSL project (see
* http://www.openssl.org/ for more information about it) and is
* a drop-in UltraSPARC ISA replacement for crypto/bn/bn_asm.c
* module. For updates see http://fy.chalmers.se/~appro/hpe/.
*
* Questions-n-answers.
*
* Q. How to compile?
* A. With SC4.x/SC5.x:
*
* cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
*
* and with gcc:
*
* gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
*
* or if above fails (it does if you have gas installed):
*
* gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
*
* Quick-n-dirty way to fuse the module into the library.
* Provided that the library is already configured and built
* (in 0.9.2 case with no-asm option):
*
* # cd crypto/bn
* # cp /some/place/bn_asm.sparc.v8plus.S .
* # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
* # make
* # cd ../..
* # make; make test
*
* Quick-n-dirty way to get rid of it:
*
* # cd crypto/bn
* # touch bn_asm.c
* # make
* # cd ../..
* # make; make test
*
* Q. V8plus architecture? What kind of beast is that?
* A. Well, it's rather a programming model than an architecture...
* It's actually v9-compliant, i.e. *any* UltraSPARC, CPU under
* special conditions, namely when kernel doesn't preserve upper
* 32 bits of otherwise 64-bit registers during a context switch.
*
* Q. Why just UltraSPARC? What about SuperSPARC?
* A. Original release did target UltraSPARC only. Now SuperSPARC
* version is provided along. Both version share bn_*comba[48]
* implementations (see comment later in code for explanation).
* But what's so special about this UltraSPARC implementation?
* Why didn't I let compiler do the job? Trouble is that most of
* available compilers (well, SC5.0 is the only exception) don't
* attempt to take advantage of UltraSPARC's 64-bitness under
* 32-bit kernels even though it's perfectly possible (see next
* question).
*
* Q. 64-bit registers under 32-bit kernels? Didn't you just say it
* doesn't work?
* A. You can't address *all* registers as 64-bit wide:-( The catch is
* that you actually may rely upon %o0-%o5 and %g1-%g4 being fully
* preserved if you're in a leaf function, i.e. such never calling
* any other functions. All functions in this module are leaf and
* 10 registers is a handful. And as a matter of fact none-"comba"
* routines don't require even that much and I could even afford to
* not allocate own stack frame for 'em:-)
*
* Q. What about 64-bit kernels?
* A. What about 'em? Just kidding:-) Pure 64-bit version is currently
* under evaluation and development...
*
* Q. What about shared libraries?
* A. What about 'em? Kidding again:-) Code does *not* contain any
* code position dependencies and it's safe to include it into
* shared library as is.
*
* Q. How much faster does it go?
* A. Do you have a good benchmark? In either case below is what I
* experience with crypto/bn/expspeed.c test program:
*
* v8plus module on U10/300MHz against bn_asm.c compiled with:
*
* cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
* cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
* egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
*
* v8 module on SS10/60MHz against bn_asm.c compiled with:
*
* cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
* cc-4.2 -xarch=v8 -xO5 -xdepend +10%
* egcs-1.1.2 -mv8 -O3 +35-45%
*
* As you can see it's damn hard to beat the new Sun C compiler
* and it's in first place GNU C users who will appreciate this
* assembler implementation:-)
*/
/*
* Revision history.
*
* 1.0 - initial release;
* 1.1 - new loop unrolling model(*);
* - some more fine tuning;
* 1.2 - made gas friendly;
* - updates to documentation concerning v9;
* - new performance comparison matrix;
* 1.3 - fixed problem with /usr/ccs/lib/cpp;
* 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
* resulting in slight overall performance kick;
* - some retunes;
* - support for GNU as added;
*
* (*) Originally unrolled loop looked like this:
* for (;;) {
* op(p+0); if (--n==0) break;
* op(p+1); if (--n==0) break;
* op(p+2); if (--n==0) break;
* op(p+3); if (--n==0) break;
* p+=4;
* }
* I unroll according to following:
* while (n&~3) {
* op(p+0); op(p+1); op(p+2); op(p+3);
* p+=4; n-=4;
* }
* if (n) {
* op(p+0); if (--n==0) return;
* op(p+1); if (--n==0) return;
* op(p+2); return;
* }
*/
/*
 * Build-mode configuration: pick the stack-frame bias for the `save`
 * instruction depending on whether we target 64-bit (v9) or 32-bit
 * (v8plus) environments. NOTE(review): the negative FRAME_SIZE values
 * are the SPARC convention of passing -framesize to `save` -- confirm
 * against the function prologues further down the file.
 */
#if defined(__SUNPRO_C) && defined(__sparcv9)
/* They've said -xarch=v9 at command line */
.register %g2,#scratch
.register %g3,#scratch
# define FRAME_SIZE -192
#elif defined(__GNUC__) && defined(__arch64__)
/* They've said -m64 at command line */
.register %g2,#scratch
.register %g3,#scratch
# define FRAME_SIZE -192
#else
# define FRAME_SIZE -96
#endif
/*
 * GNU assembler can't stand stuw:-(
 */
#define stuw st
.section ".text",#alloc,#execinstr
.file "bn_asm.sparc.v8plus.S"
.align 32
.global bn_mul_add_words
/*
 * BN_ULONG bn_mul_add_words(rp,ap,num,w)
 * BN_ULONG *rp,*ap;
 * int num;
 * BN_ULONG w;
 */
/*
 * rp[i] += ap[i]*w for i in [0,num); returns the final carry word.
 * In:  %o0=rp, %o1=ap, %o2=num (sign-extended on entry), %o3=w
 *      (upper 32 bits of w cleared on entry).
 * Out: %o0 = carry out of the last word (0 if num<=0).
 * %o5 carries the running high word between iterations; the unrolled
 * loop does 4 words per pass, the tail handles the remaining num%4.
 * Branch delay slots with ',a' (annulled) preload the next ap word
 * only when the branch is taken.
 */
bn_mul_add_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_mul_add_words_proceed
lduw [%o1],%g2
retl
clr %o0
nop
nop
nop
.L_bn_mul_add_words_proceed:
srl %o3,%g0,%o3 ! clruw %o3
andcc %o2,-4,%g0
bz,pn %icc,.L_bn_mul_add_words_tail
clr %o5
.L_bn_mul_add_words_loop: ! wow! 32 aligned!
lduw [%o0],%g1
lduw [%o1+4],%g3
mulx %o3,%g2,%g2
add %g1,%o5,%o4
nop
add %o4,%g2,%o4
stuw %o4,[%o0]
srlx %o4,32,%o5
lduw [%o0+4],%g1
lduw [%o1+8],%g2
mulx %o3,%g3,%g3
add %g1,%o5,%o4
dec 4,%o2
add %o4,%g3,%o4
stuw %o4,[%o0+4]
srlx %o4,32,%o5
lduw [%o0+8],%g1
lduw [%o1+12],%g3
mulx %o3,%g2,%g2
add %g1,%o5,%o4
inc 16,%o1
add %o4,%g2,%o4
stuw %o4,[%o0+8]
srlx %o4,32,%o5
lduw [%o0+12],%g1
mulx %o3,%g3,%g3
add %g1,%o5,%o4
inc 16,%o0
add %o4,%g3,%o4
andcc %o2,-4,%g0
stuw %o4,[%o0-4]
srlx %o4,32,%o5
bnz,a,pt %icc,.L_bn_mul_add_words_loop
lduw [%o1],%g2
brnz,a,pn %o2,.L_bn_mul_add_words_tail
lduw [%o1],%g2
.L_bn_mul_add_words_return:
retl
mov %o5,%o0 ! return the carry word
.L_bn_mul_add_words_tail:
lduw [%o0],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
dec %o2
add %o4,%g2,%o4
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_add_words_return
stuw %o4,[%o0]
lduw [%o1+4],%g2
lduw [%o0+4],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
dec %o2
add %o4,%g2,%o4
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_add_words_return
stuw %o4,[%o0+4]
lduw [%o1+8],%g2
lduw [%o0+8],%g1
mulx %o3,%g2,%g2
add %g1,%o5,%o4
add %o4,%g2,%o4
stuw %o4,[%o0+8]
retl
srlx %o4,32,%o0
.type bn_mul_add_words,#function
.size bn_mul_add_words,(.-bn_mul_add_words)
.align 32
.global bn_mul_words
/*
 * BN_ULONG bn_mul_words(rp,ap,num,w)
 * BN_ULONG *rp,*ap;
 * int num;
 * BN_ULONG w;
 */
/*
 * rp[i] = ap[i]*w for i in [0,num); returns the final carry word.
 * Same register conventions as bn_mul_add_words above, except rp is
 * written, not accumulated into.  %o5 holds the carry between words.
 * (Note: the '.L_bn_mul_words_proceeed' spelling is intentional/historic
 * and used consistently within this function.)
 */
bn_mul_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_mul_words_proceeed
lduw [%o1],%g2
retl
clr %o0
nop
nop
nop
.L_bn_mul_words_proceeed:
srl %o3,%g0,%o3 ! clruw %o3
andcc %o2,-4,%g0
bz,pn %icc,.L_bn_mul_words_tail
clr %o5
.L_bn_mul_words_loop: ! wow! 32 aligned!
lduw [%o1+4],%g3
mulx %o3,%g2,%g2
add %g2,%o5,%o4
nop
stuw %o4,[%o0]
srlx %o4,32,%o5
lduw [%o1+8],%g2
mulx %o3,%g3,%g3
add %g3,%o5,%o4
dec 4,%o2
stuw %o4,[%o0+4]
srlx %o4,32,%o5
lduw [%o1+12],%g3
mulx %o3,%g2,%g2
add %g2,%o5,%o4
inc 16,%o1
stuw %o4,[%o0+8]
srlx %o4,32,%o5
mulx %o3,%g3,%g3
add %g3,%o5,%o4
inc 16,%o0
stuw %o4,[%o0-4]
srlx %o4,32,%o5
andcc %o2,-4,%g0
bnz,a,pt %icc,.L_bn_mul_words_loop
lduw [%o1],%g2
nop
nop
brnz,a,pn %o2,.L_bn_mul_words_tail
lduw [%o1],%g2
.L_bn_mul_words_return:
retl
mov %o5,%o0 ! return the carry word
.L_bn_mul_words_tail:
mulx %o3,%g2,%g2
add %g2,%o5,%o4
dec %o2
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_words_return
stuw %o4,[%o0]
lduw [%o1+4],%g2
mulx %o3,%g2,%g2
add %g2,%o5,%o4
dec %o2
srlx %o4,32,%o5
brz,pt %o2,.L_bn_mul_words_return
stuw %o4,[%o0+4]
lduw [%o1+8],%g2
mulx %o3,%g2,%g2
add %g2,%o5,%o4
stuw %o4,[%o0+8]
retl
srlx %o4,32,%o0
.type bn_mul_words,#function
.size bn_mul_words,(.-bn_mul_words)
.align 32
.global bn_sqr_words
/*
 * void bn_sqr_words(r,a,n)
 * BN_ULONG *r,*a;
 * int n;
 */
/*
 * r[2*i], r[2*i+1] = low/high 32 bits of a[i]^2 for i in [0,n).
 * In: %o0=r, %o1=a, %o2=n (sign-extended).  No carry propagates
 * between words, so the loop is a straight sequence of mulx/srlx/stores.
 * Returns 0 in %o0 (the C prototype is void).
 */
bn_sqr_words:
sra %o2,%g0,%o2 ! signx %o2
brgz,a %o2,.L_bn_sqr_words_proceeed
lduw [%o1],%g2
retl
clr %o0
nop
nop
nop
.L_bn_sqr_words_proceeed:
andcc %o2,-4,%g0
nop
bz,pn %icc,.L_bn_sqr_words_tail
nop
.L_bn_sqr_words_loop: ! wow! 32 aligned!
lduw [%o1+4],%g3
mulx %g2,%g2,%o4
stuw %o4,[%o0]
srlx %o4,32,%o5
stuw %o5,[%o0+4]
nop
lduw [%o1+8],%g2
mulx %g3,%g3,%o4
dec 4,%o2
stuw %o4,[%o0+8]
srlx %o4,32,%o5
stuw %o5,[%o0+12]
lduw [%o1+12],%g3
mulx %g2,%g2,%o4
srlx %o4,32,%o5
stuw %o4,[%o0+16]
inc 16,%o1
stuw %o5,[%o0+20]
mulx %g3,%g3,%o4
inc 32,%o0
stuw %o4,[%o0-8]
srlx %o4,32,%o5
! NOTE(review): this andcc targets %g2, unlike the %g0 used by the other
! loops; %g2 is reloaded in the annulled delay slot on both exit paths,
! so the clobber looks harmless -- confirm against upstream sparcv8plus.S.
andcc %o2,-4,%g2
stuw %o5,[%o0-4]
bnz,a,pt %icc,.L_bn_sqr_words_loop
lduw [%o1],%g2
nop
brnz,a,pn %o2,.L_bn_sqr_words_tail
lduw [%o1],%g2
.L_bn_sqr_words_return:
retl
clr %o0
.L_bn_sqr_words_tail:
mulx %g2,%g2,%o4
dec %o2
stuw %o4,[%o0]
srlx %o4,32,%o5
brz,pt %o2,.L_bn_sqr_words_return
stuw %o5,[%o0+4]
lduw [%o1+4],%g2
mulx %g2,%g2,%o4
dec %o2
stuw %o4,[%o0+8]
srlx %o4,32,%o5
brz,pt %o2,.L_bn_sqr_words_return
stuw %o5,[%o0+12]
lduw [%o1+8],%g2
mulx %g2,%g2,%o4
srlx %o4,32,%o5
stuw %o4,[%o0+16]
stuw %o5,[%o0+20]
retl
clr %o0
.type bn_sqr_words,#function
.size bn_sqr_words,(.-bn_sqr_words)
.align 32
.global bn_div_words
/*
 * BN_ULONG bn_div_words(h,l,d)
 * BN_ULONG h,l,d;
 */
/*
 * Returns the low 32 bits of ((h<<32)|l) / d, using the 64-bit
 * unsigned divide udivx.  %o0=h, %o1=l, %o2=d; quotient in %o0.
 */
bn_div_words:
sllx %o0,32,%o0 ! build the 64-bit dividend h:l in %o0
or %o0,%o1,%o0
udivx %o0,%o2,%o0
retl
srl %o0,%g0,%o0 ! clruw %o0
.type bn_div_words,#function
.size bn_div_words,(.-bn_div_words)
.align 32
.global bn_add_words
/*
 * BN_ULONG bn_add_words(rp,ap,bp,n)
 * BN_ULONG *rp,*ap,*bp;
 * int n;
 */
/*
 * rp[i] = ap[i] + bp[i] with carry chained via addccc; returns the
 * final carry (0 or 1).  In: %o0=rp, %o1=ap, %o2=bp, %o3=n (signed).
 * The %icc carry flag is kept live across the whole loop, which is
 * why the loop counter test uses 'and'/'brnz' (flag-preserving)
 * instead of 'andcc'.
 */
bn_add_words:
sra %o3,%g0,%o3 ! signx %o3
brgz,a %o3,.L_bn_add_words_proceed
lduw [%o1],%o4
retl
clr %o0
.L_bn_add_words_proceed:
andcc %o3,-4,%g0
bz,pn %icc,.L_bn_add_words_tail
addcc %g0,0,%g0 ! clear carry flag
.L_bn_add_words_loop: ! wow! 32 aligned!
dec 4,%o3
lduw [%o2],%o5
lduw [%o1+4],%g1
lduw [%o2+4],%g2
lduw [%o1+8],%g3
lduw [%o2+8],%g4
addccc %o5,%o4,%o5
stuw %o5,[%o0]
lduw [%o1+12],%o4
lduw [%o2+12],%o5
inc 16,%o1
addccc %g1,%g2,%g1
stuw %g1,[%o0+4]
inc 16,%o2
addccc %g3,%g4,%g3
stuw %g3,[%o0+8]
inc 16,%o0
addccc %o5,%o4,%o5
stuw %o5,[%o0-4]
and %o3,-4,%g1 ! 'and', not 'andcc': must not disturb the carry chain
brnz,a,pt %g1,.L_bn_add_words_loop
lduw [%o1],%o4
brnz,a,pn %o3,.L_bn_add_words_tail
lduw [%o1],%o4
.L_bn_add_words_return:
clr %o0
retl
movcs %icc,1,%o0 ! return 1 iff the final add carried
nop
.L_bn_add_words_tail:
lduw [%o2],%o5
dec %o3
addccc %o5,%o4,%o5
brz,pt %o3,.L_bn_add_words_return
stuw %o5,[%o0]
lduw [%o1+4],%o4
lduw [%o2+4],%o5
dec %o3
addccc %o5,%o4,%o5
brz,pt %o3,.L_bn_add_words_return
stuw %o5,[%o0+4]
lduw [%o1+8],%o4
lduw [%o2+8],%o5
addccc %o5,%o4,%o5
stuw %o5,[%o0+8]
clr %o0
retl
movcs %icc,1,%o0
.type bn_add_words,#function
.size bn_add_words,(.-bn_add_words)
.global bn_sub_words
/*
 * BN_ULONG bn_sub_words(rp,ap,bp,n)
 * BN_ULONG *rp,*ap,*bp;
 * int n;
 */
/*
 * rp[i] = ap[i] - bp[i] with borrow chained via subccc; returns the
 * final borrow (0 or 1).  Mirrors bn_add_words above, including the
 * flag-preserving 'and'/'brnz' loop-count test.
 */
bn_sub_words:
sra %o3,%g0,%o3 ! signx %o3
brgz,a %o3,.L_bn_sub_words_proceed
lduw [%o1],%o4
retl
clr %o0
.L_bn_sub_words_proceed:
andcc %o3,-4,%g0
bz,pn %icc,.L_bn_sub_words_tail
addcc %g0,0,%g0 ! clear carry flag
.L_bn_sub_words_loop: ! wow! 32 aligned!
dec 4,%o3
lduw [%o2],%o5
lduw [%o1+4],%g1
lduw [%o2+4],%g2
lduw [%o1+8],%g3
lduw [%o2+8],%g4
subccc %o4,%o5,%o5
stuw %o5,[%o0]
lduw [%o1+12],%o4
lduw [%o2+12],%o5
inc 16,%o1
subccc %g1,%g2,%g2
stuw %g2,[%o0+4]
inc 16,%o2
subccc %g3,%g4,%g4
stuw %g4,[%o0+8]
inc 16,%o0
subccc %o4,%o5,%o5
stuw %o5,[%o0-4]
and %o3,-4,%g1 ! flag-preserving count test (borrow stays in %icc)
brnz,a,pt %g1,.L_bn_sub_words_loop
lduw [%o1],%o4
brnz,a,pn %o3,.L_bn_sub_words_tail
lduw [%o1],%o4
.L_bn_sub_words_return:
clr %o0
retl
movcs %icc,1,%o0 ! return 1 iff the final subtract borrowed
nop
.L_bn_sub_words_tail: ! wow! 32 aligned!
lduw [%o2],%o5
dec %o3
subccc %o4,%o5,%o5
brz,pt %o3,.L_bn_sub_words_return
stuw %o5,[%o0]
lduw [%o1+4],%o4
lduw [%o2+4],%o5
dec %o3
subccc %o4,%o5,%o5
brz,pt %o3,.L_bn_sub_words_return
stuw %o5,[%o0+4]
lduw [%o1+8],%o4
lduw [%o2+8],%o5
subccc %o4,%o5,%o5
stuw %o5,[%o0+8]
clr %o0
retl
movcs %icc,1,%o0
.type bn_sub_words,#function
.size bn_sub_words,(.-bn_sub_words)
/*
* Code below depends on the fact that upper parts of the %l0-%l7
* and %i0-%i7 are zeroed by kernel after context switch. In
* previous versions this comment stated that "the trouble is that
* it's not feasible to implement the mumbo-jumbo in less V9
* instructions:-(" which apparently isn't true thanks to
* 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
* results not from the shorter code, but from elimination of
* multicycle non-pairable 'rd %y,%rd' instructions.
*
* Andy.
*/
/*
* Here is register usage map for *all* routines below.
*/
/*
 * Scratch registers used by every comba routine below:
 *   t_1  - current 64-bit product (mulx result)
 *   t_2  - constant 1<<32, added to c_3 whenever a 64-bit add carries
 *   c_12 - low 64 bits of the running column accumulator
 *   c_3  - carry overflow, accumulated in units of 2^32
 */
#define t_1 %o0
#define t_2 %o1
#define c_12 %o2
#define c_3 %o3
/*
 * Operand addressing helpers: word I of the result, a, and b arrays
 * (after the register window 'save', the C arguments live in %i0-%i2).
 */
#define ap(I) [%i1+4*I]
#define bp(I) [%i2+4*I]
#define rp(I) [%i0+4*I]
/*
 * Cached input words; %l0-%l7 / %i3-%i5 rely on the upper halves being
 * zeroed across context switch, as noted in the comment above.
 */
#define a_0 %l0
#define a_1 %l1
#define a_2 %l2
#define a_3 %l3
#define a_4 %l4
#define a_5 %l5
#define a_6 %l6
#define a_7 %l7
#define b_0 %i3
#define b_1 %i4
#define b_2 %i5
#define b_3 %o4
#define b_4 %o5
#define b_5 %o7
#define b_6 %g1
#define b_7 %g4
.align 32
.global bn_mul_comba8
/*
 * void bn_mul_comba8(r,a,b)
 * BN_ULONG *r,*a,*b;
 */
/*
 * 8x8-word column-wise (comba) multiplication: r[0..15] = a[0..7]*b[0..7].
 * Each column k sums all 64-bit products a[i]*b[j] with i+j==k into the
 * c_12/c_3 accumulator.  The recurring three-instruction idiom
 *     addcc  c_12,t_1,c_12
 *     bcs,a  %xcc,.+8
 *     add    c_3,t_2,c_3
 * adds a product and, if the 64-bit add carried (%xcc carry), records
 * the carry as 1<<32 (t_2) in c_3 via the annulled delay slot.  After
 * each column the low word is stored, c_12 is shifted right 32, and the
 * saved carries are merged back in with 'or c_12,c_3,c_12'.  The '!='
 * marks indicate instruction-group boundaries of the original schedule.
 */
bn_mul_comba8:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2 ! t_2 = 1<<32, the carry increment
lduw bp(0),b_0 !=
lduw bp(1),b_1
mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !=!r[0]=c1;
lduw ap(1),a_1
mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(2),a_2
mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(2),b_2 !=
mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(3),b_3
mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12 !=
mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(4),a_4
mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(3) !r[3]=c1;
or c_12,c_3,c_12
mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(4),b_4 !=
mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(5),b_5
mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12 !=
mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(5),a_5
mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(6),a_6
mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(6),b_6 !=
mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(7),b_7
mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
or c_12,c_3,c_12 !=
mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(7),a_7
mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(7) !r[7]=c2;
or c_12,c_3,c_12
mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
srlx t_1,32,c_12
stuw t_1,rp(8) !r[8]=c3;
or c_12,c_3,c_12
mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(9) !r[9]=c1;
or c_12,c_3,c_12 !=
mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(10) !r[10]=c2;
or c_12,c_3,c_12 !=
mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(11) !r[11]=c3;
or c_12,c_3,c_12 !=
mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(12) !r[12]=c1;
or c_12,c_3,c_12 !=
mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
st t_1,rp(13) !r[13]=c2; ('st' == 'stuw' via the #define above)
or c_12,c_3,c_12 !=
mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
addcc c_12,t_1,t_1
srlx t_1,32,c_12 !=
stuw t_1,rp(14) !r[14]=c3;
stuw c_12,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0 !=
.type bn_mul_comba8,#function
.size bn_mul_comba8,(.-bn_mul_comba8)
.align 32
.global bn_mul_comba4
/*
 * void bn_mul_comba4(r,a,b)
 * BN_ULONG *r,*a,*b;
 */
/*
 * 4x4-word comba multiplication: r[0..7] = a[0..3]*b[0..3].
 * Uses the same c_12/c_3 accumulator and bcs,a carry idiom as
 * bn_mul_comba8 above, over columns 0..6.
 */
bn_mul_comba4:
save %sp,FRAME_SIZE,%sp
lduw ap(0),a_0
mov 1,t_2
lduw bp(0),b_0
sllx t_2,32,t_2 !=
lduw bp(1),b_1
mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !=!r[0]=c1;
lduw ap(1),a_1
mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(2),a_2
mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12 !=
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
addcc c_12,t_1,c_12 !=
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw bp(2),b_2 !=
mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3 !=
lduw bp(3),b_3
mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12 !=
mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8 !=
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(3) !=!r[3]=c1;
or c_12,c_3,c_12
mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
addcc c_12,t_1,c_12 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !=!r[4]=c2;
or c_12,c_3,c_12
mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
addcc c_12,t_1,t_1 !=
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !=!r[5]=c3;
or c_12,c_3,c_12
mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
addcc c_12,t_1,t_1
srlx t_1,32,c_12 !=
stuw t_1,rp(6) !r[6]=c1;
stuw c_12,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_mul_comba4,#function
.size bn_mul_comba4,(.-bn_mul_comba4)
.align 32
.global bn_sqr_comba8
/*
 * void bn_sqr_comba8(r,a)  -- r[0..15] = a[0..7]^2 (comba squaring).
 * Off-diagonal products a[i]*a[j] (i!=j) appear twice in a square, so
 * each sqr_add_c2 term executes the addcc/bcs,a carry idiom twice on
 * the same t_1; diagonal terms a[i]^2 (sqr_add_c) are added once.
 * Accumulator layout (c_12 low 64 bits, c_3 carries in units of 2^32,
 * t_2 == 1<<32) is identical to bn_mul_comba8 above.
 */
bn_sqr_comba8:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2
lduw ap(1),a_1
mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !r[0]=c1;
lduw ap(2),a_2
mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1 ! second add of the same product (2*a[0]*a[1])
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12
mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(4),a_4
mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
st t_1,rp(3) !r[3]=c1; ('st' == 'stuw' via the #define above)
or c_12,c_3,c_12
mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(5),a_5
mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12
mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(6),a_6
mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(7),a_7
mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
or c_12,c_3,c_12
mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(7) !r[7]=c2;
or c_12,c_3,c_12
mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(8) !r[8]=c3;
or c_12,c_3,c_12
mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(9) !r[9]=c1;
or c_12,c_3,c_12
mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(10) !r[10]=c2;
or c_12,c_3,c_12
mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(11) !r[11]=c3;
or c_12,c_3,c_12
mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(12) !r[12]=c1;
or c_12,c_3,c_12
mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(13) !r[13]=c2;
or c_12,c_3,c_12
mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
addcc c_12,t_1,t_1
srlx t_1,32,c_12
stuw t_1,rp(14) !r[14]=c3;
stuw c_12,rp(15) !r[15]=c1;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba8,#function
.size bn_sqr_comba8,(.-bn_sqr_comba8)
.align 32
.global bn_sqr_comba4
/*
 * void bn_sqr_comba4(r,a)
 * BN_ULONG *r,*a;
 */
/*
 * r[0..7] = a[0..3]^2 (comba squaring).  Same structure as
 * bn_sqr_comba8: off-diagonal products are added twice (sqr_add_c2),
 * diagonal squares once (sqr_add_c), with carries collected in c_3
 * in units of 2^32 (t_2).
 */
bn_sqr_comba4:
save %sp,FRAME_SIZE,%sp
mov 1,t_2
lduw ap(0),a_0
sllx t_2,32,t_2
lduw ap(1),a_1
mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
srlx t_1,32,c_12
stuw t_1,rp(0) !r[0]=c1;
lduw ap(2),a_2
mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(1) !r[1]=c2;
or c_12,c_3,c_12
mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
lduw ap(3),a_3
mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(2) !r[2]=c3;
or c_12,c_3,c_12
mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(3) !r[3]=c1;
or c_12,c_3,c_12
mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,c_12
bcs,a %xcc,.+8
add c_3,t_2,c_3
mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(4) !r[4]=c2;
or c_12,c_3,c_12
mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
addcc c_12,t_1,c_12
clr c_3
bcs,a %xcc,.+8
add c_3,t_2,c_3
addcc c_12,t_1,t_1
bcs,a %xcc,.+8
add c_3,t_2,c_3
srlx t_1,32,c_12
stuw t_1,rp(5) !r[5]=c3;
or c_12,c_3,c_12
mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
addcc c_12,t_1,t_1
srlx t_1,32,c_12
stuw t_1,rp(6) !r[6]=c1;
stuw c_12,rp(7) !r[7]=c2;
ret
restore %g0,%g0,%o0
.type bn_sqr_comba4,#function
.size bn_sqr_comba4,(.-bn_sqr_comba4)
.align 32
// ====================================================================
// NOTE: file boundary. Everything below belongs to a separate source
// file: Library/OpensslLib/openssl-1.0.1e/crypto/bn/asm/s390x.S
// ====================================================================
.ident "s390x.S, version 1.1"
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project.
//
// Rights for redistribution and usage in source and binary forms are
// granted according to the OpenSSL license. Warranty of any kind is
// disclaimed.
// ====================================================================
.text
#define zero %r0
// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
// rp[i] += ap[i]*w with 64-bit limbs; returns the final carry limb.
// In: r2=rp, r3=ap, r4=len (sign-extended by ltgfr), r5=w.
// Out: r2 = carry (0 if len<=0; r2 is zeroed before the early return).
// r1 holds rp, r2 doubles as the byte index i, r8 is the running carry,
// mlgr produces the 128-bit product in the even/odd pair r6:r7 (r8:r9).
// The 4x-unrolled loop relies on the CPU carry flag chained by alcgr/alg.
.globl bn_mul_add_words
.type bn_mul_add_words,@function
.align 4
bn_mul_add_words:
lghi zero,0 // zero = 0
la %r1,0(%r2) // put rp aside
lghi %r2,0 // i=0;
ltgfr %r4,%r4
bler %r14 // if (len<=0) return 0;
stmg %r6,%r10,48(%r15)
lghi %r10,3
lghi %r8,0 // carry = 0
nr %r10,%r4 // len%4
sra %r4,2 // cnt=len/4
jz .Loop1_madd // carry is incidentally cleared if branch taken
algr zero,zero // clear carry
.Loop4_madd:
lg %r7,0(%r2,%r3) // ap[i]
mlgr %r6,%r5 // *=w
alcgr %r7,%r8 // +=carry
alcgr %r6,zero
alg %r7,0(%r2,%r1) // +=rp[i]
stg %r7,0(%r2,%r1) // rp[i]=
lg %r9,8(%r2,%r3)
mlgr %r8,%r5
alcgr %r9,%r6
alcgr %r8,zero
alg %r9,8(%r2,%r1)
stg %r9,8(%r2,%r1)
lg %r7,16(%r2,%r3)
mlgr %r6,%r5
alcgr %r7,%r8
alcgr %r6,zero
alg %r7,16(%r2,%r1)
stg %r7,16(%r2,%r1)
lg %r9,24(%r2,%r3)
mlgr %r8,%r5
alcgr %r9,%r6
alcgr %r8,zero
alg %r9,24(%r2,%r1)
stg %r9,24(%r2,%r1)
la %r2,32(%r2) // i+=4
brct %r4,.Loop4_madd
la %r10,1(%r10) // see if len%4 is zero ...
brct %r10,.Loop1_madd // without touching condition code:-)
.Lend_madd:
alcgr %r8,zero // collect carry bit
lgr %r2,%r8
lmg %r6,%r10,48(%r15)
br %r14
.Loop1_madd:
lg %r7,0(%r2,%r3) // ap[i]
mlgr %r6,%r5 // *=w
alcgr %r7,%r8 // +=carry
alcgr %r6,zero
alg %r7,0(%r2,%r1) // +=rp[i]
stg %r7,0(%r2,%r1) // rp[i]=
lgr %r8,%r6
la %r2,8(%r2) // i++
brct %r10,.Loop1_madd
j .Lend_madd
.size bn_mul_add_words,.-bn_mul_add_words
// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
// rp[i] = ap[i]*w; returns the final carry limb.  Same register and
// loop layout as bn_mul_add_words above, minus the '+=rp[i]' step.
.globl bn_mul_words
.type bn_mul_words,@function
.align 4
bn_mul_words:
lghi zero,0 // zero = 0
la %r1,0(%r2) // put rp aside
lghi %r2,0 // i=0;
ltgfr %r4,%r4
bler %r14 // if (len<=0) return 0;
stmg %r6,%r10,48(%r15)
lghi %r10,3
lghi %r8,0 // carry = 0
nr %r10,%r4 // len%4
sra %r4,2 // cnt=len/4
jz .Loop1_mul // carry is incidentally cleared if branch taken
algr zero,zero // clear carry
.Loop4_mul:
lg %r7,0(%r2,%r3) // ap[i]
mlgr %r6,%r5 // *=w
alcgr %r7,%r8 // +=carry
stg %r7,0(%r2,%r1) // rp[i]=
lg %r9,8(%r2,%r3)
mlgr %r8,%r5
alcgr %r9,%r6
stg %r9,8(%r2,%r1)
lg %r7,16(%r2,%r3)
mlgr %r6,%r5
alcgr %r7,%r8
stg %r7,16(%r2,%r1)
lg %r9,24(%r2,%r3)
mlgr %r8,%r5
alcgr %r9,%r6
stg %r9,24(%r2,%r1)
la %r2,32(%r2) // i+=4
brct %r4,.Loop4_mul
la %r10,1(%r10) // see if len%4 is zero ...
brct %r10,.Loop1_mul // without touching condition code:-)
.Lend_mul:
alcgr %r8,zero // collect carry bit
lgr %r2,%r8
lmg %r6,%r10,48(%r15)
br %r14
.Loop1_mul:
lg %r7,0(%r2,%r3) // ap[i]
mlgr %r6,%r5 // *=w
alcgr %r7,%r8 // +=carry
stg %r7,0(%r2,%r1) // rp[i]=
lgr %r8,%r6
la %r2,8(%r2) // i++
brct %r10,.Loop1_mul
j .Lend_mul
.size bn_mul_words,.-bn_mul_words
// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r2,int r4)
// r[2*i],r[2*i+1] = low/high halves of a[i]^2 for i in [0,len).
// In: r2=r, r3=a, r4=len.  No carry chain -- each mlgr is independent.
.globl bn_sqr_words
.type bn_sqr_words,@function
.align 4
bn_sqr_words:
ltgfr %r4,%r4
bler %r14 // len<=0: nothing to do
stmg %r6,%r7,48(%r15)
srag %r1,%r4,2 // cnt=len/4
jz .Loop1_sqr
.Loop4_sqr:
lg %r7,0(%r3)
mlgr %r6,%r7 // r6:r7 = a[i]^2
stg %r7,0(%r2)
stg %r6,8(%r2)
lg %r7,8(%r3)
mlgr %r6,%r7
stg %r7,16(%r2)
stg %r6,24(%r2)
lg %r7,16(%r3)
mlgr %r6,%r7
stg %r7,32(%r2)
stg %r6,40(%r2)
lg %r7,24(%r3)
mlgr %r6,%r7
stg %r7,48(%r2)
stg %r6,56(%r2)
la %r3,32(%r3)
la %r2,64(%r2)
brct %r1,.Loop4_sqr
lghi %r1,3
nr %r4,%r1 // cnt=len%4
jz .Lend_sqr
.Loop1_sqr:
lg %r7,0(%r3)
mlgr %r6,%r7
stg %r7,0(%r2)
stg %r6,8(%r2)
la %r3,8(%r3)
la %r2,16(%r2)
brct %r4,.Loop1_sqr
.Lend_sqr:
lmg %r6,%r7,48(%r15)
br %r14
.size bn_sqr_words,.-bn_sqr_words
// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
// 128-by-64-bit unsigned divide: dlgr divides the even/odd pair
// r2:r3 (h:l) by d (r4), leaving remainder in r2 and quotient in r3;
// the quotient is then moved to r2 for return.
.globl bn_div_words
.type bn_div_words,@function
.align 4
bn_div_words:
dlgr %r2,%r4
lgr %r2,%r3 // return the quotient
br %r14
.size bn_div_words,.-bn_div_words
// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
// rp[i] = ap[i] + bp[i] with the CPU carry chained by alcg; returns
// the final carry (0/1), extracted at .Lexit_add with alcgr into a
// zeroed r2.  The brct/la loop-control instructions do not disturb
// the condition code, so the carry survives across iterations.
.globl bn_add_words
.type bn_add_words,@function
.align 4
bn_add_words:
la %r1,0(%r2) // put rp aside
lghi %r2,0 // i=0
ltgfr %r5,%r5
bler %r14 // if (len<=0) return 0;
stg %r6,48(%r15)
lghi %r6,3
nr %r6,%r5 // len%4
sra %r5,2 // len/4, use sra because it sets condition code
jz .Loop1_add // carry is incidentally cleared if branch taken
algr %r2,%r2 // clear carry
.Loop4_add:
lg %r0,0(%r2,%r3)
alcg %r0,0(%r2,%r4)
stg %r0,0(%r2,%r1)
lg %r0,8(%r2,%r3)
alcg %r0,8(%r2,%r4)
stg %r0,8(%r2,%r1)
lg %r0,16(%r2,%r3)
alcg %r0,16(%r2,%r4)
stg %r0,16(%r2,%r1)
lg %r0,24(%r2,%r3)
alcg %r0,24(%r2,%r4)
stg %r0,24(%r2,%r1)
la %r2,32(%r2) // i+=4
brct %r5,.Loop4_add
la %r6,1(%r6) // see if len%4 is zero ...
brct %r6,.Loop1_add // without touching condition code:-)
.Lexit_add:
lghi %r2,0
alcgr %r2,%r2 // r2 = final carry bit
lg %r6,48(%r15)
br %r14
.Loop1_add:
lg %r0,0(%r2,%r3)
alcg %r0,0(%r2,%r4)
stg %r0,0(%r2,%r1)
la %r2,8(%r2) // i++
brct %r6,.Loop1_add
j .Lexit_add
.size bn_add_words,.-bn_add_words
// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
// rp[i] = ap[i] - bp[i] with borrow propagation; returns the final
// borrow (0 or 1).  Mirror image of bn_add_words above: %r2 doubles as
// the byte index, %r1 keeps rp, and the borrow flag stays live across
// the loop, so loop control must not clobber the condition code.
.globl bn_sub_words
.type bn_sub_words,@function
.align 4
bn_sub_words:
la %r1,0(%r2) // put rp aside
lghi %r2,0 // i=0
ltgfr %r5,%r5
bler %r14 // if (len<=0) return 0;
stg %r6,48(%r15) // save callee-saved %r6
lghi %r6,3
nr %r6,%r5 // len%4
sra %r5,2 // len/4, use sra because it sets condition code
jnz .Loop4_sub // borrow is incidentally cleared if branch taken
slgr %r2,%r2 // clear borrow
.Loop1_sub:
lg %r0,0(%r2,%r3)
slbg %r0,0(%r2,%r4)
stg %r0,0(%r2,%r1)
la %r2,8(%r2) // i++
brct %r6,.Loop1_sub
j .Lexit_sub
.Loop4_sub:
lg %r0,0(%r2,%r3)
slbg %r0,0(%r2,%r4)
stg %r0,0(%r2,%r1)
lg %r0,8(%r2,%r3)
slbg %r0,8(%r2,%r4)
stg %r0,8(%r2,%r1)
lg %r0,16(%r2,%r3)
slbg %r0,16(%r2,%r4)
stg %r0,16(%r2,%r1)
lg %r0,24(%r2,%r3)
slbg %r0,24(%r2,%r4)
stg %r0,24(%r2,%r1)
la %r2,32(%r2) // i+=4
brct %r5,.Loop4_sub
la %r6,1(%r6) // see if len%4 is zero ...
brct %r6,.Loop1_sub // without touching condition code:-)
.Lexit_sub:
lghi %r2,0
slbgr %r2,%r2 // %r2 = 0 or -1 depending on borrow
lcgr %r2,%r2 // negate -> return 0 or 1
lg %r6,48(%r15) // restore %r6
br %r14
.size bn_sub_words,.-bn_sub_words
// Rotating column-accumulator registers for the comba routines below.
#define c1 %r1
#define c2 %r5
#define c3 %r8
// c1:c2:c3 += a[ai]*b[bi].  mlg leaves the 128-bit product in the
// even/odd pair %r6:%r7; the product is then carry-chained into the
// three-limb accumulator.  The register alias `zero' (its #define is
// outside this view) is kept loaded with 0 so alcgr can fold the final
// carry into c3.
#define mul_add_c(ai,bi,c1,c2,c3) \
lg %r7,ai*8(%r3); \
mlg %r6,bi*8(%r4); \
algr c1,%r7; \
alcgr c2,%r6; \
alcgr c3,zero
// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
// 8x8-limb comba multiplication: r2[0..15] = r3[0..7] * r4[0..7].
// Each column k sums all partial products a[i]*b[j] with i+j==k into the
// three-limb accumulator, stores the low limb, then the accumulator
// roles rotate (c1->c2->c3) for the next column.
.globl bn_mul_comba8
.type bn_mul_comba8,@function
.align 4
bn_mul_comba8:
stmg %r6,%r8,48(%r15) // save callee-saved %r6-%r8
lghi c1,0
lghi c2,0
lghi c3,0
lghi zero,0
mul_add_c(0,0,c1,c2,c3);
stg c1,0*8(%r2)
lghi c1,0
mul_add_c(0,1,c2,c3,c1);
mul_add_c(1,0,c2,c3,c1);
stg c2,1*8(%r2)
lghi c2,0
mul_add_c(2,0,c3,c1,c2);
mul_add_c(1,1,c3,c1,c2);
mul_add_c(0,2,c3,c1,c2);
stg c3,2*8(%r2)
lghi c3,0
mul_add_c(0,3,c1,c2,c3);
mul_add_c(1,2,c1,c2,c3);
mul_add_c(2,1,c1,c2,c3);
mul_add_c(3,0,c1,c2,c3);
stg c1,3*8(%r2)
lghi c1,0
mul_add_c(4,0,c2,c3,c1);
mul_add_c(3,1,c2,c3,c1);
mul_add_c(2,2,c2,c3,c1);
mul_add_c(1,3,c2,c3,c1);
mul_add_c(0,4,c2,c3,c1);
stg c2,4*8(%r2)
lghi c2,0
mul_add_c(0,5,c3,c1,c2);
mul_add_c(1,4,c3,c1,c2);
mul_add_c(2,3,c3,c1,c2);
mul_add_c(3,2,c3,c1,c2);
mul_add_c(4,1,c3,c1,c2);
mul_add_c(5,0,c3,c1,c2);
stg c3,5*8(%r2)
lghi c3,0
mul_add_c(6,0,c1,c2,c3);
mul_add_c(5,1,c1,c2,c3);
mul_add_c(4,2,c1,c2,c3);
mul_add_c(3,3,c1,c2,c3);
mul_add_c(2,4,c1,c2,c3);
mul_add_c(1,5,c1,c2,c3);
mul_add_c(0,6,c1,c2,c3);
stg c1,6*8(%r2)
lghi c1,0
mul_add_c(0,7,c2,c3,c1);
mul_add_c(1,6,c2,c3,c1);
mul_add_c(2,5,c2,c3,c1);
mul_add_c(3,4,c2,c3,c1);
mul_add_c(4,3,c2,c3,c1);
mul_add_c(5,2,c2,c3,c1);
mul_add_c(6,1,c2,c3,c1);
mul_add_c(7,0,c2,c3,c1);
stg c2,7*8(%r2)
lghi c2,0
mul_add_c(7,1,c3,c1,c2);
mul_add_c(6,2,c3,c1,c2);
mul_add_c(5,3,c3,c1,c2);
mul_add_c(4,4,c3,c1,c2);
mul_add_c(3,5,c3,c1,c2);
mul_add_c(2,6,c3,c1,c2);
mul_add_c(1,7,c3,c1,c2);
stg c3,8*8(%r2)
lghi c3,0
mul_add_c(2,7,c1,c2,c3);
mul_add_c(3,6,c1,c2,c3);
mul_add_c(4,5,c1,c2,c3);
mul_add_c(5,4,c1,c2,c3);
mul_add_c(6,3,c1,c2,c3);
mul_add_c(7,2,c1,c2,c3);
stg c1,9*8(%r2)
lghi c1,0
mul_add_c(7,3,c2,c3,c1);
mul_add_c(6,4,c2,c3,c1);
mul_add_c(5,5,c2,c3,c1);
mul_add_c(4,6,c2,c3,c1);
mul_add_c(3,7,c2,c3,c1);
stg c2,10*8(%r2)
lghi c2,0
mul_add_c(4,7,c3,c1,c2);
mul_add_c(5,6,c3,c1,c2);
mul_add_c(6,5,c3,c1,c2);
mul_add_c(7,4,c3,c1,c2);
stg c3,11*8(%r2)
lghi c3,0
mul_add_c(7,5,c1,c2,c3);
mul_add_c(6,6,c1,c2,c3);
mul_add_c(5,7,c1,c2,c3);
stg c1,12*8(%r2)
lghi c1,0
mul_add_c(6,7,c2,c3,c1);
mul_add_c(7,6,c2,c3,c1);
stg c2,13*8(%r2)
lghi c2,0
mul_add_c(7,7,c3,c1,c2);
stg c3,14*8(%r2)
stg c1,15*8(%r2) // top limb; c2 is guaranteed zero here
lmg %r6,%r8,48(%r15) // restore %r6-%r8
br %r14
.size bn_mul_comba8,.-bn_mul_comba8
// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
// 4x4-limb comba multiplication: r2[0..7] = r3[0..3] * r4[0..3],
// computed column by column with the rotating c1/c2/c3 accumulator
// (see mul_add_c above).
// Fixes vs. previous revision (matching the other comba routines and
// the upstream OpenSSL correction):
//  - result limb 0 was stored through %r3 (the multiplicand pointer)
//    instead of %r2 (the result pointer);
//  - the epilogue used stmg (store multiple) instead of lmg, so the
//    callee-saved registers %r6-%r8 were never restored.
.globl bn_mul_comba4
.type bn_mul_comba4,@function
.align 4
bn_mul_comba4:
stmg %r6,%r8,48(%r15) // save callee-saved %r6-%r8
lghi c1,0
lghi c2,0
lghi c3,0
lghi zero,0
mul_add_c(0,0,c1,c2,c3);
stg c1,0*8(%r2) // result limb 0 -> rp (was erroneously %r3)
lghi c1,0
mul_add_c(0,1,c2,c3,c1);
mul_add_c(1,0,c2,c3,c1);
stg c2,1*8(%r2)
lghi c2,0
mul_add_c(2,0,c3,c1,c2);
mul_add_c(1,1,c3,c1,c2);
mul_add_c(0,2,c3,c1,c2);
stg c3,2*8(%r2)
lghi c3,0
mul_add_c(0,3,c1,c2,c3);
mul_add_c(1,2,c1,c2,c3);
mul_add_c(2,1,c1,c2,c3);
mul_add_c(3,0,c1,c2,c3);
stg c1,3*8(%r2)
lghi c1,0
mul_add_c(3,1,c2,c3,c1);
mul_add_c(2,2,c2,c3,c1);
mul_add_c(1,3,c2,c3,c1);
stg c2,4*8(%r2)
lghi c2,0
mul_add_c(2,3,c3,c1,c2);
mul_add_c(3,2,c3,c1,c2);
stg c3,5*8(%r2)
lghi c3,0
mul_add_c(3,3,c1,c2,c3);
stg c1,6*8(%r2)
stg c2,7*8(%r2) // top limb
lmg %r6,%r8,48(%r15) // restore %r6-%r8 (was stmg, clobbering them)
br %r14
.size bn_mul_comba4,.-bn_mul_comba4
// c1:c2:c3 += a[ai]^2 (diagonal term, added once).  mlgr squares %r7
// into the 128-bit pair %r6:%r7.
#define sqr_add_c(ai,c1,c2,c3) \
lg %r7,ai*8(%r3); \
mlgr %r6,%r7; \
algr c1,%r7; \
alcgr c2,%r6; \
alcgr c3,zero
// c1:c2:c3 += 2*a[ai]*a[aj] (off-diagonal term): the product is folded
// into the accumulator twice rather than shifted, so the second addition
// can also ripple a carry into c3.
#define sqr_add_c2(ai,aj,c1,c2,c3) \
lg %r7,ai*8(%r3); \
mlg %r6,aj*8(%r3); \
algr c1,%r7; \
alcgr c2,%r6; \
alcgr c3,zero; \
algr c1,%r7; \
alcgr c2,%r6; \
alcgr c3,zero
// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
// 8-limb comba squaring: r2[0..15] = r3[0..7]^2.  Same column scheme as
// bn_mul_comba8, exploiting symmetry: each column has one optional
// diagonal term (sqr_add_c) plus doubled cross terms (sqr_add_c2).
.globl bn_sqr_comba8
.type bn_sqr_comba8,@function
.align 4
bn_sqr_comba8:
stmg %r6,%r8,48(%r15) // save callee-saved %r6-%r8
lghi c1,0
lghi c2,0
lghi c3,0
lghi zero,0
sqr_add_c(0,c1,c2,c3);
stg c1,0*8(%r2)
lghi c1,0
sqr_add_c2(1,0,c2,c3,c1);
stg c2,1*8(%r2)
lghi c2,0
sqr_add_c(1,c3,c1,c2);
sqr_add_c2(2,0,c3,c1,c2);
stg c3,2*8(%r2)
lghi c3,0
sqr_add_c2(3,0,c1,c2,c3);
sqr_add_c2(2,1,c1,c2,c3);
stg c1,3*8(%r2)
lghi c1,0
sqr_add_c(2,c2,c3,c1);
sqr_add_c2(3,1,c2,c3,c1);
sqr_add_c2(4,0,c2,c3,c1);
stg c2,4*8(%r2)
lghi c2,0
sqr_add_c2(5,0,c3,c1,c2);
sqr_add_c2(4,1,c3,c1,c2);
sqr_add_c2(3,2,c3,c1,c2);
stg c3,5*8(%r2)
lghi c3,0
sqr_add_c(3,c1,c2,c3);
sqr_add_c2(4,2,c1,c2,c3);
sqr_add_c2(5,1,c1,c2,c3);
sqr_add_c2(6,0,c1,c2,c3);
stg c1,6*8(%r2)
lghi c1,0
sqr_add_c2(7,0,c2,c3,c1);
sqr_add_c2(6,1,c2,c3,c1);
sqr_add_c2(5,2,c2,c3,c1);
sqr_add_c2(4,3,c2,c3,c1);
stg c2,7*8(%r2)
lghi c2,0
sqr_add_c(4,c3,c1,c2);
sqr_add_c2(5,3,c3,c1,c2);
sqr_add_c2(6,2,c3,c1,c2);
sqr_add_c2(7,1,c3,c1,c2);
stg c3,8*8(%r2)
lghi c3,0
sqr_add_c2(7,2,c1,c2,c3);
sqr_add_c2(6,3,c1,c2,c3);
sqr_add_c2(5,4,c1,c2,c3);
stg c1,9*8(%r2)
lghi c1,0
sqr_add_c(5,c2,c3,c1);
sqr_add_c2(6,4,c2,c3,c1);
sqr_add_c2(7,3,c2,c3,c1);
stg c2,10*8(%r2)
lghi c2,0
sqr_add_c2(7,4,c3,c1,c2);
sqr_add_c2(6,5,c3,c1,c2);
stg c3,11*8(%r2)
lghi c3,0
sqr_add_c(6,c1,c2,c3);
sqr_add_c2(7,5,c1,c2,c3);
stg c1,12*8(%r2)
lghi c1,0
sqr_add_c2(7,6,c2,c3,c1);
stg c2,13*8(%r2)
lghi c2,0
sqr_add_c(7,c3,c1,c2);
stg c3,14*8(%r2)
stg c1,15*8(%r2) // top limb
lmg %r6,%r8,48(%r15) // restore %r6-%r8
br %r14
.size bn_sqr_comba8,.-bn_sqr_comba8
// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
// 4-limb comba squaring: r2[0..7] = r3[0..3]^2.  Scaled-down version of
// bn_sqr_comba8 above: one optional diagonal term per column plus
// doubled cross terms, accumulator roles rotating per column.
.globl bn_sqr_comba4
.type bn_sqr_comba4,@function
.align 4
bn_sqr_comba4:
stmg %r6,%r8,48(%r15) // save callee-saved %r6-%r8
lghi c1,0
lghi c2,0
lghi c3,0
lghi zero,0
sqr_add_c(0,c1,c2,c3);
stg c1,0*8(%r2)
lghi c1,0
sqr_add_c2(1,0,c2,c3,c1);
stg c2,1*8(%r2)
lghi c2,0
sqr_add_c(1,c3,c1,c2);
sqr_add_c2(2,0,c3,c1,c2);
stg c3,2*8(%r2)
lghi c3,0
sqr_add_c2(3,0,c1,c2,c3);
sqr_add_c2(2,1,c1,c2,c3);
stg c1,3*8(%r2)
lghi c1,0
sqr_add_c(2,c2,c3,c1);
sqr_add_c2(3,1,c2,c3,c1);
stg c2,4*8(%r2)
lghi c2,0
sqr_add_c2(3,2,c3,c1,c2);
stg c3,5*8(%r2)
lghi c3,0
sqr_add_c(3,c1,c2,c3);
stg c1,6*8(%r2)
stg c2,7*8(%r2) // top limb
lmg %r6,%r8,48(%r15) // restore %r6-%r8
br %r14
.size bn_sqr_comba4,.-bn_sqr_comba4
// ====================================================================
// The remainder of this file originates from a separate source file:
//   Library/OpensslLib/openssl-1.0.1e/crypto/aes/asm/aes-ia64.S
// (The preceding bn_* routines are s390x code, not IA-64.)
// ====================================================================
// ====================================================================
// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
// project. Rights for redistribution and usage in source and binary
// forms are granted according to the OpenSSL license.
// ====================================================================
//
// What's wrong with compiler generated code? Compiler never uses
// variable 'shr' which is pairable with 'extr'/'dep' instructions.
// Then it uses 'zxt' which is an I-type, but can be replaced with
// 'and' which in turn can be assigned to M-port [there're double as
// much M-ports as there're I-ports on Itanium 2]. By sacrificing few
// registers for small constants (255, 24 and 16) to be used with
// 'shr' and 'and' instructions I can achieve better ILP, Intruction
// Level Parallelism, and performance. This code outperforms GCC 3.3
// generated code by over factor of 2 (two), GCC 3.4 - by 70% and
// HP C - by 40%. Measured best-case scenario, i.e. aligned
// big-endian input, ECB timing on Itanium 2 is (18 + 13*rounds)
// ticks per block, or 9.25 CPU cycles per byte for 128 bit key.
// Version 1.2 mitigates the hazard of cache-timing attacks by
// a) compressing S-boxes from 8KB to 2KB+256B, b) scheduling
// references to S-boxes for L2 cache latency, c) prefetching T[ed]4
// prior last round. As result performance dropped to (26 + 15*rounds)
// ticks per block or 11 cycles per byte processed with 128-bit key.
// This is ~16% deterioration. For reference Itanium 2 L1 cache has
// 64 bytes line size and L2 - 128 bytes...
.ident "aes-ia64.S, version 1.2"
.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
.explicit
.text
// Symbolic register assignments shared by the AES routines below.
rk0=r8; rk1=r9; // round-key pointers (even/odd key words)
pfssave=r2; // saved ar.pfs
lcsave=r10; // saved ar.lc
prsave=r3; // saved predicate registers
maskff=r11; // constant 0xff
twenty4=r14; // constant 24 (shift count)
sixteen=r15; // constant 16 (shift count)
// Per-lookup temporaries: teXY holds the table value destined for
// output word X, byte position Y.
te00=r16; te11=r17; te22=r18; te33=r19;
te01=r20; te12=r21; te23=r22; te30=r23;
te02=r24; te13=r25; te20=r26; te31=r27;
te03=r28; te10=r29; te21=r30; te32=r31;
// these are rotating...
t0=r32; s0=r33;
t1=r34; s1=r35;
t2=r36; s2=r37;
t3=r38; s3=r39;
te0=r40; te1=r41; te2=r42; te3=r43;
// HP-UX ILP32 needs addp4 to turn 32-bit pointers into 64-bit addresses.
#if defined(_HPUX_SOURCE) && !defined(_LP64)
# define ADDP addp4
#else
# define ADDP add
#endif
// Offsets from Te0
#define TE0 0
#define TE2 2
// Byte-lane offsets within the compressed table differ by endianness.
#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
#define TE1 3
#define TE3 1
#else
#define TE1 1
#define TE3 3
#endif
// This implies that AES_KEY comprises 32-bit key schedule elements
// even on LP64 platforms.
#ifndef KSZ
# define KSZ 4
# define LDKEY ld4
#endif
.proc _ia64_AES_encrypt#
// Inner AES encryption: all rounds for one 16-byte block, using the
// compressed AES_Te tables.  The round loop is modulo-scheduled
// (ar.ec=2, predicate rotation seeded via pr.rot); on the final
// iteration (p17) the te0-te3 pointers are advanced 2048+n bytes to the
// byte-wide Te4 copy, which .Le_end then uses for the last round.
// Input: rk0-rk1
// te0
// te3 as AES_KEY->rounds!!!
// s0-s3
// maskff,twenty4,sixteen
// Output: r16,r20,r24,r28 as s0-s3
// Clobber: r16-r31,rk0-rk1,r32-r43
.align 32
_ia64_AES_encrypt:
.prologue
.altrp b6
.body
{ .mmi; alloc r16=ar.pfs,12,0,0,8
LDKEY t0=[rk0],2*KSZ
mov pr.rot=1<<16 }
{ .mmi; LDKEY t1=[rk1],2*KSZ
add te1=TE1,te0
add te3=-3,te3 };; // loop count = rounds-3 (epilogue covers the rest)
{ .mib; LDKEY t2=[rk0],2*KSZ
mov ar.ec=2 }
{ .mib; LDKEY t3=[rk1],2*KSZ
add te2=TE2,te0
brp.loop.imp .Le_top,.Le_end-16 };;
// Initial AddRoundKey: xor the state with the first round key.
{ .mmi; xor s0=s0,t0
xor s1=s1,t1
mov ar.lc=te3 }
{ .mmi; xor s2=s2,t2
xor s3=s3,t3
add te3=TE3,te0 };;
.align 32
// Main round loop: four 4-byte table lookups per state word, xor'd with
// the next round key.  Bundle comments give cycle/word scheduling.
.Le_top:
{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
(p0) and te33=s3,maskff // 0/0:s3&0xff
(p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
(p0) and te30=s0,maskff // 0/1:s0&0xff
(p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
(p0) shladd te33=te33,3,te3 // 1/0:te0+s0>>24
(p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
(p0) shladd te30=te30,3,te3 // 1/1:te3+s0
(p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; (p0) ld4 te33=[te33] // 2/0:te3[s3&0xff]
(p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
(p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; (p0) ld4 te30=[te30] // 2/1:te3[s0]
(p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
(p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
(p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
(p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
(p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
(p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
(p0) shladd te21=te21,3,te2 // 4/3:te3+s2
(p0) extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
(p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
(p0) shr.u te13=s3,sixteen };; // 4/2:s3>>16
{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
(p0) shladd te11=te11,3,te1 // 5/0:te1+s1>>16
(p0) extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
(p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
(p0) and te31=s1,maskff };; // 5/2:s1&0xff
{ .mmi; (p0) ld4 te11=[te11] // 6/0:te1[s1>>16]
(p0) shladd te12=te12,3,te1 // 6/1:te1+s2>>16
(p0) extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
(p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
(p0) and te32=s2,maskff };; // 6/3:s2&0xff
{ .mmi; (p0) ld4 te12=[te12] // 7/1:te1[s2>>16]
(p0) shladd te31=te31,3,te3 // 7/2:te3+s1&0xff
(p0) and te13=te13,maskff} // 7/2:s3>>16&0xff
{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
(p0) shladd te32=te32,3,te3 // 7/3:te3+s2
(p0) xor t0=t0,te33 };; // 7/0:
{ .mmi; (p0) ld4 te31=[te31] // 8/2:te3[s1]
(p0) shladd te13=te13,3,te1 // 8/2:te1+s3>>16
(p0) xor t0=t0,te22 } // 8/0:
{ .mmi; (p0) ld4 te32=[te32] // 8/3:te3[s2]
(p0) shladd te10=te10,3,te1 // 8/3:te1+s0>>16
(p0) xor t1=t1,te30 };; // 8/1:
{ .mmi; (p0) ld4 te13=[te13] // 9/2:te1[s3>>16]
(p0) ld4 te10=[te10] // 9/3:te1[s0>>16]
(p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
(p0) xor t2=t2,te20 // 10[9]/2:
(p0) xor t3=t3,te21 };; // 10[9]/3:
{ .mmi; (p0) xor t0=t0,te11 // 11[10]/0:done!
(p0) xor t1=t1,te01 // 11[10]/1:
(p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
(p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
{ .mmi; (p0) xor t1=t1,te12 // 13[11]/1:done!
(p0) xor t2=t2,te31 // 13[11]/2:
(p0) xor t3=t3,te32 } // 13[11]/3:
{ .mmi; (p17) add te0=2048,te0 // 13[11]/
(p17) add te1=2048+64-TE1,te1};; // 13[11]/
{ .mib; (p0) xor t2=t2,te13 // 14[12]/2:done!
(p17) add te2=2048+128-TE2,te2} // 14[12]/
{ .mib; (p0) xor t3=t3,te10 // 14[12]/3:done!
(p17) add te3=2048+192-TE3,te3 // 14[12]/
br.ctop.sptk .Le_top };;
// Last round: byte-wide Te4 lookups (ld1), merged into words with
// dep/shl instead of the 4-byte table reads used above.
.Le_end:
{ .mmi; ld8 te12=[te0] // prefetch Te4
ld8 te31=[te1] }
{ .mmi; ld8 te10=[te2]
ld8 te32=[te3] }
{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
and te33=s3,maskff // 0/0:s3&0xff
extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
and te30=s0,maskff // 0/1:s0&0xff
shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
add te33=te33,te0 // 1/0:te0+s0>>24
extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
add te30=te30,te0 // 1/1:te0+s0
shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; ld1 te33=[te33] // 2/0:te0[s3&0xff]
add te22=te22,te0 // 2/0:te0+s2>>8&0xff
extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; ld1 te30=[te30] // 2/1:te0[s0]
add te23=te23,te0 // 2/1:te0+s3>>8
shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
add te20=te20,te0 // 3/2:te0+s0>>8
extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
add te00=te00,te0 // 3/0:te0+s0>>24
shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
add te21=te21,te0 // 4/3:te0+s2
extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
add te01=te01,te0 // 4/1:te0+s1>>24
shr.u te13=s3,sixteen };; // 4/2:s3>>16
{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
add te11=te11,te0 // 5/0:te0+s1>>16
extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
add te02=te02,te0 // 5/2:te0+s2>>24
and te31=s1,maskff };; // 5/2:s1&0xff
{ .mmi; ld1 te11=[te11] // 6/0:te0[s1>>16]
add te12=te12,te0 // 6/1:te0+s2>>16
extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
add te03=te03,te0 // 6/3:te0+s0>>16
and te32=s2,maskff };; // 6/3:s2&0xff
{ .mmi; ld1 te12=[te12] // 7/1:te0[s2>>16]
add te31=te31,te0 // 7/2:te0+s1&0xff
dep te33=te22,te33,8,8} // 7/0:
{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
add te32=te32,te0 // 7/3:te0+s2
and te13=te13,maskff};; // 7/2:s3>>16&0xff
{ .mmi; ld1 te31=[te31] // 8/2:te0[s1]
add te13=te13,te0 // 8/2:te0+s3>>16
dep te30=te23,te30,8,8} // 8/1:
{ .mmi; ld1 te32=[te32] // 8/3:te0[s2]
add te10=te10,te0 // 8/3:te0+s0>>16
shl te00=te00,twenty4};; // 8/0:
{ .mii; ld1 te13=[te13] // 9/2:te0[s3>>16]
dep te33=te11,te33,16,8 // 9/0:
shl te01=te01,twenty4};; // 9/1:
{ .mii; ld1 te10=[te10] // 10/3:te0[s0>>16]
dep te31=te20,te31,8,8 // 10/2:
shl te02=te02,twenty4};; // 10/2:
{ .mii; xor t0=t0,te33 // 11/0:
dep te32=te21,te32,8,8 // 11/3:
shl te12=te12,sixteen};; // 11/1:
{ .mii; xor r16=t0,te00 // 12/0:done!
dep te31=te13,te31,16,8 // 12/2:
shl te03=te03,twenty4};; // 12/3:
{ .mmi; xor t1=t1,te01 // 13/1:
xor t2=t2,te02 // 13/2:
dep te32=te10,te32,16,8};; // 13/3:
{ .mmi; xor t1=t1,te30 // 14/1:
xor r24=t2,te31 // 14/2:done!
xor t3=t3,te32 };; // 14/3:
{ .mib; xor r20=t1,te12 // 15/1:done!
xor r28=t3,te03 // 15/3:done!
br.ret.sptk b6 };;
.endp _ia64_AES_encrypt#
// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
// Public entry point: loads the 16-byte input block (big-endian word
// assembly, with a byte-by-byte path for unaligned pointers), sets up
// the constant registers and round-key pointers, calls
// _ia64_AES_encrypt, and stores the result.  On HP-UX (big-endian) an
// aligned fast path does 4-byte loads/stores directly; elsewhere the
// byte-wise paths are used unconditionally.
.global AES_encrypt#
.proc AES_encrypt#
.align 32
AES_encrypt:
.prologue
.save ar.pfs,pfssave
{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
and out0=3,in0 // in0 alignment (mod 4)
mov r3=ip }
{ .mmi; ADDP in0=0,in0
mov loc0=psr.um
ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
add out8=(AES_Te#-AES_encrypt#),r3 // Te0
.save pr,prsave
mov prsave=pr }
{ .mmi; rum 1<<3 // clear um.ac
.save ar.lc,lcsave
mov lcsave=ar.lc };;
.body
#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
{ .mib; cmp.ne p6,p0=out0,r0
add out0=4,in0
(p6) br.dpnt.many .Le_i_unaligned };;
{ .mmi; ld4 out1=[in0],8 // s0
and out9=3,in1
mov twenty4=24 }
{ .mmi; ld4 out3=[out0],8 // s1
ADDP rk0=0,in2
mov sixteen=16 };;
{ .mmi; ld4 out5=[in0] // s2
cmp.ne p6,p0=out9,r0
mov maskff=0xff }
{ .mmb; ld4 out7=[out0] // s3
ADDP rk1=KSZ,in2
br.call.sptk.many b6=_ia64_AES_encrypt };;
{ .mib; ADDP in0=4,in1
ADDP in1=0,in1
(p6) br.spnt .Le_o_unaligned };;
{ .mii; mov psr.um=loc0 // restore user mask
mov ar.pfs=pfssave
mov ar.lc=lcsave };;
{ .mmi; st4 [in1]=r16,8 // s0
st4 [in0]=r20,8 // s1
mov pr=prsave,0x1ffff };;
{ .mmb; st4 [in1]=r24 // s2
st4 [in0]=r28 // s3
br.ret.sptk.many b0 };;
#endif
.align 32
// Byte-wise input path: gather 16 bytes and dep-merge them MSB-first
// into the four state words s0-s3.
.Le_i_unaligned:
{ .mmi; add out0=1,in0
add out2=2,in0
add out4=3,in0 };;
{ .mmi; ld1 r16=[in0],4
ld1 r17=[out0],4 }//;;
{ .mmi; ld1 r18=[out2],4
ld1 out1=[out4],4 };; // s0
{ .mmi; ld1 r20=[in0],4
ld1 r21=[out0],4 }//;;
{ .mmi; ld1 r22=[out2],4
ld1 out3=[out4],4 };; // s1
{ .mmi; ld1 r24=[in0],4
ld1 r25=[out0],4 }//;;
{ .mmi; ld1 r26=[out2],4
ld1 out5=[out4],4 };; // s2
{ .mmi; ld1 r28=[in0]
ld1 r29=[out0] }//;;
{ .mmi; ld1 r30=[out2]
ld1 out7=[out4] };; // s3
{ .mii;
dep out1=r16,out1,24,8 //;;
dep out3=r20,out3,24,8 }//;;
{ .mii; ADDP rk0=0,in2
dep out5=r24,out5,24,8 //;;
dep out7=r28,out7,24,8 };;
{ .mii; ADDP rk1=KSZ,in2
dep out1=r17,out1,16,8 //;;
dep out3=r21,out3,16,8 }//;;
{ .mii; mov twenty4=24
dep out5=r25,out5,16,8 //;;
dep out7=r29,out7,16,8 };;
{ .mii; mov sixteen=16
dep out1=r18,out1,8,8 //;;
dep out3=r22,out3,8,8 }//;;
{ .mii; mov maskff=0xff
dep out5=r26,out5,8,8 //;;
dep out7=r30,out7,8,8 };;
{ .mib; br.call.sptk.many b6=_ia64_AES_encrypt };;
// Byte-wise output path: split r16/r20/r24/r28 into bytes with
// extr.u/shr.u and store them MSB-first.
.Le_o_unaligned:
{ .mii; ADDP out0=0,in1
extr.u r17=r16,8,8 // s0
shr.u r19=r16,twenty4 }//;;
{ .mii; ADDP out1=1,in1
extr.u r18=r16,16,8
shr.u r23=r20,twenty4 }//;; // s1
{ .mii; ADDP out2=2,in1
extr.u r21=r20,8,8
shr.u r22=r20,sixteen }//;;
{ .mii; ADDP out3=3,in1
extr.u r25=r24,8,8 // s2
shr.u r27=r24,twenty4 };;
{ .mii; st1 [out3]=r16,4
extr.u r26=r24,16,8
shr.u r31=r28,twenty4 }//;; // s3
{ .mii; st1 [out2]=r17,4
extr.u r29=r28,8,8
shr.u r30=r28,sixteen }//;;
{ .mmi; st1 [out1]=r18,4
st1 [out0]=r19,4 };;
{ .mmi; st1 [out3]=r20,4
st1 [out2]=r21,4 }//;;
{ .mmi; st1 [out1]=r22,4
st1 [out0]=r23,4 };;
{ .mmi; st1 [out3]=r24,4
st1 [out2]=r25,4
mov pr=prsave,0x1ffff }//;;
{ .mmi; st1 [out1]=r26,4
st1 [out0]=r27,4
mov ar.pfs=pfssave };;
{ .mmi; st1 [out3]=r28
st1 [out2]=r29
mov ar.lc=lcsave }//;;
{ .mmi; st1 [out1]=r30
st1 [out0]=r31 }
{ .mfb; mov psr.um=loc0 // restore user mask
br.ret.sptk.many b0 };;
.endp AES_encrypt#
// *AES_decrypt are autogenerated by the following script:
#if 0
#!/usr/bin/env perl
print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
print "#endif\n";
while(<>) {
$process=1 if (/\.proc\s+_ia64_AES_encrypt/);
next if (!$process);
#s/te00=s0/td00=s0/; s/te00/td00/g;
s/te11=s1/td13=s3/; s/te11/td13/g;
#s/te22=s2/td22=s2/; s/te22/td22/g;
s/te33=s3/td31=s1/; s/te33/td31/g;
#s/te01=s1/td01=s1/; s/te01/td01/g;
s/te12=s2/td10=s0/; s/te12/td10/g;
#s/te23=s3/td23=s3/; s/te23/td23/g;
s/te30=s0/td32=s2/; s/te30/td32/g;
#s/te02=s2/td02=s2/; s/te02/td02/g;
s/te13=s3/td11=s1/; s/te13/td11/g;
#s/te20=s0/td20=s0/; s/te20/td20/g;
s/te31=s1/td33=s3/; s/te31/td33/g;
#s/te03=s3/td03=s3/; s/te03/td03/g;
s/te10=s0/td12=s2/; s/te10/td12/g;
#s/te21=s1/td21=s1/; s/te21/td21/g;
s/te32=s2/td30=s0/; s/te32/td30/g;
s/td/te/g;
s/AES_encrypt/AES_decrypt/g;
s/\.Le_/.Ld_/g;
s/AES_Te#/AES_Td#/g;
print;
exit if (/\.endp\s+AES_decrypt/);
}
#endif
.proc _ia64_AES_decrypt#
// Inner AES decryption: mirror of _ia64_AES_encrypt, autogenerated from
// it by the perl script above (register roles remapped for the inverse
// byte permutation, tables are AES_Td).
// NOTE(review): most per-bundle lane comments (e.g. "s3&0xff") were
// inherited verbatim from the encrypt path by the generator and
// describe the encrypt byte lanes, not the remapped operands here --
// trust the code, not those comments.
// Input: rk0-rk1
// te0
// te3 as AES_KEY->rounds!!!
// s0-s3
// maskff,twenty4,sixteen
// Output: r16,r20,r24,r28 as s0-s3
// Clobber: r16-r31,rk0-rk1,r32-r43
.align 32
_ia64_AES_decrypt:
.prologue
.altrp b6
.body
{ .mmi; alloc r16=ar.pfs,12,0,0,8
LDKEY t0=[rk0],2*KSZ
mov pr.rot=1<<16 }
{ .mmi; LDKEY t1=[rk1],2*KSZ
add te1=TE1,te0
add te3=-3,te3 };; // loop count = rounds-3
{ .mib; LDKEY t2=[rk0],2*KSZ
mov ar.ec=2 }
{ .mib; LDKEY t3=[rk1],2*KSZ
add te2=TE2,te0
brp.loop.imp .Ld_top,.Ld_end-16 };;
// Initial AddRoundKey.
{ .mmi; xor s0=s0,t0
xor s1=s1,t1
mov ar.lc=te3 }
{ .mmi; xor s2=s2,t2
xor s3=s3,t3
add te3=TE3,te0 };;
.align 32
.Ld_top:
{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
(p0) and te31=s1,maskff // 0/0:s3&0xff
(p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
(p0) and te32=s2,maskff // 0/1:s0&0xff
(p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
(p0) shladd te31=te31,3,te3 // 1/0:te0+s0>>24
(p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
(p0) shladd te32=te32,3,te3 // 1/1:te3+s0
(p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; (p0) ld4 te31=[te31] // 2/0:te3[s3&0xff]
(p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
(p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; (p0) ld4 te32=[te32] // 2/1:te3[s0]
(p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
(p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
(p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
(p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
(p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
(p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
(p0) shladd te21=te21,3,te2 // 4/3:te3+s2
(p0) extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
(p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
(p0) shr.u te11=s1,sixteen };; // 4/2:s3>>16
{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
(p0) shladd te13=te13,3,te1 // 5/0:te1+s1>>16
(p0) extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
(p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
(p0) and te33=s3,maskff };; // 5/2:s1&0xff
{ .mmi; (p0) ld4 te13=[te13] // 6/0:te1[s1>>16]
(p0) shladd te10=te10,3,te1 // 6/1:te1+s2>>16
(p0) extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
(p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
(p0) and te30=s0,maskff };; // 6/3:s2&0xff
{ .mmi; (p0) ld4 te10=[te10] // 7/1:te1[s2>>16]
(p0) shladd te33=te33,3,te3 // 7/2:te3+s1&0xff
(p0) and te11=te11,maskff} // 7/2:s3>>16&0xff
{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
(p0) shladd te30=te30,3,te3 // 7/3:te3+s2
(p0) xor t0=t0,te31 };; // 7/0:
{ .mmi; (p0) ld4 te33=[te33] // 8/2:te3[s1]
(p0) shladd te11=te11,3,te1 // 8/2:te1+s3>>16
(p0) xor t0=t0,te22 } // 8/0:
{ .mmi; (p0) ld4 te30=[te30] // 8/3:te3[s2]
(p0) shladd te12=te12,3,te1 // 8/3:te1+s0>>16
(p0) xor t1=t1,te32 };; // 8/1:
{ .mmi; (p0) ld4 te11=[te11] // 9/2:te1[s3>>16]
(p0) ld4 te12=[te12] // 9/3:te1[s0>>16]
(p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
(p0) xor t2=t2,te20 // 10[9]/2:
(p0) xor t3=t3,te21 };; // 10[9]/3:
{ .mmi; (p0) xor t0=t0,te13 // 11[10]/0:done!
(p0) xor t1=t1,te01 // 11[10]/1:
(p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
(p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
{ .mmi; (p0) xor t1=t1,te10 // 13[11]/1:done!
(p0) xor t2=t2,te33 // 13[11]/2:
(p0) xor t3=t3,te30 } // 13[11]/3:
{ .mmi; (p17) add te0=2048,te0 // 13[11]/
(p17) add te1=2048+64-TE1,te1};; // 13[11]/
{ .mib; (p0) xor t2=t2,te11 // 14[12]/2:done!
(p17) add te2=2048+128-TE2,te2} // 14[12]/
{ .mib; (p0) xor t3=t3,te12 // 14[12]/3:done!
(p17) add te3=2048+192-TE3,te3 // 14[12]/
br.ctop.sptk .Ld_top };;
// Last round: byte-wide Td4 lookups (ld1) merged with dep/shl.
.Ld_end:
{ .mmi; ld8 te10=[te0] // prefetch Td4
ld8 te33=[te1] }
{ .mmi; ld8 te12=[te2]
ld8 te30=[te3] }
{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
and te31=s1,maskff // 0/0:s3&0xff
extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
and te32=s2,maskff // 0/1:s0&0xff
shr.u te00=s0,twenty4 };; // 0/0:s0>>24
{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
add te31=te31,te0 // 1/0:te0+s0>>24
extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
add te32=te32,te0 // 1/1:te0+s0
shr.u te01=s1,twenty4 };; // 1/1:s1>>24
{ .mmi; ld1 te31=[te31] // 2/0:te0[s3&0xff]
add te22=te22,te0 // 2/0:te0+s2>>8&0xff
extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
{ .mmi; ld1 te32=[te32] // 2/1:te0[s0]
add te23=te23,te0 // 2/1:te0+s3>>8
shr.u te02=s2,twenty4 };; // 2/2:s2>>24
{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
add te20=te20,te0 // 3/2:te0+s0>>8
extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
add te00=te00,te0 // 3/0:te0+s0>>24
shr.u te03=s3,twenty4 };; // 3/3:s3>>24
{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
add te21=te21,te0 // 4/3:te0+s2
extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
add te01=te01,te0 // 4/1:te0+s1>>24
shr.u te11=s1,sixteen };; // 4/2:s3>>16
{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
add te13=te13,te0 // 5/0:te0+s1>>16
extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
add te02=te02,te0 // 5/2:te0+s2>>24
and te33=s3,maskff };; // 5/2:s1&0xff
{ .mmi; ld1 te13=[te13] // 6/0:te0[s1>>16]
add te10=te10,te0 // 6/1:te0+s2>>16
extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
add te03=te03,te0 // 6/3:te0+s0>>16
and te30=s0,maskff };; // 6/3:s2&0xff
{ .mmi; ld1 te10=[te10] // 7/1:te0[s2>>16]
add te33=te33,te0 // 7/2:te0+s1&0xff
dep te31=te22,te31,8,8} // 7/0:
{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
add te30=te30,te0 // 7/3:te0+s2
and te11=te11,maskff};; // 7/2:s3>>16&0xff
{ .mmi; ld1 te33=[te33] // 8/2:te0[s1]
add te11=te11,te0 // 8/2:te0+s3>>16
dep te32=te23,te32,8,8} // 8/1:
{ .mmi; ld1 te30=[te30] // 8/3:te0[s2]
add te12=te12,te0 // 8/3:te0+s0>>16
shl te00=te00,twenty4};; // 8/0:
{ .mii; ld1 te11=[te11] // 9/2:te0[s3>>16]
dep te31=te13,te31,16,8 // 9/0:
shl te01=te01,twenty4};; // 9/1:
{ .mii; ld1 te12=[te12] // 10/3:te0[s0>>16]
dep te33=te20,te33,8,8 // 10/2:
shl te02=te02,twenty4};; // 10/2:
{ .mii; xor t0=t0,te31 // 11/0:
dep te30=te21,te30,8,8 // 11/3:
shl te10=te10,sixteen};; // 11/1:
{ .mii; xor r16=t0,te00 // 12/0:done!
dep te33=te11,te33,16,8 // 12/2:
shl te03=te03,twenty4};; // 12/3:
{ .mmi; xor t1=t1,te01 // 13/1:
xor t2=t2,te02 // 13/2:
dep te30=te12,te30,16,8};; // 13/3:
{ .mmi; xor t1=t1,te32 // 14/1:
xor r24=t2,te33 // 14/2:done!
xor t3=t3,te30 };; // 14/3:
{ .mib; xor r20=t1,te10 // 15/1:done!
xor r28=t3,te03 // 15/3:done!
br.ret.sptk b6 };;
.endp _ia64_AES_decrypt#
// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
// Public entry point, generated from AES_encrypt by the perl script
// above: loads the 16-byte input block (byte-wise path for unaligned
// pointers), sets up constants and round-key pointers, calls
// _ia64_AES_decrypt against the AES_Td tables, and stores the result.
.global AES_decrypt#
.proc AES_decrypt#
.align 32
AES_decrypt:
.prologue
.save ar.pfs,pfssave
{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
and out0=3,in0 // in0 alignment (mod 4)
mov r3=ip }
{ .mmi; ADDP in0=0,in0
mov loc0=psr.um
ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
add out8=(AES_Td#-AES_decrypt#),r3 // Te0
.save pr,prsave
mov prsave=pr }
{ .mmi; rum 1<<3 // clear um.ac
.save ar.lc,lcsave
mov lcsave=ar.lc };;
.body
#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
{ .mib; cmp.ne p6,p0=out0,r0
add out0=4,in0
(p6) br.dpnt.many .Ld_i_unaligned };;
{ .mmi; ld4 out1=[in0],8 // s0
and out9=3,in1
mov twenty4=24 }
{ .mmi; ld4 out3=[out0],8 // s1
ADDP rk0=0,in2
mov sixteen=16 };;
{ .mmi; ld4 out5=[in0] // s2
cmp.ne p6,p0=out9,r0
mov maskff=0xff }
{ .mmb; ld4 out7=[out0] // s3
ADDP rk1=KSZ,in2
br.call.sptk.many b6=_ia64_AES_decrypt };;
{ .mib; ADDP in0=4,in1
ADDP in1=0,in1
(p6) br.spnt .Ld_o_unaligned };;
{ .mii; mov psr.um=loc0 // restore user mask
mov ar.pfs=pfssave
mov ar.lc=lcsave };;
{ .mmi; st4 [in1]=r16,8 // s0
st4 [in0]=r20,8 // s1
mov pr=prsave,0x1ffff };;
{ .mmb; st4 [in1]=r24 // s2
st4 [in0]=r28 // s3
br.ret.sptk.many b0 };;
#endif
.align 32
// Byte-wise input path: gather 16 bytes, dep-merge MSB-first into s0-s3.
.Ld_i_unaligned:
{ .mmi; add out0=1,in0
add out2=2,in0
add out4=3,in0 };;
{ .mmi; ld1 r16=[in0],4
ld1 r17=[out0],4 }//;;
{ .mmi; ld1 r18=[out2],4
ld1 out1=[out4],4 };; // s0
{ .mmi; ld1 r20=[in0],4
ld1 r21=[out0],4 }//;;
{ .mmi; ld1 r22=[out2],4
ld1 out3=[out4],4 };; // s1
{ .mmi; ld1 r24=[in0],4
ld1 r25=[out0],4 }//;;
{ .mmi; ld1 r26=[out2],4
ld1 out5=[out4],4 };; // s2
{ .mmi; ld1 r28=[in0]
ld1 r29=[out0] }//;;
{ .mmi; ld1 r30=[out2]
ld1 out7=[out4] };; // s3
{ .mii;
dep out1=r16,out1,24,8 //;;
dep out3=r20,out3,24,8 }//;;
{ .mii; ADDP rk0=0,in2
dep out5=r24,out5,24,8 //;;
dep out7=r28,out7,24,8 };;
{ .mii; ADDP rk1=KSZ,in2
dep out1=r17,out1,16,8 //;;
dep out3=r21,out3,16,8 }//;;
{ .mii; mov twenty4=24
dep out5=r25,out5,16,8 //;;
dep out7=r29,out7,16,8 };;
{ .mii; mov sixteen=16
dep out1=r18,out1,8,8 //;;
dep out3=r22,out3,8,8 }//;;
{ .mii; mov maskff=0xff
dep out5=r26,out5,8,8 //;;
dep out7=r30,out7,8,8 };;
{ .mib; br.call.sptk.many b6=_ia64_AES_decrypt };;
// Byte-wise output path: split result words with extr.u/shr.u and
// store the bytes MSB-first.
.Ld_o_unaligned:
{ .mii; ADDP out0=0,in1
extr.u r17=r16,8,8 // s0
shr.u r19=r16,twenty4 }//;;
{ .mii; ADDP out1=1,in1
extr.u r18=r16,16,8
shr.u r23=r20,twenty4 }//;; // s1
{ .mii; ADDP out2=2,in1
extr.u r21=r20,8,8
shr.u r22=r20,sixteen }//;;
{ .mii; ADDP out3=3,in1
extr.u r25=r24,8,8 // s2
shr.u r27=r24,twenty4 };;
{ .mii; st1 [out3]=r16,4
extr.u r26=r24,16,8
shr.u r31=r28,twenty4 }//;; // s3
{ .mii; st1 [out2]=r17,4
extr.u r29=r28,8,8
shr.u r30=r28,sixteen }//;;
{ .mmi; st1 [out1]=r18,4
st1 [out0]=r19,4 };;
{ .mmi; st1 [out3]=r20,4
st1 [out2]=r21,4 }//;;
{ .mmi; st1 [out1]=r22,4
st1 [out0]=r23,4 };;
{ .mmi; st1 [out3]=r24,4
st1 [out2]=r25,4
mov pr=prsave,0x1ffff }//;;
{ .mmi; st1 [out1]=r26,4
st1 [out0]=r27,4
mov ar.pfs=pfssave };;
{ .mmi; st1 [out3]=r28
st1 [out2]=r29
mov ar.lc=lcsave }//;;
{ .mmi; st1 [out1]=r30
st1 [out0]=r31 }
{ .mfb; mov psr.um=loc0 // restore user mask
br.ret.sptk.many b0 };;
.endp AES_decrypt#
// leave it in .text segment...
// AES_Te: combined encryption lookup table. 256 entries, each 32-bit
// value stored twice per element (2 x data4 => 2048 bytes), followed by
// the 256-byte AES S-box (Te4). Values are constants fixed by the AES
// specification (FIPS-197) and must not be modified.
.align 64
.global AES_Te#
.type AES_Te#,@object
AES_Te: data4 0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
data4 0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
data4 0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
data4 0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
data4 0x60303050,0x60303050, 0x02010103,0x02010103
data4 0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
data4 0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
data4 0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
data4 0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
data4 0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
data4 0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
data4 0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
data4 0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
data4 0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
data4 0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
data4 0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
data4 0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
data4 0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
data4 0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
data4 0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
data4 0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
data4 0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
data4 0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
data4 0x62313153,0x62313153, 0x2a15153f,0x2a15153f
data4 0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
data4 0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
data4 0x30181828,0x30181828, 0x379696a1,0x379696a1
data4 0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
data4 0x0e070709,0x0e070709, 0x24121236,0x24121236
data4 0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
data4 0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
data4 0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
data4 0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
data4 0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
data4 0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
data4 0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
data4 0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
data4 0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
data4 0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
data4 0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
data4 0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
data4 0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
data4 0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
data4 0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
data4 0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
data4 0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
data4 0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
data4 0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
data4 0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
data4 0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
data4 0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
data4 0x66333355,0x66333355, 0x11858594,0x11858594
data4 0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
data4 0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
data4 0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
data4 0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
data4 0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
data4 0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
data4 0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
data4 0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
data4 0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
data4 0xafdada75,0xafdada75, 0x42212163,0x42212163
data4 0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
data4 0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
data4 0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
data4 0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
data4 0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
data4 0x884444cc,0x884444cc, 0x2e171739,0x2e171739
data4 0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
data4 0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
data4 0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
data4 0x3219192b,0x3219192b, 0xe6737395,0xe6737395
data4 0xc06060a0,0xc06060a0, 0x19818198,0x19818198
data4 0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
data4 0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
data4 0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
data4 0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
data4 0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
data4 0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
data4 0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
data4 0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
data4 0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
data4 0x924949db,0x924949db, 0x0c06060a,0x0c06060a
data4 0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
data4 0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
data4 0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
data4 0x399191a8,0x399191a8, 0x319595a4,0x319595a4
data4 0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
data4 0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
data4 0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
data4 0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
data4 0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
data4 0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
data4 0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
data4 0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
data4 0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
data4 0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
data4 0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
data4 0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
data4 0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
data4 0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
data4 0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
data4 0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
data4 0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
data4 0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
data4 0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
data4 0x904848d8,0x904848d8, 0x06030305,0x06030305
data4 0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
data4 0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
data4 0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
data4 0x17868691,0x17868691, 0x99c1c158,0x99c1c158
data4 0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
data4 0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
data4 0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
data4 0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
data4 0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
data4 0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
data4 0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
data4 0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
data4 0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
data4 0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
data4 0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
data4 0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
data4 0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
data4 0x824141c3,0x824141c3, 0x299999b0,0x299999b0
data4 0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
data4 0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
data4 0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
// Te4: the plain 256-byte AES S-box (SubBytes transformation).
data1 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
data1 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
data1 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
data1 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
data1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
data1 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
data1 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
data1 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
data1 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
data1 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
data1 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
data1 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
data1 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
data1 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
data1 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
data1 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
data1 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
data1 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
data1 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
data1 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
data1 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
data1 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
data1 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
data1 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
data1 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
data1 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
data1 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
data1 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
data1 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
data1 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
data1 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
data1 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
.size AES_Te#,2048+256 // HP-UX assembler fails to ".-AES_Te#"
// AES_Td: combined decryption lookup table, same layout as AES_Te above:
// 256 entries with each 32-bit value stored twice (2048 bytes), followed
// by the 256-byte inverse S-box (Td4). Constants fixed by FIPS-197;
// must not be modified.
.align 64
.global AES_Td#
.type AES_Td#,@object
AES_Td: data4 0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
data4 0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
data4 0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
data4 0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
data4 0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
data4 0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
data4 0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
data4 0x26354480,0x26354480, 0xb562a38f,0xb562a38f
data4 0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
data4 0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
data4 0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
data4 0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
data4 0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
data4 0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
data4 0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
data4 0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
data4 0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
data4 0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
data4 0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
data4 0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
data4 0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
data4 0x97513360,0x97513360, 0x62537f45,0x62537f45
data4 0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
data4 0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
data4 0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
data4 0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
data4 0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
data4 0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
data4 0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
data4 0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
data4 0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
data4 0x02036aba,0x02036aba, 0xed16825c,0xed16825c
data4 0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
data4 0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
data4 0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
data4 0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
data4 0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
data4 0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
data4 0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
data4 0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
data4 0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
data4 0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
data4 0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
data4 0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
data4 0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
data4 0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
data4 0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
data4 0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
data4 0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
data4 0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
data4 0x09808683,0x09808683, 0x322bed48,0x322bed48
data4 0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
data4 0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
data4 0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
data4 0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
data4 0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
data4 0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
data4 0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
data4 0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
data4 0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
data4 0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
data4 0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
data4 0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
data4 0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
data4 0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
data4 0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
data4 0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
data4 0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
data4 0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
data4 0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
data4 0xd731dcca,0xd731dcca, 0x42638510,0x42638510
data4 0x13972240,0x13972240, 0x84c61120,0x84c61120
data4 0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
data4 0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
data4 0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
data4 0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
data4 0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
data4 0x119448fa,0x119448fa, 0x47e96422,0x47e96422
data4 0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
data4 0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
data4 0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
data4 0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
data4 0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
data4 0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
data4 0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
data4 0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
data4 0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
data4 0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
data4 0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
data4 0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
data4 0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
data4 0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
data4 0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
data4 0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
data4 0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
data4 0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
data4 0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
data4 0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
data4 0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
data4 0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
data4 0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
data4 0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
data4 0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
data4 0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
data4 0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
data4 0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
data4 0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
data4 0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
data4 0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
data4 0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
data4 0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
data4 0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
data4 0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
data4 0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
data4 0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
data4 0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
data4 0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
data4 0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
data4 0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
data4 0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
data4 0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
data4 0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
data4 0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
data4 0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
data4 0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
data4 0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
data4 0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
data4 0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
// Td4: the 256-byte AES inverse S-box (InvSubBytes transformation).
data1 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
data1 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
data1 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
data1 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
data1 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
data1 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
data1 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
data1 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
data1 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
data1 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
data1 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
data1 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
data1 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
data1 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
data1 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
data1 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
data1 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
data1 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
data1 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
data1 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
data1 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
data1 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
data1 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
data1 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
data1 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
data1 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
data1 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
data1 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
data1 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
data1 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
data1 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
data1 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.size AES_Td#,2048+256 // HP-UX assembler fails to ".-AES_Td#"
#------------------------------------------------------------------------------
# NOTE(review): a dataset-join artifact was here
# ("al3xtjames/Clover | 17,199 | OsxAptioFixDrv/X64/AsmFuncsX64.S") -
# bare text that is not valid assembly. Everything below this point is a
# different source file, OsxAptioFixDrv/X64/AsmFuncsX64.S (x86-64, GAS
# AT&T syntax), concatenated after the IA-64 AES tables above.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
# Some assembler helper functions plus boot.efi kernel jump callback
#
# by dmazar
#
#------------------------------------------------------------------------------
# C callback method called on jump to kernel after boot.efi finishes
#.extern KernelEntryPatchJumpBack
# saved 64bit state
ASM_GLOBAL ASM_PFX(SavedCR3)
ASM_GLOBAL ASM_PFX(SavedGDTR)
ASM_GLOBAL ASM_PFX(SavedIDTR)
# addresses of relocated MyAsmCopyAndJumpToKernel code - filled by PrepareJumpFromKernel()
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernel32Addr)
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernel64Addr)
# kernel entry address - filled by KernelEntryPatchJump()
ASM_GLOBAL ASM_PFX(AsmKernelEntry)
# params for kernel image relocation - filled by KernelEntryPatchJumpBack()
ASM_GLOBAL ASM_PFX(AsmKernelImageStartReloc)
ASM_GLOBAL ASM_PFX(AsmKernelImageStart)
ASM_GLOBAL ASM_PFX(AsmKernelImageSize)
.data
# Variables accessed from both 32 and 64 bit code.
# 32 bit code has no RIP-relative addressing and reaches each field as
# DataBase + <field>Off, so the order and layout below is a contract
# between the two code paths - do not reorder or resize fields.
DataBase:
# 64 bit state (filled by MyAsmPrepareJumpFromKernel)
SavedGDTROff = . - DataBase
ASM_PFX(SavedGDTR): .word 0             # GDTR: 16 bit limit ...
 .quad 0                                # ... plus 64 bit base (sgdt format)
SavedIDTROff = . - DataBase
ASM_PFX(SavedIDTR): .word 0             # IDTR: 16 bit limit ...
 .quad 0                                # ... plus 64 bit base
 .p2align 3
SavedCR3Off = . - DataBase
ASM_PFX(SavedCR3): .quad 0              # saved page-table root (CR3)
SavedCSOff = . - DataBase
SavedCS: .word 0                        # saved 64 bit code segment selector
SavedDSOff = . - DataBase
SavedDS: .word 0                        # saved 64 bit data segment selector
# 32 bit state (filled by MyAsmJumpFromKernel32)
SavedGDTR32Off = . - DataBase
SavedGDTR32: .word 0
 .quad 0 # 32 bit is W, L, but not sure about 32/64 bit lgdt/sgdt
SavedIDTR32Off = . - DataBase
SavedIDTR32: .word 0
 .quad 0
SavedCS32Off = . - DataBase
SavedCS32: .word 0
SavedDS32Off = . - DataBase
SavedDS32: .word 0
SavedESP32Off = . - DataBase
SavedESP32: .long 0                     # saved 32 bit stack pointer
 .p2align 3
# address of relocated MyAsmCopyAndJumpToKernel32 - 64 bit
MyAsmCopyAndJumpToKernel32AddrOff = . - DataBase
ASM_PFX(MyAsmCopyAndJumpToKernel32Addr): .quad 0
# address of relocated MyAsmCopyAndJumpToKernel64 - 64 bit
MyAsmCopyAndJumpToKernel64AddrOff = . - DataBase
ASM_PFX(MyAsmCopyAndJumpToKernel64Addr): .quad 0
# kernel entry - 64 bit
AsmKernelEntryOff = . - DataBase
ASM_PFX(AsmKernelEntry): .quad 0
#
# for copying kernel image from reloc block to proper mem place
#
# kernel image start in reloc block (source) - 64 bit
AsmKernelImageStartRelocOff = . - DataBase
ASM_PFX(AsmKernelImageStartReloc): .quad 0
# kernel image start (destination) - 64 bit
AsmKernelImageStartOff = . - DataBase
ASM_PFX(AsmKernelImageStart): .quad 0
# kernel image size - 64 bit
AsmKernelImageSizeOff = . - DataBase
ASM_PFX(AsmKernelImageSize): .quad 0
 .p2align 3
# GDT not used since we are reusing UEFI state
# but left here in case will be needed.
#
# GDTR record (limit + base, lgdt format)
GDTROff = . - DataBase
GDTR: .word L_GDT_LEN # GDT limit
GDTR_BASE: .quad 0 # GDT base - needs to be set in code
 .p2align 3
# GDT table
GDT_BASE:
# null descriptor (selector 0x00 - mandatory first entry)
NULL_SEL = . - GDT_BASE # 0x00
 .word 0 # limit 15:0
 .word 0 # base 15:0
 .byte 0 # base 23:16
 .byte 0 # type
 .byte 0 # limit 19:16, flags
 .byte 0 # base 31:24
# 64 bit code segment descriptor
CODE64_SEL = . - GDT_BASE # 0x08
 .word 0xFFFF # limit 0xFFFFF
 .word 0 # base 0
 .byte 0
 .byte 0x9A # P=1 | DPL=00 | S=1 (User) # Type=A=1010: Code/Data=1 | C:Conforming=0 | R:Readable=1 | A:Accessed=0
 .byte 0xAF # Flags=A=1010: G:Granularity=1 (4K) | D:Default Operand Size=0 (in long mode) | L:Long=1 (64 bit) | AVL=0
 .byte 0
# 32 bit and 64 bit data segment descriptor (in 64 bit almost all is ignored, so can be reused)
DATA_SEL = . - GDT_BASE # 0x10
 .word 0xFFFF # limit 0xFFFFF
 .word 0 # base 0
 .byte 0
 .byte 0x92 # P=1 | DPL=00 | S=1 (User) # Type=2=0010: Code/Data=0 | E:Expand-Down=0 | W:Writable=1 | A:Accessed=0
 .byte 0xCF # Flags=C=1100: G:Granularity=1 (4K) | D/B=1 D not used when E=0, for stack B=1 means 32 bit stack | L:Long=0 not used | AVL=0
 .byte 0
# 32 bit code segment descriptor
CODE32_SEL = . - GDT_BASE # 0x18
 .word 0xFFFF # limit 0xFFFFF
 .word 0 # base 0
 .byte 0
 .byte 0x9A # P=1 | DPL=00 | S=1 (User) # Type=A=1010: Code/Data=1 | C:Conforming=0 | R:Readable=1 | A:Accessed=0
 .byte 0xCF # Flags=C=1100: G:Granularity=1 (4K) | D:Default Operand Size=0 (in long mode) | L:Long=0 (32 bit) | AVL=0
 .byte 0
GDT_END:
L_GDT_LEN = . - GDT_BASE - 1            # GDT limit = size - 1, per lgdt convention
.text
.code64
#------------------------------------------------------------------------------
# UINT64
# EFIAPI
# MyAsmReadSp (
#   VOID
#   );
#
# Returns the stack pointer as the caller sees it: the value RSP had at
# the call site, i.e. our current RSP plus 8 to skip our return address.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(MyAsmReadSp)
ASM_PFX(MyAsmReadSp):
	leaq	8(%rsp), %rax		# rax = caller's RSP in one instruction
	ret
#------------------------------------------------------------------------------
# VOID
# EFIAPI
# MyAsmPrepareJumpFromKernel (
#   );
#
# Snapshots the current 64 bit CPU state (GDTR, IDTR, CR3, CS, DS) into
# the DataBase variables so the mode-switch code can restore long mode
# later, and patches MyAsmEntryPatchCode with the address of
# MyAsmJumpFromKernel. Must run while still in UEFI 64 bit context.
# Clobbers: rax, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(MyAsmPrepareJumpFromKernel)
ASM_PFX(MyAsmPrepareJumpFromKernel):
	# save 64 bit state
	sgdt	ASM_PFX(SavedGDTR)(%rip)
	sidt	ASM_PFX(SavedIDTR)(%rip)
	movq	%cr3, %rax
	movq	%rax, ASM_PFX(SavedCR3)(%rip)
	mov	%cs, SavedCS(%rip)
	mov	%ds, SavedDS(%rip)
	# pass DataBase to 32 bit code
	# note: only the low 32 bits are stored - assumes the image resides
	# below 4 GB so 32 bit code can address it (TODO confirm with loader)
	lea	DataBase(%rip), %rax
	movl	%eax, DataBaseAdr(%rip)
	# prepare MyAsmEntryPatchCode:
	# patch MyAsmEntryPatchCode with address of MyAsmJumpFromKernel
	# (again truncated to 32 bits - same below-4GB assumption)
	lea	ASM_PFX(MyAsmJumpFromKernel)(%rip), %rax
	movl	%eax, MyAsmEntryPatchCodeJumpFromKernelPlaceholder(%rip)
	ret
#------------------------------------------------------------------------------
# Code that is used for patching kernel entry to jump back
# to our code (to MyAsmJumpFromKernel):
# - load ecx (rcx) with address to MyAsmJumpFromKernel
# - jump to MyAsmJumpFromKernel
# The same generated opcode must run properly in both 32 and 64 bit.
# 64 bit:
# - we must set rcx to 0 (upper 4 bytes) before loading ecx with address (lower 4 bytes of rcx)
# - this requires xor %rcx, %rcx
# - and that opcode contains 0x48 in front of 32 bit xor %ecx, %ecx
# 32 bit:
# - 0x48 opcode is dec %eax in 32 bit
# - and then we must inc %eax later if 32 bit is detected in MyAsmJumpFromKernel
#
# This code is patched with address of MyAsmJumpFromKernel
# (into MyAsmEntryPatchCodeJumpFromKernelPlaceholder)
# and then copied to kernel entry address by KernelEntryPatchJump().
#
# WARNING: these bytes are copied verbatim and executed at the kernel
# entry point; the 10 byte total length is hard-coded in the "sub $10"
# return-address math in MyAsmJumpFromKernel32/64 - do not change the
# encoding.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(MyAsmEntryPatchCode)
ASM_PFX(MyAsmEntryPatchCode):
 .code32
	dec	%eax				# -> 48 (REX.W prefix when executed in 64 bit)
	xor	%ecx, %ecx			# -> 31 C9
	.byte	0xb9	# movl $0x11223344, %ecx -> B9 44 33 22 11
MyAsmEntryPatchCodeJumpFromKernelPlaceholder:	# patched with &MyAsmJumpFromKernel
	.long	0x11223344
	call	*%ecx				# -> FF D1 (call leaves kernel entry + 10 on stack)
#	jmp	*%ecx				# -> FF E1
#	.code64
#	xor	%rcx, %rcx			# -> 48 31 C9
#	movl	$0x11223344, %ecx		# -> B9 44 33 22 11
#	call	*%rcx				# -> FF D1
#	#jmp	*%rcx				# -> FF E1
ASM_GLOBAL ASM_PFX(MyAsmEntryPatchCodeEnd)
ASM_PFX(MyAsmEntryPatchCodeEnd):
#------------------------------------------------------------------------------
# MyAsmJumpFromKernel
#
# Callback from boot.efi - this is where we jump when boot.efi jumps to kernel.
#
# - test if we are in 32 bit or in 64 bit (by reading EFER.LME)
# - if 64 bit, then jump to MyAsmJumpFromKernel64
# - else just continue with MyAsmJumpFromKernel32
#
# On entry: eax/rax = bootArgs pointer (kernel calling convention);
# return address of the patch-code "call" is on the stack.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(MyAsmJumpFromKernel)
ASM_PFX(MyAsmJumpFromKernel):
	# writing in 32 bit, but the same opcode bytes must run in 64 bit also
 .code32
	push	%eax			# save bootArgs pointer to stack
	movl	$0xc0000080, %ecx	# EFER MSR number.
	rdmsr				# Read EFER.
	bt	$8, %eax		# Check if LME==1 -> CF=1.
	pop	%eax
	jc	MyAsmJumpFromKernel64	# LME==1 -> jump to 64 bit code
	# otherwise, continue with MyAsmJumpFromKernel32
	# but first add 1 to eax since it was decremented in 32 bit
	# in MyAsmEntryPatchCode (in 64 bit the 0x48 byte was a REX prefix
	# and no decrement happened)
	inc	%eax
	# test the above code in 64 bit - above 32 bit code gives opcode
	# that is equivalent to following in 64 bit
	#.code64
	# push	%rax			# save bootArgs pointer to stack
	# movl	$0xc0000080, %ecx	# EFER MSR number.
	# rdmsr				# Read EFER.
	# bt	$8, %eax		# Check if LME==1 -> CF=1.
	# pop	%rax
	# jc	MyAsmJumpFromKernel64	# LME==1 -> jump to 64 bit code
	#				# (was "jnc" in old comment - typo;
	#				# the real code above uses jc)
#------------------------------------------------------------------------------
# MyAsmJumpFromKernel32
#
# Callback from boot.efi in 32 bit mode.
# State is prepared for kernel: 32 bit, no paging, pointer to bootArgs in eax.
#
# Flow:
#   1. recover the kernel entry point from the return address on the stack
#   2. save the 32 bit state into DataBase
#   3. switch back to long mode reusing GDT/IDT/CR3 saved by
#      MyAsmPrepareJumpFromKernel
#   4. call the C callback KernelEntryPatchJumpBack(bootArgs, 0)
#   5. drop back to 32 bit protected mode, restore 32 bit state
#   6. jump to relocated MyAsmCopyAndJumpToKernel32 to copy the kernel
#      image into place and enter it
#------------------------------------------------------------------------------
MyAsmJumpFromKernel32:
 .code32
	# save bootArgs pointer to edi
	mov	%eax, %edi
	# load ebx with DataBase - we'll access our saved data with it
	# (0xBB imm32 = mov $imm32, %ebx; the imm32 is the DataBaseAdr word
	# below, filled in by MyAsmPrepareJumpFromKernel)
	.byte 0xBB	# mov ebx, OFFSET DataBase
DataBaseAdr: .long 0
	# let's find out kernel entry point - we'll need it to jump back.
	# we are called with
	# dec %eax
	# xor %ecx, %ecx
	# mov ecx, 0x11223344
	# call ecx
	# and that left return addr on stack. those instructions
	# are 10 bytes long, and if we take address from stack and
	# subtract 10 from it, we will get kernel entry point.
	pop	%ecx
	sub	$10, %ecx
	# and save it
	movl	%ecx, AsmKernelEntryOff(%ebx)
	# lets save 32 bit state to be able to recover it later
	sgdt	SavedGDTR32Off(%ebx)
	sidt	SavedIDTR32Off(%ebx)
	mov	%cs, SavedCS32Off(%ebx)
	mov	%ds, SavedDS32Off(%ebx)
	movl	%esp, SavedESP32Off(%ebx)
	#
	# move to 64 bit mode ...
	#
	# FIXME: all this with interrupts enabled? no-no
	# load saved UEFI GDT, IDT
	# will become active after code segment is changed in long jump
	lgdt	SavedGDTROff(%ebx)
	lidt	SavedIDTROff(%ebx)
	# enable the 64-bit page-translation-table entries by setting CR4.PAE=1
	movl	%cr4, %eax
	bts	$5, %eax
	movl	%eax, %cr4
	# set the long-mode page tables - reuse saved UEFI tables
	# (low 32 bits of SavedCR3 - CR3 is 32 bit wide here anyway)
	movl	SavedCR3Off(%ebx), %eax
	movl	%eax, %cr3
	# enable long mode (set EFER.LME=1).
	movl	$0xc0000080, %ecx	# EFER MSR number.
	rdmsr				# Read EFER.
	bts	$8, %eax		# Set LME=1.
	wrmsr				# Write EFER.
	# enable paging to activate long mode (set CR0.PG=1)
	movl	%cr0, %eax		# Read CR0.
	bts	$31, %eax		# Set PG=1.
	movl	%eax, %cr0		# Write CR0.
	# jump to the 64-bit code segment: push saved 64 bit CS, then the
	# call pushes the return EIP; _RETF32's lret pops EIP and CS and so
	# "returns" right below with the 64 bit code segment active
	movw	SavedCSOff(%ebx), %ax
	push	%eax
	call	_RETF32
	#
	# aloha!
	# - if there is any luck, we are in 64 bit mode now
	#
 .code64
	#hlt	# uncomment to stop here for test
	# set segments
	movw	SavedDSOff(%rbx), %ax
	movl	%eax, %ds
	# set up stack ...
	# not sure if needed, but lets set ss to ds
	movl	%eax, %ss	# disables interrupts for 1 instruction to load rsp
	# lets align the stack to 16 bytes as the x64 ABI requires
#	movq	%rsp, %rax
#	andq	$0xfffffffffffffff0, %rax
#	movq	%rax, %rsp
	andq	$0xfffffffffffffff0, %rsp
	# call our C code
	# (MS x64 calling conv.: always reserve place for 4 args on stack)
	# KernelEntryPatchJumpBack (rcx = rax = bootArgs, rdx = 0 = 32 bit kernel jump)
	movq	%rdi, %rcx
	xor	%rdx, %rdx
	push	%rdx
	push	%rdx
	push	%rdx
	push	%rcx
	# TEST 64 bit jump
	# movq	%rdi, %rax
	# movq	ASM_PFX(AsmKernelEntry)(%rip), %rdx
	# jmp	*%rdx
	# TEST end
	# KernelEntryPatchJumpBack should be EFIAPI
	# and rbx should not be changed by EFIAPI calling convention
	call	ASM_PFX(KernelEntryPatchJumpBack)
	#hlt	# uncomment to stop here for test
	# return value in rax is bootArgs pointer
	mov	%rax, %rdi
	#
	# time to go back to 32 bit
	#
	# FIXME: all this with interrupts enabled? no-no
	# load saved 32 bit gdtr
	lgdt	SavedGDTR32Off(%rbx)
	# push saved cs and rip (with call) to stack and do retf
	# (_RETF64 = REX.W-prefixed lret: pops 8 byte RIP + 8 byte CS slot)
	movw	SavedCS32Off(%rbx), %ax
	push	%rax
	call	_RETF64
	#
	# ok, 32 bit opcode again from here
	#
 .code32
	# disable paging (set CR0.PG=0)
	movl	%cr0, %eax		# Read CR0.
	btr	$31, %eax		# Set PG=0.
	movl	%eax, %cr0		# Write CR0.
	# disable long mode (set EFER.LME=0).
	movl	$0xc0000080, %ecx	# EFER MSR number.
	rdmsr				# Read EFER.
	btr	$8, %eax		# Set LME=0.
	wrmsr				# Write EFER.
	# flush the instruction pipeline with a jump
	jmp	toNext
toNext:
	#
	# we are in 32 bit protected mode, no paging
	#
	# now reload saved 32 bit state data
	lidt	SavedIDTR32Off(%ebx)
	movw	SavedDS32Off(%ebx), %ax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss	# disables interrupts for 1 instruction to load esp
	movl	SavedESP32Off(%ebx), %esp
	#
	# prepare vars for copying kernel to proper mem
	# and jump to kernel: set registers as needed
	# by MyAsmCopyAndJumpToKernel32
	#
	# boot args back from edi
	movl	%edi, %eax
	# kernel entry point
	movl	AsmKernelEntryOff(%ebx), %edx
	# source, destination and size for kernel copy
	movl	AsmKernelImageStartRelocOff(%ebx), %esi
	movl	AsmKernelImageStartOff(%ebx), %edi
	movl	AsmKernelImageSizeOff(%ebx), %ecx
	# address of relocated MyAsmCopyAndJumpToKernel32
	movl	MyAsmCopyAndJumpToKernel32AddrOff(%ebx), %ebx
	# note: ebx not valid as a pointer to DataBase any more
	#
	# jump to MyAsmCopyAndJumpToKernel32
	#
	jmp	*%ebx
# far-return helpers used to switch CS:
# _RETF64: the 0x48 byte is a REX.W prefix, making lret pop a 64 bit RIP
#          and CS slot (used when leaving 64 bit mode)
# _RETF32: plain lret, pops 32 bit EIP and CS (used when entering 64 bit
#          mode from 32 bit code)
_RETF64:
	.byte	0x48
_RETF32:
	lret
#------------------------------------------------------------------------------
# MyAsmJumpFromKernel64
#
# Callback from boot.efi in 64 bit mode.
# State is prepared for kernel: 64 bit, pointer to bootArgs in rax.
#
# No mode switching needed here: recover the kernel entry point, call the
# C callback KernelEntryPatchJumpBack(bootArgs, 1), then jump to the
# relocated MyAsmCopyAndJumpToKernel64 to copy the kernel image into
# place and enter it.
#------------------------------------------------------------------------------
MyAsmJumpFromKernel64:
 .code64
	# let's find out kernel entry point - we'll need it to jump back.
	# the patch code's "call" left (kernel entry + 10) on the stack
	pop	%rcx
	sub	$10, %rcx
	# and save it
	movq	%rcx, ASM_PFX(AsmKernelEntry)(%rip)
	# call our C code
	# (MS x64 calling conv.: always reserve place for 4 args on stack)
	# KernelEntryPatchJumpBack (rcx = rax = bootArgs, rdx = 1 = 64 bit kernel jump)
	movq	%rax, %rcx
	xor	%rdx, %rdx
	inc	%edx
	push	%rdx
	push	%rdx
	push	%rdx
	push	%rcx
	# KernelEntryPatchJumpBack should be EFIAPI
	call	ASM_PFX(KernelEntryPatchJumpBack)
	#hlt	# uncomment to stop here for test
	# return value in rax is bootArgs pointer
	#
	# prepare vars for copying kernel to proper mem
	# and jump to kernel: set registers as needed
	# by MyAsmCopyAndJumpToKernel64
	#
	# kernel entry point
	movq	ASM_PFX(AsmKernelEntry)(%rip), %rdx
	# source, destination and size for kernel copy
	movq	ASM_PFX(AsmKernelImageStartReloc)(%rip), %rsi
	movq	ASM_PFX(AsmKernelImageStart)(%rip), %rdi
	movq	ASM_PFX(AsmKernelImageSize)(%rip), %rcx
	# address of relocated MyAsmCopyAndJumpToKernel64
	movq	ASM_PFX(MyAsmCopyAndJumpToKernel64Addr)(%rip), %rbx
	#
	# jump to MyAsmCopyAndJumpToKernel64
	#
	jmp	*%rbx
	ret	# NOTE(review): unreachable - control left via jmp above
 .p2align 3
#------------------------------------------------------------------------------
# MyAsmCopyAndJumpToKernel
#
# This is the last part of the code - it will copy kernel image from reloc
# block to proper mem place and jump to kernel.
# There are separate versions for 32 and 64 bit.
# This code will be relocated (copied) to higher mem by PrepareJumpFromKernel().
#------------------------------------------------------------------------------
# MyAsmCopyAndJumpToKernel is a marker label for the start of the relocatable
# region (paired with MyAsmCopyAndJumpToKernelEnd to compute its size).
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernel)
ASM_PFX(MyAsmCopyAndJumpToKernel):
#------------------------------------------------------------------------------
# MyAsmCopyAndJumpToKernel32
#
# Expects:
# EAX = address of boot args (proper address, not from reloc block)
# EDX = kernel entry point
# ESI = start of kernel image in reloc block (source)
# EDI = proper start of kernel image (destination)
# ECX = kernel image size in bytes
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernel32)
ASM_PFX(MyAsmCopyAndJumpToKernel32):
.code32
#
# we will move double words (4 bytes)
# so adjust ECX to number of double words.
# just in case ECX is not multiple of 4 - inc by 1
# (the unconditional inc can copy up to 4 extra bytes - harmless here)
#
shrl $2, %ecx
incl %ecx
#
# copy kernel image from reloc block to proper mem place.
# all params should be already set:
# ECX = number of double words
# DS:ESI = source
# ES:EDI = destination
#
cld # direction is up
rep movsl
#
# and finally jump to kernel:
# EAX already contains bootArgs pointer,
# and EDX contains kernel entry point
#
jmp *%edx
#------------------------------------------------------------------------------
# MyAsmCopyAndJumpToKernel64
#
# Expects:
# RAX = address of boot args (proper address, not from reloc block)
# RDX = kernel entry point
# RSI = start of kernel image in reloc block (source)
# RDI = proper start of kernel image (destination)
# RCX = kernel image size in bytes
#------------------------------------------------------------------------------
.p2align 3
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernel64)
ASM_PFX(MyAsmCopyAndJumpToKernel64):
.code64
#
# we will move quad words (8 bytes)
# so adjust RCX to number of quad words.
# just in case RCX is not multiple of 8 - inc by 1
# (the unconditional inc can copy up to 8 extra bytes - harmless here)
#
shr $3, %rcx
inc %rcx
#
# copy kernel image from reloc block to proper mem place.
# all params should be already set:
# RCX = number of quad words
# RSI = source
# RDI = destination
#
cld # direction is up
rep movsq
#
# and finally jump to kernel:
# RAX already contains bootArgs pointer,
# and RDX contains kernel entry point
#
# hlt
jmp *%rdx
# End marker of the relocatable region; PrepareJumpFromKernel() copies
# everything between MyAsmCopyAndJumpToKernel and this label.
ASM_GLOBAL ASM_PFX(MyAsmCopyAndJumpToKernelEnd)
ASM_PFX(MyAsmCopyAndJumpToKernelEnd):
|
al3xtjames/Clover
| 35,744
|
CloverEFI/BootSector/boot1.s
|
; Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved.
;
; @APPLE_LICENSE_HEADER_START@
;
; Portions Copyright (c) 1999-2003 Apple Computer, Inc. All Rights
; Reserved. This file contains Original Code and/or Modifications of
; Original Code as defined in and that are subject to the Apple Public
; Source License Version 2.0 (the "License"). You may not use this file
; except in compliance with the License. Please obtain a copy of the
; License at http://www.apple.com/publicsource and read it before using
; this file.
;
; The Original Code and all software distributed under the License are
; distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
; EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
; INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
; FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the
; License for the specific language governing rights and limitations
; under the License.
;
; @APPLE_LICENSE_HEADER_END@
;
; Partition Boot Loader: boot1h
;
; This program is designed to reside in sector 0+1 of an HFS+ partition.
; It expects that the MBR has left the drive number in DL
; and a pointer to the partition entry in SI.
;
; This version requires a BIOS with EBIOS (LBA) support.
;
; This code is written for the NASM assembler.
; nasm boot1.s -o boot1h
;
; This version of boot1h tries to find a stage2 boot file in the root folder.
;
; NOTE: this is an experimental version with multiple extent support.
;
; Written by Tamás Kosárszky on 2008-04-14
;
;
; Set to 1 to enable obscure debug messages.
;
DEBUG EQU 0 ; 1 = assemble the %if DEBUG diagnostic helpers below
;
; Set to 1 to enable unused code.
;
UNUSED EQU 0 ; 1 = assemble the %if UNUSED (dead/experimental) paths
;
; Set to 1 to enable verbose mode.
;
VERBOSE EQU 0 ; 1 = log strings via BIOS teletype output
;
; Various constants.
;
NULL EQU 0
CR EQU 0x0D ; carriage return
LF EQU 0x0A ; line feed
mallocStart EQU 0x1000 ; start address of local workspace area
maxSectorCount EQU 64 ; maximum sector count for readSectors
maxNodeSize EQU 16384 ; presumably the max supported B-tree node size; not referenced in this chunk - TODO confirm
kSectorBytes EQU 512 ; sector size in bytes
kBootSignature EQU 0xAA55 ; boot sector signature
kBoot1StackAddress EQU 0xFFF0 ; boot1 stack pointer
kBoot1LoadAddr EQU 0x7C00 ; boot1 load address
kBoot1RelocAddr EQU 0xE000 ; boot1 relocated address
kBoot1Sector1Addr EQU kBoot1RelocAddr + kSectorBytes ; boot1 load address for sector 1
kHFSPlusBuffer EQU kBoot1Sector1Addr + kSectorBytes ; HFS+ Volume Header address
kBoot2Sectors EQU (480 * 1024 - 512) / kSectorBytes ; max size of 'boot' file in sectors = 448 but I want 472
kBoot2Segment EQU 0x2000 ; boot2 load segment
kBoot2Address EQU kSectorBytes ; boot2 load address
;
; Format of fdisk partition entry.
;
; The symbol 'part_size' is automatically defined as an `EQU'
; giving the size of the structure.
;
; Classic 16-byte MBR partition-table entry; only .lba is read by this
; loader (see startReloc), the CHS fields are ignored.
struc part
.bootid resb 1 ; bootable or not
.head resb 1 ; starting head, sector, cylinder
.sect resb 1 ;
.cyl resb 1 ;
.type resb 1 ; partition type
.endhead resb 1 ; ending head, sector, cylinder
.endsect resb 1 ;
.endcyl resb 1 ;
.lba resd 1 ; starting lba
.sectors resd 1 ; size in sectors
endstruc
;-------------------------------------------------------------------------
; HFS+ related structures and constants
;
; NASM multi-character constants are little-endian, so these match the
; big-endian on-disk signatures when the signature field is loaded as a
; little-endian word (see startReloc).
kHFSPlusSignature EQU 'H+' ; HFS+ volume signature
kHFSPlusCaseSignature EQU 'HX' ; HFS+ volume case-sensitive signature
kHFSPlusCaseSigX EQU 'X' ; upper byte of HFS+ volume case-sensitive signature
kHFSPlusExtentDensity EQU 8 ; 8 extent descriptors / extent record
;
; HFSUniStr255
;
; Length-prefixed UTF-16 string (length word + up to 255 code units).
struc HFSUniStr255
.length resw 1
.unicode resw 255
endstruc
;
; HFSPlusExtentDescriptor
;
; One contiguous run of allocation blocks (8 bytes on disk).
struc HFSPlusExtentDescriptor
.startBlock resd 1
.blockCount resd 1
endstruc
;
; HFSPlusForkData
;
; Size plus first 8 extents of one file fork; further extents live in
; the Extents Overflow B-tree.
struc HFSPlusForkData
.logicalSize resq 1
.clumpSize resd 1
.totalBlocks resd 1
.extents resb kHFSPlusExtentDensity * HFSPlusExtentDescriptor_size
endstruc
;
; HFSPlusVolumeHeader
;
; On-disk HFS+ volume header (all multi-byte fields big-endian); loaded
; at kHFSPlusBuffer by startReloc.
struc HFSPlusVolumeHeader
.signature resw 1 ; 'H+' or 'HX'
.version resw 1
.attributes resd 1
.lastMountedVersion resd 1
.journalInfoBlock resd 1
.createDate resd 1
.modifyDate resd 1
.backupDate resd 1
.checkedDate resd 1
.fileCount resd 1
.folderCount resd 1
.blockSize resd 1 ; allocation block size in bytes
.totalBlocks resd 1
.freeBlocks resd 1
.nextAllocation resd 1
.rsrcClumpSize resd 1
.dataClumpSize resd 1
.nextCatalogID resd 1
.writeCount resd 1
.encodingsBitmap resq 1
.finderInfo resd 8
.allocationFile resb HFSPlusForkData_size
.extentsFile resb HFSPlusForkData_size ; fork of the Extents Overflow B-tree
.catalogFile resb HFSPlusForkData_size ; fork of the Catalog B-tree
.attributesFile resb HFSPlusForkData_size
.startupFile resb HFSPlusForkData_size
endstruc
;
; B-tree related structures and constants
;
kBTIndexNode EQU 0 ; BTNodeDescriptor.kind value for index nodes
kBTMaxRecordLength EQU 264 ; sizeof(kHFSPlusFileThreadRecord)
kHFSRootParentID EQU 1 ; Parent ID of the root folder
kHFSRootFolderID EQU 2 ; Folder ID of the root folder
kHFSExtentsFileID EQU 3 ; File ID of the extents overflow file
kHFSCatalogFileID EQU 4 ; File ID of the catalog file
kHFSPlusFileRecord EQU 0x200 ; on-disk record type 0x0002 (big-endian) read as a little-endian word
kForkTypeData EQU 0
kForkTypeResource EQU 0xFF
;
; BTNodeDescriptor
;
; 14-byte header at the start of every B-tree node.
struc BTNodeDescriptor
.fLink resd 1
.bLink resd 1
.kind resb 1 ; node type, e.g. kBTIndexNode
.height resb 1
.numRecords resw 1
.reserved resw 1
endstruc
;
; BTHeaderRec
;
; First record of node 0; describes the whole B-tree.
struc BTHeaderRec
.treeDepth resw 1
.rootNode resd 1 ; node ID where lookups start
.leafRecords resd 1
.firstLeafNode resd 1
.lastLeafNode resd 1
.nodeSize resw 1 ; node size in bytes
.maxKeyLength resw 1
.totalNodes resd 1
.freeNodes resd 1
.reserved1 resw 1
.clumpSize resd 1
.btreeType resb 1
.keyCompareType resb 1
.attributes resd 1
.reserved3 resd 16
endstruc
;
; BTIndexRec
;
; Index-node record payload: node ID of the child node (big-endian on disk).
struc BTIndexRec
.childID resd 1
endstruc
;
; HFSPlusCatalogKey
;
struc HFSPlusCatalogKey
;
; won't use the keyLength field for easier addressing data inside this structure
;
;.keyLength resw 1
.parentID resd 1
.nodeName resb HFSUniStr255_size
endstruc
;
; HFSPlusExtentKey
;
struc HFSPlusExtentKey
;
; won't use the keyLength field for easier addressing data inside this structure
;
;.keyLength resw 1
.forkType resb 1 ; kForkTypeData or kForkTypeResource
.pad resb 1
.fileID resd 1
.startBlock resd 1 ; first allocation block covered by the record
endstruc
;
; HFSPlusBSDInfo
;
struc HFSPlusBSDInfo
.ownerID resd 1
.groupID resd 1
.adminFlags resb 1
.ownerFlags resb 1
.fileMode resw 1
.special resd 1
endstruc
;
; FileInfo
;
struc FileInfo
.fileType resd 1
.fileCreator resd 1
.finderFlags resw 1
.location resw 2
.reservedField resw 1
endstruc
;
; ExtendedFileInfo
;
struc ExtendedFileInfo
.reserved1 resw 4
.extFinderFlags resw 1
.reserved2 resw 1
.putAwayFolderID resd 1
endstruc
;
; HFSPlusCatalogFile
;
; Catalog leaf record for a file; findRootBoot reads .fileID and .dataFork.
struc HFSPlusCatalogFile
.recordType resw 1 ; kHFSPlusFileRecord for files
.flags resw 1
.reserved1 resd 1
.fileID resd 1
.createDate resd 1
.contentModDate resd 1
.attributeModDate resd 1
.accessDate resd 1
.backupDate resd 1
.permissions resb HFSPlusBSDInfo_size
.userInfo resb FileInfo_size
.finderInfo resb ExtendedFileInfo_size
.textEncoding resd 1
.reserved2 resd 1
.dataFork resb HFSPlusForkData_size
.resourceFork resb HFSPlusForkData_size
endstruc
;
; Macros.
;
; jmpabs: absolute near jump via push+ret (NASM would otherwise emit a
; relative displacement, which is wrong across the code relocation).
%macro jmpabs 1
push WORD %1
ret
%endmacro
; DebugCharMacro: print a character, then wait for a key press.
%macro DebugCharMacro 1
pushad
mov al, %1
call print_char
call getc
popad
%endmacro
; PrintCharMacro: print a character, preserving all registers.
%macro PrintCharMacro 1
pushad
mov al, %1
call print_char
popad
%endmacro
; PutCharMacro: print the character already in AL (argument unused).
%macro PutCharMacro 1
call print_char
%endmacro
; PrintHexMacro: print EAX as hex (argument unused).
%macro PrintHexMacro 1
call print_hex
%endmacro
%macro PrintString 1
mov si, %1
call print_string
%endmacro
%macro LogString 1
mov di, %1
call log_string
%endmacro
; The function-style wrappers expand to nothing unless DEBUG is set,
; so debug output costs no space in release builds.
%if DEBUG
%define DebugChar(x) DebugCharMacro x
%define PrintChar(x) PrintCharMacro x
%define PutChar(x) PutCharMacro
%define PrintHex(x) PrintHexMacro x
%else
%define DebugChar(x)
%define PrintChar(x)
%define PutChar(x)
%define PrintHex(x)
%endif
;--------------------------------------------------------------------------
; Start of text segment.
SEGMENT .text
ORG kBoot1RelocAddr
;--------------------------------------------------------------------------
; Boot code is loaded at 0:7C00h.
;
; Entry state (see file header): DL = BIOS drive number, SI = pointer to
; our partition-table entry, loaded by the MBR at 0:7C00h.
start:
;
; Set up the stack to grow down from kBoot1StackSegment:kBoot1StackAddress.
; Interrupts should be off while the stack is being manipulated.
;
cli ; interrupts off
xor ax, ax ; zero ax
mov ss, ax ; ss <- 0
mov sp, kBoot1StackAddress ; sp <- top of stack
sti ; reenable interrupts
mov ds, ax ; ds <- 0
mov es, ax ; es <- 0
;
; Relocate boot1 code.
;
push si
mov si, kBoot1LoadAddr ; si <- source
mov di, kBoot1RelocAddr ; di <- destination
cld ; auto-increment SI and/or DI registers
mov cx, kSectorBytes ; CX = 512: copy one full sector
rep movsb ; repeat string move (byte) operation
pop si
;
; Code relocated, jump to startReloc in relocated location.
;
; FIXME: Is there any way to instruct NASM to compile a near jump
; using absolute address instead of relative displacement?
;
jmpabs startReloc
;--------------------------------------------------------------------------
; Start execution from the relocated location.
;
; Running from the relocated copy at kBoot1RelocAddr now.
; DL and SI still hold the values passed in by the MBR.
startReloc:
;
; Initializing global variables.
;
mov eax, [si + part.lba]
mov [gPartLBA], eax ; save the current partition LBA offset
mov [gBIOSDriveNumber], dl ; save BIOS drive number
mov WORD [gMallocPtr], mallocStart ; set free space pointer
;
; Loading upper 512 bytes of boot1h and HFS+ Volume Header.
;
xor ecx, ecx ; sector 1 of current partition
inc ecx
mov al, 2 ; read 2 sectors: sector 1 of boot1h + HFS+ Volume Header
mov edx, kBoot1Sector1Addr
call readLBA
;
; Initializing more global variables.
;
mov eax, [kHFSPlusBuffer + HFSPlusVolumeHeader.blockSize]
bswap eax ; convert to little-endian
shr eax, 9 ; convert to sector unit
mov [gBlockSize], eax ; save blockSize as little-endian sector unit!
;
; Looking for HFSPlus ('H+') or HFSPlus case-sensitive ('HX') signature.
;
mov ax, [kHFSPlusBuffer + HFSPlusVolumeHeader.signature]
cmp ax, kHFSPlusCaseSignature
je findRootBoot
cmp ax, kHFSPlusSignature
jne error
;--------------------------------------------------------------------------
; Find stage2 boot file in a HFS+ Volume's root folder.
;
; Look up the catalog record of '/boot' (searchCatalogKey), then stream
; its data fork to kBoot2Segment:kBoot2Address and jump to it.
findRootBoot:
mov al, kHFSCatalogFileID
lea si, [searchCatalogKey]
lea di, [kHFSPlusBuffer + HFSPlusVolumeHeader.catalogFile + HFSPlusForkData.extents]
call lookUpBTree
jne error ; flags from lookUpBTree: ZF clear = key not found
lea si, [bp + BTree.recordDataPtr]
mov si, [si] ; SI = found record's data (HFSPlusCatalogFile)
cmp WORD [si], kHFSPlusFileRecord
jne error ; must be a file record, not a folder/thread
; EAX = Catalog File ID
; BX = read size in sectors
; ECX = file offset in sectors
; EDX = address of read buffer
; DI = address of HFSPlusForkData
;
; Use the second big-endian double-word as the file length in HFSPlusForkData.logicalSize
; (the low 32 bits of the big-endian 64-bit size; stage2 is far below 4 GB).
;
mov ebx, [si + HFSPlusCatalogFile.dataFork + HFSPlusForkData.logicalSize + 4]
bswap ebx ; convert file size to little-endian
add ebx, kSectorBytes - 1 ; adjust size before unit conversion (round up)
shr ebx, 9 ; convert file size to sector unit
cmp bx, kBoot2Sectors ; check if bigger than max stage2 size
ja error
mov eax, [si + HFSPlusCatalogFile.fileID]
bswap eax ; convert fileID to little-endian
xor ecx, ecx
mov edx, (kBoot2Segment << 4) + kBoot2Address
lea di, [si + HFSPlusCatalogFile.dataFork + HFSPlusForkData.extents]
call readExtent
%if VERBOSE
LogString(root_str)
%endif
boot2:
%if DEBUG
DebugChar ('!')
%endif
%if UNUSED
;
; Waiting for a key press.
;
mov ah, 0
int 0x16
%endif
; NOTE(review): sets a byte flag at physical 0x19004 before entering
; boot2 - the consumer of this flag is not visible in this file; confirm
; against the stage2 sources.
mov ax, 0x1900
mov es, ax
mov BYTE [es:4], 1
mov dl, [gBIOSDriveNumber] ; load BIOS drive number
jmp kBoot2Segment:kBoot2Address
error:
%if VERBOSE
LogString(error_str)
%endif
; Fatal: halt forever (interrupts still wake hlt, hence the loop).
hang:
hlt
jmp hang
;--------------------------------------------------------------------------
; readSectors - Reads more than 127 sectors using LBA addressing.
;
; Arguments:
; AX = number of 512-byte sectors to read (valid from 1-1280).
; EDX = pointer to where the sectors should be stored.
; ECX = sector offset in partition
;
; Returns:
; CF = 0 success
; 1 error
;
; Splits a large read into chunks of at most maxSectorCount sectors and
; feeds them to readLBA, advancing LBA and destination pointer each pass.
readSectors:
pushad
mov bx, ax ; BX = total sectors still to read
.loop:
xor eax, eax ; EAX = 0
mov al, bl ; assume we reached the last block.
cmp bx, maxSectorCount ; check if we really reached the last block
jb .readBlock ; yes, BX < MaxSectorCount
mov al, maxSectorCount ; no, read MaxSectorCount
.readBlock:
call readLBA
sub bx, ax ; decrease remaining sectors with the read amount
jz .exit ; exit if no more sectors left to be loaded
add ecx, eax ; adjust LBA sector offset
shl ax, 9 ; convert sectors to bytes (AL <= 64, so this fits in AX)
add edx, eax ; adjust target memory location
jmp .loop ; read remaining sectors
.exit:
popad
ret
;--------------------------------------------------------------------------
; readLBA - Read sectors from a partition using LBA addressing.
;
; Arguments:
; AL = number of 512-byte sectors to read (valid from 1-127).
; EDX = pointer to where the sectors should be stored.
; ECX = sector offset in partition
; [bios_drive_number] = drive number (0x80 + unit number)
;
; Returns:
; CF = 0 success
; 1 error
;
readLBA:
pushad ; save all registers
push es ; save ES
mov bp, sp ; save current SP
;
; Convert EDX to segment:offset model and set ES:BX
;
; Some BIOSes do not like offset to be negative while reading
; from hard drives. This usually leads to "boot1: error" when trying
; to boot from hard drive, while booting normally from USB flash.
; The routines, responsible for this are apparently different.
; Thus we split linear address slightly differently for these
; capricious BIOSes to make sure offset is always positive.
;
; Split: offset = addr & 0xFFF (always small/positive),
; segment = (addr >> 4) & 0xFF00, so segment*16 + offset == addr.
mov bx, dx ; save offset to BX
and bh, 0x0f ; keep low 12 bits
shr edx, 4 ; adjust linear address to segment base
xor dl, dl ; mask low 8 bits
mov es, dx ; save segment to ES
;
; Create the Disk Address Packet structure for the
; INT13/F42 (Extended Read Sectors) on the stack.
;
; push DWORD 0 ; offset 12, upper 32-bit LBA
push ds ; For sake of saving memory,
push ds ; push DS register, which is 0.
add ecx, [gPartLBA] ; offset 8, lower 32-bit LBA (partition-relative -> absolute)
push ecx
push es ; offset 6, memory segment
push bx ; offset 4, memory offset
xor ah, ah ; offset 3, must be 0
push ax ; offset 2, number of sectors
push WORD 16 ; offset 0-1, packet size
;
; INT13 Func 42 - Extended Read Sectors
;
; Arguments:
; AH = 0x42
; [bios_drive_number] = drive number (0x80 + unit number)
; DS:SI = pointer to Disk Address Packet
;
; Returns:
; AH = return status (success is 0)
; carry = 0 success
; 1 error
;
; Packet offset 2 indicates the number of sectors read
; successfully.
;
mov dl, [gBIOSDriveNumber] ; load BIOS drive number
mov si, sp ; DS:SI -> packet just built on the stack
mov ah, 0x42
int 0x13
; NOTE(review): the documented contract returns CF on error, but this
; jumps straight to the global error handler (hang) instead; the
; retry/reset path below is commented out.
jc error
;
; Issue a disk reset on error.
; Should this be changed to Func 0xD to skip the diskette controller
; reset?
;
; xor ax, ax ; Func 0
; int 0x13 ; INT 13
; stc ; set carry to indicate error
.exit:
mov sp, bp ; restore SP (drops the packet)
pop es ; restore ES
popad
ret
%if VERBOSE
;--------------------------------------------------------------------------
; Write a string with 'boot1: ' prefix to the console.
;
; Arguments:
; ES:DI pointer to a NULL terminated string.
;
; Clobber list:
; DI
;
log_string:
pushad
push di ; save caller's string pointer
mov si, log_title_str
call print_string ; print the CR/LF + "boot1: " prefix
pop si ; SI = caller's string (was in DI)
call print_string
popad
ret
;-------------------------------------------------------------------------
; Write a string to the console.
;
; Arguments:
; DS:SI pointer to a NULL terminated string.
;
; Clobber list:
; AX, BX, SI
;
print_string:
mov bx, 1 ; BH=0, BL=1 (blue)
.loop:
lodsb ; load a byte from DS:SI into AL
cmp al, 0 ; Is it a NULL?
je .exit ; yes, all done
mov ah, 0xE ; INT10 Func 0xE
int 0x10 ; display byte in tty mode
jmp .loop
.exit:
ret
%endif ; VERBOSE
%if DEBUG
;--------------------------------------------------------------------------
; Write the 4-byte value to the console in hex.
;
; Arguments:
; EAX = Value to be displayed in hex.
;
print_hex:
pushad
mov cx, WORD 4 ; 4 bytes = 8 nibbles
bswap eax ; print most-significant byte first
.loop:
push ax
ror al, 4 ; bring the high nibble of AL into place
call print_nibble ; display upper nibble
pop ax
call print_nibble ; display lower nibble
ror eax, 8 ; rotate next byte into AL
loop .loop
%if UNUSED
mov al, 10 ; line feed
call print_char
mov al, 13 ; carriage return
call print_char
%endif ; UNUSED
popad
ret
; Print the low nibble of AL as an ASCII hex digit (0-9, A-F).
print_nibble:
and al, 0x0f
add al, '0'
cmp al, '9'
jna .print_ascii
add al, 'A' - '9' - 1 ; skip from '9' to 'A' for values 10-15
.print_ascii:
call print_char
ret
;--------------------------------------------------------------------------
; getc - wait for a key press
;
getc:
pushad
mov ah, 0 ; INT16 Func 0: blocking keyboard read
int 0x16
popad
ret
;--------------------------------------------------------------------------
; Write a ASCII character to the console.
;
; Arguments:
; AL = ASCII character.
;
print_char:
pushad
mov bx, 1 ; BH=0, BL=1 (blue)
mov ah, 0x0e ; bios INT 10, Function 0xE
int 0x10 ; display byte in tty mode
popad
ret
%endif ; DEBUG
%if UNUSED
;--------------------------------------------------------------------------
; Convert null terminated string to HFSUniStr255
;
; Arguments:
; DS:DX pointer to a NULL terminated string.
; ES:DI pointer to result.
;
; Expand an ASCII string to a length-prefixed UTF-16LE (HFSUniStr255)
; string: each byte becomes a word (high byte 0), the first word of the
; result receives the character count.
ConvertStrToUni:
pushad ; save registers
push di ; save DI for unicode string length pointer
mov si, dx ; use SI as source string pointer
xor ax, ax ; AX = unicode character
mov cl, al ; CL = string length
.loop:
stosw ; store unicode character (length 0 at first run)
lodsb ; load next character to AL
inc cl ; increment string length count
cmp al, NULL ; check for string terminator
jne .loop
pop di ; restore unicode string length pointer
dec cl ; ignoring terminator from length count
mov [di], cl ; save string length (high byte of the length
; word is already 0 from the first stosw)
popad ; restore registers
ret
%endif ; UNUSED
;--------------------------------------------------------------------------
; Convert big-endian HFSUniStr255 to little-endian
;
; Arguments:
; DS:SI = pointer to big-endian HFSUniStr255
; ES:DI = pointer to result buffer
;
; Byte-swap a big-endian HFSUniStr255 into the result buffer and, on
; case-insensitive volumes ('H+'), fold ASCII 'A'-'Z' to lower case so
; the key compare matches HFS+ case-insensitive ordering.
ConvertHFSUniStr255ToLE:
pushad
lodsw ; length word
xchg ah, al
stosw
cmp al, 0 ; empty string?
je .exit
mov cx, ax ; CX = character count
.loop:
lodsw
xchg ah, al ; convert AX to little-endian
;
; When working with a case-sensitive HFS+ (HX) filesystem, we shouldn't change the case.
;
cmp BYTE [kHFSPlusBuffer + HFSPlusVolumeHeader.signature + 1], kHFSPlusCaseSigX
je .keepcase
or ax, ax
jne .convertToLE
dec ax ; NULL must be the strongest char: map U+0000 to 0xFFFF
; so it sorts after everything else
.convertToLE:
cmp ah, 0 ; only plain ASCII (high byte 0) is case-folded
ja .keepcase
cmp al, 'A'
jb .keepcase
cmp al, 'Z'
ja .keepcase
add al, 32 ; convert to lower-case
.keepcase:
stosw
loop .loop
.exit:
popad
ret
;--------------------------------------------------------------------------
; compare HFSPlusExtentKey structures
;
; Arguments:
; DS:SI = search key
; ES:DI = trial key
;
; Returns:
; [BTree.searchResult] = result
; FLAGS = relation between search and trial keys
;
; Shares its epilogue with compareHFSPlusCatalogKeys: both start with
; pushad, so jumping into the other routine's .exit /
; .searchKeyGreater / .trialKeyGreater labels is balanced. One of the
; three jumps below always fires, so execution never falls through into
; compareHFSPlusCatalogKeys itself.
compareHFSPlusExtentKeys:
pushad
mov dl, 0 ; DL = result of comparison, DH = bestGuess
mov eax, [si + HFSPlusExtentKey.fileID]
cmp eax, [di + HFSPlusExtentKey.fileID]
jne .checkFlags
; NOTE(review): compares the SEARCH key's forkType against the constant
; (not search vs trial); a non-data search key falls to .checkFlags with
; the flags of that byte compare - confirm this is intended.
cmp BYTE [si + HFSPlusExtentKey.forkType], kForkTypeData
jne .checkFlags
mov eax, [si + HFSPlusExtentKey.startBlock]
cmp eax, [di + HFSPlusExtentKey.startBlock]
je compareHFSPlusCatalogKeys.exit ; fully equal: DL = 0
.checkFlags:
; branch on the flags of whichever compare differed last
ja compareHFSPlusCatalogKeys.searchKeyGreater ; search key > trial key
jb compareHFSPlusCatalogKeys.trialKeyGreater ; search key < trial key
;--------------------------------------------------------------------------
; Compare HFSPlusCatalogKey structures
;
; Arguments:
; DS:SI = search key
; ES:DI = trial key
;
; Returns:
; [BTree.searchResult] = result
; FLAGS = relation between search and trial keys
;
compareHFSPlusCatalogKeys:
pushad
xor dx, dx ; DL = result of comparison, DH = bestGuess
xchg si, di
lodsd
mov ecx, eax ; ECX = trial parentID
xchg si, di
lodsd ; EAX = search parentID
cmp eax, ecx
ja .searchKeyGreater ; search parentID > trial parentID
jb .trialKeyGreater ; search parentID < trial parentID
.compareNodeName: ; search parentID = trial parentID
xchg si, di
lodsw
mov cx, ax ; CX = trial nodeName.length
xchg si, di
lodsw ; AX = search nodeName.length
cmp cl, 0 ; trial nodeName.length = 0? (e.g. thread records)
je .searchKeyGreater
cmp ax, cx
je .strCompare
ja .searchStrLonger
.trialStrLonger:
; shorter search string: if the common prefix ties, the trial key is
; greater; DH pre-records that outcome for the tie case
dec dh
mov cx, ax ; compare only the common prefix
jmp .strCompare
.searchStrLonger:
inc dh ; tie on the common prefix means search key is greater
.strCompare:
repe cmpsw ; word-wise compare of the (already LE) names
ja .searchKeyGreater
jb .trialKeyGreater
mov dl, dh ; prefixes equal: length difference decides
jmp .exit
.trialKeyGreater:
dec dl ; DL = -1
jmp .exit
.searchKeyGreater:
inc dl ; DL = +1
.exit:
mov [bp + BTree.searchResult], dl
cmp dl, 0 ; set flags to check relation between keys
popad
ret
;--------------------------------------------------------------------------
; Allocate memory
;
; Arguments:
; CX = size of requested memory
;
; Returns:
; BP = start address of allocated memory
;
; Clobber list:
; CX
;
; Bump allocator starting at mallocStart. Block layout: the first word of
; each block holds the start address of the PREVIOUS block (written by the
; previous malloc's trailing store); the remaining CX-2 bytes are
; zero-filled. [gMallocPtr] always points at the word holding the latest
; block's start, so the chain can be walked backwards.
malloc:
push ax ; save AX
push di ; save DI
mov di, [gMallocPtr] ; start address of free space
push di ; save free space start address
inc di ;
inc di ; keep the first word untouched
dec cx ; for the last memory block pointer.
dec cx ;
mov al, NULL ; fill with zero
rep stosb ; repeat fill (zeroes bytes 2..CX-1 of the block)
mov [gMallocPtr], di ; adjust free space pointer
pop bp ; BP = start address of allocated memory
mov [di], bp ; set start address of allocated memory at next
; allocation block's free space address.
pop di ; restore DI
pop ax ; restore AX
ret
%if UNUSED
;--------------------------------------------------------------------------
; Free allocated memory
;
; Returns:
; BP = start address of previously allocated memory
;
; NOTE(review): as written this loads the VALUE of gMallocPtr into BP and
; stores the same value back - the allocator is not rolled back and BP is
; the free pointer, not "the start address of previously allocated memory"
; as documented. A real one-block rollback would need an extra
; dereference (the first word of each block holds the previous block's
; start - see malloc). Dead code under %if UNUSED, so never exercised;
; verify against upstream boot1h before enabling.
free:
lea bp, [gMallocPtr]
mov bp, [bp]
mov [gMallocPtr], bp
ret
%endif ; UNUSED
;--------------------------------------------------------------------------
; Static data.
;
%if VERBOSE
root_str db '/boot', NULL
%endif
;--------------------------------------------------------------------------
; Pad the rest of the 512 byte sized sector with zeroes. The last
; two bytes is the mandatory boot sector signature.
;
; If the booter code becomes too large, then nasm will complain
; that the 'times' argument is negative.
pad_table_and_sig:
times 510-($-$$) db 0
dw kBootSignature
;
; Sector 1 code area
;
;--------------------------------------------------------------------------
; lookUpBTree - initializes a new BTree instance and
; look up for HFSPlus Catalog File or Extent Overflow keys
;
; Arguments:
; AL = kHFSPlusFileID (Catalog or Extents Overflow)
; SI = address of searchKey
; DI = address of HFSPlusForkData.extents
;
; Returns:
; BP = address of BTree instance
; ECX = rootNode's logical offset in sectors
;
lookUpBTree:
mov cx, BTree_size ; allocate memory with BTree_size
call malloc ; BP = start address of allocated memory.
mov [bp + BTree.fileID], al ; save fileID (byte)
mov edx, [di] ; first extent of current file
call blockToSector ; ECX = converted to sector unit
mov al, 1 ; 1 sector is enough for
xor edx, edx ; reading current file's header.
lea dx, [bp + BTree.BTHeaderBuffer] ; load into BTreeHeaderBuffer
call readLBA ; read
mov ax, [bp + BTree.BTHeaderBuffer + BTNodeDescriptor_size + BTHeaderRec.nodeSize]
xchg ah, al ; convert to little-endian
mov [bp + BTree.nodeSize], ax ; save nodeSize
;
; Always start the lookup process with the root node.
;
mov edx, [bp + BTree.BTHeaderBuffer + BTNodeDescriptor_size + BTHeaderRec.rootNode]
.readNode:
;
; Converting nodeID to sector unit
;
mov ax, [bp + BTree.nodeSize]
shr ax, 9 ; convert nodeSize to sectors
mov bx, ax ; BX = read sector count
cwde ; EAX = nodeSize in sectors (small, positive)
bswap edx ; convert node ID to little-endian
mul edx ; multiply with nodeSize converted to sector unit
mov ecx, eax ; ECX = file offset in BTree
; NOTE(review): dword load from the byte-sized fileID field; works only
; because malloc zero-fills the workspace around it - confirm the BTree
; struc layout (declared outside this chunk) keeps the next 3 bytes zero.
mov eax, [bp + BTree.fileID]
lea edx, [bp + BTree.nodeBuffer]
call readExtent
;
; Binary search over the node's records.
; AX = lowerBound = 0
;
xor ax, ax
;
; BX = upperBound = numRecords - 1
;
mov bx, [bp + BTree.nodeBuffer + BTNodeDescriptor.numRecords]
xchg bh, bl ; big-endian -> little-endian
dec bx
.bsearch:
cmp ax, bx
ja .checkResult ; jump if lowerBound > upperBound
mov cx, ax
add cx, bx
shr cx, 1 ; test index = (lowerBound + upperBound) / 2
call getBTreeRecord
%if UNUSED
pushad
jl .csearchLessThanTrial
jg .csearchGreaterThanTrial
PrintChar('=')
jmp .csearchCont
.csearchGreaterThanTrial:
PrintChar('>')
jmp .csearchCont
.csearchLessThanTrial:
PrintChar('<')
.csearchCont:
popad
%endif ; UNUSED
.adjustBounds:
je .checkResult ; exact match found
jl .searchLessThanTrial
jg .searchGreaterThanTrial
jmp .bsearch
.searchLessThanTrial:
mov bx, cx
dec bx ; upperBound = index - 1
jmp .bsearch
.searchGreaterThanTrial:
mov ax, cx
inc ax ; lowerBound = index + 1
jmp .bsearch
.checkResult:
cmp BYTE [bp + BTree.searchResult], 0
jge .foundKey
; last compared key was greater than the search key: re-fetch the
; record at the final upperBound (the best smaller candidate)
mov cx, bx
call getBTreeRecord
.foundKey:
cmp BYTE [bp + BTree.nodeBuffer + BTNodeDescriptor.kind], kBTIndexNode
jne .exit ; leaf node: done
; index node: record data is a BTIndexRec; descend into its child
lea bx, [bp + BTree.recordDataPtr]
mov bx, [bx]
mov edx, [bx] ; EDX = big-endian child node ID
jmp .readNode
.exit:
cmp BYTE [bp + BTree.searchResult], 0 ; ZF set = exact match for the caller
ret
;--------------------------------------------------------------------------
; getBTreeRecord - read and compare BTree record
;
; Arguments:
; CX = record index
; SI = address of search key
;
; Returns:
; [BTree.searchResult] = result of key compare
; [BTree.recordDataPtr] = address of record data
;
getBTreeRecord:
pushad
push si ; save SI
lea di, [bp + BTree.nodeBuffer] ; DI = start of nodeBuffer
push di ; use later
mov ax, [bp + BTree.nodeSize] ; get nodeSize
add di, ax ; DI = beyond nodeBuffer
; record offsets are stored as big-endian words packed at the END of the
; node, last offset first: slot for index CX sits at nodeEnd - 2*(CX+1)
inc cx ; increment index
shl cx, 1 ; * 2
sub di, cx ; DI = pointer to record
mov ax, [di] ; offset to record
xchg ah, al ; convert to little-endian
pop di ; start of nodeBuffer
add di, ax ; DI = address of record key
mov si, di ; save to SI
mov ax, [di] ; keyLength
xchg ah, al ; convert to little-endian
inc ax ; suppress keySize (2 bytes): the keyLength value
inc ax ; excludes the length field itself
add di, ax ; DI = address of record data
mov [bp + BTree.recordDataPtr], di ; save address of record data
lea di, [bp + BTree.trialKey]
push di ; save address of trialKey
lodsw ; suppress keySize (2 bytes)
;
; Don't need to compare as DWORD since all reserved CNIDs fits to a single byte
;
cmp BYTE [bp + BTree.fileID], kHFSCatalogFileID
je .prepareTrialCatalogKey
; Extents tree: build a little-endian trial HFSPlusExtentKey and select
; the extent compare routine (BX = address, called via "call bx" below).
.prepareTrialExtentKey:
mov bx, compareHFSPlusExtentKeys
movsw ; copy forkType + pad
mov cx, 2 ; copy fileID + startBlock
.extentLoop:
lodsd
bswap eax ; convert to little-endian
stosd
loop .extentLoop
jmp .exit
; Catalog tree: build a little-endian trial HFSPlusCatalogKey.
.prepareTrialCatalogKey:
mov bx, compareHFSPlusCatalogKeys
lodsd
bswap eax ; convert ParentID to little-endian
stosd
call ConvertHFSUniStr255ToLE ; convert nodeName to little-endian
.exit:
pop di ; restore address of trialKey
%if UNUSED
;
; Print catalog trial key
;
pushad
mov si, di
lodsd
PrintChar('k')
PrintHex()
lodsw
cmp ax, 0
je .printExit
mov cx, ax
.printLoop:
lodsw
call print_char
loop .printLoop
.printExit:
popad
;
;
;
%endif ; UNUSED
%if UNUSED
;
; Print extent trial key
;
pushad
PrintChar('k')
mov si, di
xor eax, eax
lodsw
PrintHex()
lodsd
PrintHex()
lodsd
PrintHex()
popad
;
;
;
%endif ; UNUSED
pop si ; restore SI
call bx ; call key compare proc
popad
ret
;--------------------------------------------------------------------------
; readExtent - read extents from a HFS+ file (multiple extent support)
;
; Arguments:
; EAX = Catalog File ID
; BX = read size in sectors
; ECX = file offset in sectors
; EDX = address of read buffer
; DI = address of HFSPlusForkData.extents
;
readExtent:
pushad
;
; Save Catalog File ID as part of a search HFSPlusExtentKey
; for a possible Extents Overflow lookup.
;
mov [bp + BTree.searchExtentKey + HFSPlusExtentKey.fileID], eax
mov [bp + BTree.readBufferPtr], edx ; flat 32-bit destination pointer
mov ax, bx
cwde ; EAX = read size (BX is a small positive count)
mov [bp + BTree.readSize], eax ; remaining sectors to read
mov ebx, ecx ; EBX = file offset
xor eax, eax
mov [bp + BTree.currentExtentOffs], eax ; current extent's start within the file (sectors)
.beginExtentBlock:
mov BYTE [bp + BTree.extentCount], 0 ; index into the current 8-descriptor record
.extentSearch:
cmp BYTE [bp + BTree.extentCount], kHFSPlusExtentDensity
jb .continue
; All 8 descriptors of this record are exhausted: fetch the next extent
; record from the Extents Overflow B-tree.
.getNextExtentBlock:
push ebx ; preserve file offset across the lookup
mov eax, [bp + BTree.currentExtentOffs]
;
; Converting sector unit to HFS+ allocation block unit.
;
xor edx, edx ; zero EDX before unsigned 32-bit divide
div DWORD [gBlockSize] ; divide with blockSize
;
; Preparing searchExtentKey's startBlock field.
;
mov [bp + BTree.searchExtentKey + HFSPlusExtentKey.startBlock], eax
mov al, kHFSExtentsFileID
lea si, [bp + BTree.searchExtentKey]
lea di, [kHFSPlusBuffer + HFSPlusVolumeHeader.extentsFile + HFSPlusForkData.extents]
call lookUpBTree
; POP does not modify flags, so the lookUpBTree result in ZF survives.
; Popping EBX before the branch keeps the stack balanced for the popad
; in .exit on the error path as well (previously the pushed EBX leaked,
; corrupting every register popad restored after a failed lookup).
pop ebx
jnz NEAR .exit
;
; BP points to the new workspace allocated by lookUpBTree.
;
lea di, [bp + BTree.recordDataPtr]
mov di, [di] ; DI = found extent record (8 descriptors)
;
; Switch back to the previous workspace.
;
; NOTE(review): this loads gMallocPtr's VALUE into BP and writes the same
; value back - the allocator is not rolled back and BP does not end up
; pointing at the caller's workspace. The expected rollback would be
; "mov [gMallocPtr], bp" then "mov bp, [bp]" (a block's first word holds
; the previous block's start - see malloc). Left unchanged pending
; verification against upstream boot1h; this path only runs for files
; fragmented into more than 8 extents.
lea bp, [gMallocPtr]
mov bp, [bp]
mov [gMallocPtr], bp
jmp .beginExtentBlock
.continue:
mov edx, [di + HFSPlusExtentDescriptor.blockCount]
call blockToSector ; ECX = converted current extent's blockCount to sectors
mov eax, [bp + BTree.currentExtentOffs] ; EAX = current extent's start offset (sector)
mov edx, eax
add edx, ecx ; EDX = next extent's start offset (sector)
cmp ebx, edx
mov [bp + BTree.currentExtentOffs], edx ; set currentExtentOffs as the next extent's start offset
jae .nextExtent ; skip this extent if file offset >= next extent's start offset
.foundExtent:
mov edx, ebx
sub edx, eax ; EDX = relative offset within current extent
mov eax, edx ; will be used below to determine read size
mov esi, [bp + BTree.readSize] ; ESI = remaining sectors to be read
add edx, esi
cmp edx, ecx ; test if relative offset + readSize fits to this extent
jbe .read ; read all remaining sectors from this extent
.splitRead:
sub ecx, eax ; read amount of sectors beginning at relative offset
mov esi, ecx ; of current extent up to the end of current extent
.read:
mov edx, [di + HFSPlusExtentDescriptor.startBlock]
call blockToSector ; ECX = extent start converted to sectors
add ecx, eax ; add relative offset -> absolute partition sector
push si
mov ax, si ; AX = sector count for this read
mov edx, [bp + BTree.readBufferPtr]
call readSectors
pop si
add ebx, esi ; advance file offset by the sectors just read
mov ax, si
cwde
; Fix: was "shl ax, 9" - a 16-bit shift truncates the byte advance for
; reads of 128+ sectors (e.g. a large fragment), corrupting
; readBufferPtr for every subsequent extent. Shifting the full EAX is
; identical for counts below 128 and correct above.
shl eax, 9 ; convert SI (read sector count) to byte unit
add [bp + BTree.readBufferPtr], eax
sub [bp + BTree.readSize], esi
jz .exit ; done when nothing remains to read
.nextExtent:
; NOTE(review): strides by 8 bytes; works because kHFSPlusExtentDensity
; (8) happens to equal HFSPlusExtentDescriptor_size, which is the
; semantically correct constant here.
add di, kHFSPlusExtentDensity
inc BYTE [bp + BTree.extentCount]
jmp .extentSearch
.exit:
popad
ret
;--------------------------------------------------------------------------
; Convert big-endian HFSPlus allocation block to sector unit
; (sectors = blockNumber * gBlockSize, where gBlockSize holds the
; allocation block size expressed in 512-byte sectors).
;
; Arguments:
;   EDX = allocation block number (big-endian, as stored on disk)
;
; Returns:
;   ECX = allocation block converted to sector unit (low 32 bits)
;
; Clobber list:
;   EDX
;
blockToSector:
push eax
mov eax, [gBlockSize] ; EAX = sectors per allocation block
bswap edx ; convert allocation block to little-endian
mul edx ; EDX:EAX = block number * sectors per block
mov ecx, eax ; return low 32 bits of the product in ECX
pop eax
ret
%if UNUSED
;--------------------------------------------------------------------------
; Convert sector unit to HFSPlus allocation block unit
; (blocks = sector / gBlockSize).
;
; Arguments:
;   EDX = sector
;
; Returns:
;   ECX = converted to allocation block unit
;
; Clobber list:
;   EDX
;
sectorToBlock:
push eax
mov eax, edx
xor edx, edx ; zero EDX: dividend is EDX:EAX
div DWORD [gBlockSize] ; divide by sectors-per-allocation-block
mov ecx, eax ; return quotient in ECX
pop eax
ret
%endif ; UNUSED
%if UNUSED
;--------------------------------------------------------------------------
; Convert big-endian BTree node ID to sector unit
; (sectors = nodeID * (nodeSize / 512)).
;
; Arguments:
;   EDX = node ID (big-endian)
;
; Returns:
;   ECX = node ID converted to sector unit (low 32 bits)
;
; Clobber list:
;   EDX
;
nodeToSector:
push eax
mov ax, [bp + BTree.nodeSize]
shr ax, 9 ; convert nodeSize to sectors
cwde ; widen AX into EAX (nodeSize/512 is small and positive)
bswap edx ; convert node ID to little-endian
mul edx ; EDX:EAX = nodeID * sectors per node
mov ecx, eax ; return low 32 bits of the product in ECX
pop eax
ret
%endif ; UNUSED
;--------------------------------------------------------------------------
; Static data.
;
%if VERBOSE
log_title_str db CR, LF, 'boot1: ', NULL
error_str db 'error', NULL
%endif
;
; Catalog B-tree search key for the boot file:
; parent = root folder, node name = 'boot' as UTF-16 code units.
;
searchCatalogKey dd kHFSRootFolderID
dw searchCatKeyNameLen
searchCatKeyName dw 'b', 'o', 'o', 't' ; must be lower case
searchCatKeyNameLen EQU ($ - searchCatKeyName) / 2 ; name length in UTF-16 units
;--------------------------------------------------------------------------
; Pad the rest of the 512 byte sized sector with zeroes. The last
; two bytes is the mandatory boot sector signature.
; NOTE(review): the pad actually runs to offset 1022, i.e. to the end of
; the SECOND 512-byte sector of this two-sector boot1 image.
;
pad_sector_1:
times 1022-($-$$) db 0
dw kBootSignature
;
; Local BTree variables
;
; Per-lookup workspace used by the B-tree search/read code. Instances are
; carved out of the malloc area and chained through .mallocLink so the
; previous workspace can be restored (see gMallocPtr usage above).
;
struc BTree
.mallocLink resw 1 ; pointer to previously allocated memory block
.fileID resd 1 ; will use as BYTE
.nodeSize resd 1 ; will use as WORD
.searchExtentKey resb HFSPlusExtentKey_size ; key for extents-overflow lookups
.searchResult resb 1
.trialKey resb kBTMaxRecordLength
.recordDataPtr resw 1 ; near pointer to the found record's data
.readBufferPtr resd 1 ; destination buffer for file reads
.currentExtentOffs resd 1 ; current extent's start offset, in sectors
.readSize resd 1 ; remaining sectors to be read
.extentCount resb 1 ; index of the extent currently being walked
ALIGNB 2
.BTHeaderBuffer resb kSectorBytes ; B-tree header node buffer (one sector)
.nodeBuffer resb maxNodeSize ; current B-tree node buffer
endstruc
;
; Global variables
;
; Placed immediately after the volume header copy inside the HFS+ buffer.
;
ABSOLUTE kHFSPlusBuffer + HFSPlusVolumeHeader_size
gPartLBA resd 1 ; partition start LBA (per name - not referenced in this chunk)
gBIOSDriveNumber resw 1 ; BIOS drive number (per name - not referenced in this chunk)
gBlockSize resd 1 ; HFS+ allocation block size, in 512-byte sectors
gMallocPtr resw 1 ; pointer to the most recently allocated workspace
; END
|
al3xtjames/Clover
| 36,668
|
CloverEFI/BootSector/start32.S
|
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* start32.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#.MODEL small
# NOTE(review): '.stack:' and '.486p:' below look like leftover MASM
# directives (.stack / .486p) from the original port; under GNU as they
# merely define two unused labels.
.stack:
.486p:
.code16
# FAT on-disk layout constants (512-byte blocks).
.equ FAT_DIRECTORY_ENTRY_SIZE, 0x020
.equ FAT_DIRECTORY_ENTRY_SHIFT, 5
.equ BLOCK_SIZE, 0x0200
.equ BLOCK_MASK, 0x01ff
.equ BLOCK_SHIFT, 9
.org 0x0
.global _start
_start:
Ia32Jump:
jmp BootSectorEntryPoint # JMP inst - 3 bytes
nop
# BIOS Parameter Block (BPB). These are placeholders; the installer is
# expected to copy the volume's real BPB values over them so the code
# below can address them off BP = 0x7c00.
OemId: .ascii "INTEL " # OemId - 8 bytes
SectorSize: .word 0 # Sector Size - 2 bytes
SectorsPerCluster: .byte 0 # Sector Per Cluster - 1 byte
ReservedSectors: .word 0 # Reserved Sectors - 2 bytes
NoFats: .byte 0 # Number of FATs - 1 byte
RootEntries: .word 0 # Root Entries - 2 bytes
Sectors: .word 0 # Number of Sectors - 2 bytes
Media: .byte 0 # Media - 1 byte
SectorsPerFat16: .word 0 # Sectors Per FAT for FAT12/FAT16 - 2 byte
SectorsPerTrack: .word 0 # Sectors Per Track - 2 bytes
Heads: .word 0 # Heads - 2 bytes
HiddenSectors: .long 0 # Hidden Sectors - 4 bytes
LargeSectors: .long 0 # Large Sectors - 4 bytes
#******************************************************************************
#
#The structure for FAT32 starting at offset 36 of the boot sector. (At this point,
#the BPB/boot sector for FAT12 and FAT16 differs from the BPB/boot sector for FAT32.)
#
#******************************************************************************
SectorsPerFat32: .long 0 # Sectors Per FAT for FAT32 - 4 bytes
ExtFlags: .word 0 # Mirror Flag - 2 bytes
FSVersion: .word 0 # File System Version - 2 bytes
RootCluster: .long 0 # 1st Cluster Number of Root Dir - 4 bytes
FSInfo: .word 0 # Sector Number of FSINFO - 2 bytes
BkBootSector: .word 0 # Sector Number of Bk BootSector - 2 bytes
Reserved: .fill 12,1,0 # Reserved Field - 12 bytes
PhysicalDrive: .byte 0 # Physical Drive Number - 1 byte
Reserved1: .byte 0 # Reserved Field - 1 byte
Signature: .byte 0 # Extended Boot Signature - 1 byte
VolId: .ascii " " # Volume Serial Number - 4 bytes
FatLabel: .ascii " " # Volume Label - 11 bytes
FileSystemType: .ascii "FAT32 " # File System Type - 8 bytes
BootSectorEntryPoint:
#ASSUME ds:@code
#ASSUME ss:@code
# Entry state (set up by BS.com, the first-stage loader):
# ds = 1000, es = 2000 + x (size of first cluster >> 4)
# cx = Start Cluster of EfiLdr
# dx = Start Cluster of Efivar.bin
# Re use the BPB data stored in Boot Sector
movw $0x7c00, %bp # BP -> our boot-sector image; BPB fields addressed off BP
pushw %cx # save EfiLdr start cluster for later
# Read Efivar.bin
# 1000:dx = DirectoryEntry of Efivar.bin -> BS.com has filled already
movw $0x1900, %ax
movw %ax, %es # ES = 0x1900 -> EFILDR parameter area at physical 0x19000
testw %dx, %dx
# jnz CheckVarStoreSize
# NOTE(review): the conditional jump above is commented out, so the
# CheckVarStoreSize/LoadVarStore paths below are unreachable and the
# flag byte at 0:19004 is always set to 1 ("no var store").
movb $1, %al
NoVarStore:
pushw %es
# Set the 5th byte start @ 0:19000 to non-zero indicating we should init var store header in DxeIpl
movb %al, %es:4
jmp SaveVolumeId
CheckVarStoreSize:
movw %dx, %di
cmpl $0x4000, %ds:2(%di) # var store must be exactly 16 KiB
movb $2, %al # flag 2 = exists but wrong size
jne NoVarStore
LoadVarStore:
movb $0, %al # flag 0 = var store loaded
movb %al, %es:4
movw (%di), %cx # CX = start cluster of Efivar.bin
# ES:DI = 1500:0
xorw %di, %di
pushw %es
movw $0x1500, %ax
movw %ax, %es
call ReadFile
SaveVolumeId:
popw %es
movw VolId(%bp), %ax # copy 4-byte volume serial number from the BPB...
movw %ax, %es:0 # Save Volume Id to 0:19000. we will find the correct volume according to this VolumeId
movw VolId+2(%bp), %ax
movw %ax, %es:2
# Read Efildr
popw %cx
# cx = Start Cluster of Efildr -> BS.com has filled already
# ES:DI = 2000:0, first cluster will be read again
xorw %di, %di # di = 0
movw $0x2000, %ax
movw %ax, %es
call ReadFile
movw %cs, %ax
movw %ax, %cs:JumpSegment # patch the far-jump segment below with our CS
JumpFarInstruction:
.byte 0xea # hand-assembled far JMP to JumpSegment:JumpOffset
JumpOffset:
.word 0x200
JumpSegment:
.word 0x2000
# ****************************************************************************
# ReadFile
#
# Arguments:
#   CX    = Start Cluster of File
#   ES:DI = Buffer to store file content read from disk
#
# Return:
#   (ES << 4 + DI) = end of file content Buffer
#
# Walks the FAT32 chain starting at CX, coalescing runs of consecutive
# clusters into single ReadBlocks calls. DI stays 0 throughout (ReadBlocks
# advances ES, not DI), so FAT sectors cached via DS:DI land at DS:0000,
# matching the "ds:0000 = CacheFatSectorBuffer" invariant below.
# ****************************************************************************
ReadFile:
# si = NumberOfClusters
# cx = ClusterNumber
# dx = CachedFatSectorNumber
# ds:0000 = CacheFatSectorBuffer
# es:di = Buffer to load file
# bx = NextClusterNumber
pusha
movw $1, %si # NumberOfClusters = 1
pushw %cx # Push Start Cluster onto stack
movw $0xfff, %dx # CachedFatSectorNumber = 0xfff (impossible value = cache empty)
FatChainLoop:
movw %cx, %ax # ax = ClusterNumber
andw $0xfff8, %ax # ax = ax & 0xfff8
cmpw $0xfff8, %ax # See if this is the last cluster
je FoundLastCluster # Jump if last cluster found
movw %cx, %ax # ax = ClusterNumber
shlw $2, %ax # FatOffset = ClusterNumber * 4 (4 bytes per FAT32 entry)
pushw %si # Save si
movw %ax, %si # si = FatOffset
shrw $BLOCK_SHIFT, %ax # ax = FatOffset >> BLOCK_SHIFT
addw ReservedSectors(%bp), %ax # ax = FatSectorNumber = ReservedSectors + (FatOffset >> BLOCK_SHIFT)
andw $BLOCK_MASK, %si # si = FatOffset & BLOCK_MASK
cmpw %dx, %ax # Compare FatSectorNumber to CachedFatSectorNumber
je SkipFatRead
movw $2, %bx # read 2 blocks so the entry is covered even near a sector end
pushw %es
pushw %ds
popw %es # temporarily ES = DS so the FAT lands in the cache buffer
call ReadBlocks # Read 2 blocks starting at AX storing at ES:DI
popw %es
movw %ax, %dx # CachedFatSectorNumber = FatSectorNumber
SkipFatRead:
movw (%si), %bx # bx = NextClusterNumber (low 16 bits of the FAT32 entry)
movw %cx, %ax # ax = ClusterNumber
popw %si # Restore si
decw %bx # bx = NextClusterNumber - 1
cmpw %cx, %bx # See if (NextClusterNumber-1)==ClusterNumber
jne ReadClusters
incw %bx # bx = NextClusterNumber
incw %si # NumberOfClusters++ (run of consecutive clusters grows)
movw %bx, %cx # ClusterNumber = NextClusterNumber
jmp FatChainLoop
ReadClusters:
incw %bx
popw %ax # ax = StartCluster
pushw %bx # StartCluster = NextClusterNumber
movw %bx, %cx # ClusterNumber = NextClusterNumber
subw $2, %ax # ax = StartCluster - 2 (data area starts at cluster 2)
xorb %bh, %bh
movb SectorsPerCluster(%bp), %bl # bx = SectorsPerCluster
mulw %bx # ax = (StartCluster - 2) * SectorsPerCluster
addw (%bp), %ax # ax = FirstClusterLBA + (StartCluster-2)*SectorsPerCluster
# NOTE(review): (%bp) reads the word at 0x7c00, i.e. the first two bytes
# of this image; presumably BS.com patches FirstClusterLBA there - verify.
pushw %ax # save start sector
movw %si, %ax # ax = NumberOfClusters
mulw %bx # ax = NumberOfClusters * SectorsPerCluster
movw %ax, %bx # bx = Number of Sectors
popw %ax # ax = Start Sector
call ReadBlocks
movw $1, %si # NumberOfClusters = 1
jmp FatChainLoop
FoundLastCluster:
popw %cx
popa
ret
# ****************************************************************************
# ReadBlocks - Reads a set of blocks from a block device
#
# AX    = Start LBA
# BX    = Number of Blocks to Read
# ES:DI = Buffer to store sectors read from disk
#
# Converts the LBA to CHS using geometry words at 0:7bfc (MaxSector) and
# 0:7bfe (MaxHead+1) - presumably stored there by BS.com, verify - then
# reads via INT 13h AH=02h, splitting transfers at track ends and at
# 64 KiB physical-address boundaries. On return ES has been advanced past
# the data read; DI is unchanged (restored by popa).
# NOTE(review): only the low 8 bits of the cylinder reach CH and the high
# bits are never folded into CL bits 6-7, so cylinders >= 256 would be
# read from the wrong place - presumably fine for the reported geometry.
# ****************************************************************************
# cx = Blocks
# bx = NumberOfBlocks
# si = StartLBA
ReadBlocks:
pusha
addl LBAOffsetForBootSector(%bp), %eax # Add LBAOffsetForBootSector to Start LBA
addl HiddenSectors(%bp), %eax # Add HiddenSectors to Start LBA
movl %eax, %esi # esi = Start LBA
movw %bx, %cx # cx = Number of blocks to read
ReadCylinderLoop:
movw $0x7bfc, %bp # bp = 0x7bfc (geometry words live just below us)
movl %esi, %eax # eax = Start LBA
xorl %edx, %edx # edx = 0
movzwl (%bp), %ebx # bx = MaxSector
divl %ebx # ax = StartLBA / MaxSector
incw %dx # dx = (StartLBA % MaxSector) + 1
movw (%bp), %bx # bx = MaxSector
subw %dx, %bx # bx = MaxSector - Sector
incw %bx # bx = MaxSector - Sector + 1
cmpw %bx, %cx # Compare (Blocks) to (MaxSector - Sector + 1)
jg LimitTransfer
movw %cx, %bx # bx = Blocks
LimitTransfer:
pushw %ax # save ax
movw %es, %ax # ax = es
shrw $(BLOCK_SHIFT-4), %ax # ax = Number of blocks into mem system
andw $0x7f, %ax # ax = Number of blocks into current 64K seg
addw %bx, %ax # ax = End Block number of transfer
cmpw $0x80, %ax # See if it crosses a 64K boundry
jle NotCrossing64KBoundry # Branch if not crossing 64K boundry
subw $0x80, %ax # ax = Number of blocks past 64K boundry
subw %ax, %bx # Decrease transfer size by block overage
NotCrossing64KBoundry:
popw %ax # restore ax
pushw %cx
movb %dl, %cl # cl = (StartLBA % MaxSector) + 1 = Sector
xorw %dx, %dx # dx = 0
divw 2(%bp) # ax = ax / (MaxHead + 1) = Cylinder
# dx = ax % (MaxHead + 1) = Head
pushw %bx # Save number of blocks to transfer
movb %dl, %dh # dh = Head
movw $0x7c00, %bp # bp = 0x7c00 (back to the boot-sector image)
movb PhysicalDrive(%bp), %dl # dl = Drive Number
movb %al, %ch # ch = Cylinder (low 8 bits only, see NOTE above)
movb %bl, %al # al = Blocks
movb $2, %ah # ah = Function 2 (read sectors, CHS)
movw %di, %bx # es:bx = Buffer address
int $0x13
jc DiskError
popw %bx
popw %cx
movzwl %bx, %ebx
addl %ebx, %esi # StartLBA = StartLBA + NumberOfBlocks
subw %bx, %cx # Blocks = Blocks - NumberOfBlocks
movw %es, %ax
shlw $(BLOCK_SHIFT-4), %bx # blocks -> paragraphs (32 per 512-byte block)
addw %bx, %ax
movw %ax, %es # es:di = es:di + NumberOfBlocks*BLOCK_SIZE
cmpw $0, %cx
jne ReadCylinderLoop
popa
ret
DiskError:
# Print "SError!" (red-on-black char/attribute pairs) to text-mode video
# RAM on the second screen row, then hang forever.
pushw %cs
popw %ds # DS = CS so DS:SI addresses ErrorString
leaw ErrorString, %si
movw $7, %cx # 7 character/attribute words
jmp PrintStringAndHalt
PrintStringAndHalt:
movw $0xb800, %ax # color text-mode video memory segment
movw %ax, %es
movw $160, %di # row 1, column 0 (80 columns * 2 bytes per cell)
rep
movsw # copy CX char/attr words from DS:SI to ES:DI
Halt:
jmp Halt # spin forever; nothing left to do after a disk error
ErrorString:
.byte 'S', 0x0c, 'E', 0x0c, 'r', 0x0c, 'r', 0x0c, 'o', 0x0c, 'r', 0x0c, '!', 0x0c
# .org 0x01fa # Will cause build break
LBAOffsetForBootSector:
.long 0x0
# .org 0x01fe # Will cause build break
.word 0xaa55
#******************************************************************************
#******************************************************************************
#******************************************************************************
.equ DELAY_PORT, 0x0ed # Port to use for 1uS delay
.equ KBD_CONTROL_PORT, 0x060 # 8042 control port
.equ KBD_STATUS_PORT, 0x064 # 8042 status port
.equ WRITE_DATA_PORT_CMD, 0x0d1 # 8042 command to write the data port
.equ ENABLE_A20_CMD, 0x0df # 8042 command to enable A20
#wiki
#enable_A20:
# cli
# call a20wait
# mov al,0xAD
# out 0x64,al
# call a20wait
# mov al,0xD0
# out 0x64,al
# call a20wait2
# in al,0x60
# push eax
# call a20wait
# mov al,0xD1
# out 0x64,al
# call a20wait
# pop eax
# or al,2
# out 0x60,al
# call a20wait
# mov al,0xAE
# out 0x64,al
# call a20wait
# sti
# ret
#a20wait:
# in al,0x64
# test al,2
# jnz a20wait
# ret
#a20wait2:
# in al,0x64
# test al,1
# jz a20wait2
# ret
# .org 0x200 # Will cause build break
jmp start
Em64String:
.byte 'E', 0x0c, 'm', 0x0c, '6', 0x0c, '4', 0x0c, 'T', 0x0c, ' ', 0x0c, 'U', 0x0c, 'n', 0x0c, 's', 0x0c, 'u', 0x0c, 'p', 0x0c, 'p', 0x0c, 'o', 0x0c, 'r', 0x0c, 't', 0x0c, 'e', 0x0c, 'd', 0x0c, '!', 0x0c
# ----------------------------------------------------------------------------
# start - second entry point (jumped to at image offset 0x200).
# Sets up flat real-mode segments and a stack, collects the E820 memory
# map, enables the A20 gate, patches and loads the GDT/IDT pointers, then
# sets CR0.PE and far-jumps into 32-bit protected-mode code at 0x20000.
# ----------------------------------------------------------------------------
start:
movw %cs, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss # DS = ES = SS = CS
movw $MyStack, %sp # stack grows down from MyStack (image offset 0xfe0)
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[160],'a'
# mov ax,cs
# mov es,ax
movl $0, %ebx # E820 continuation value starts at 0
leal MemoryMap, %edi
MemMapLoop:
movl $0xe820, %eax # INT 15h E820: query system address map
movl $20, %ecx # each descriptor is 20 bytes
movl $0x534d4150, %edx # 0x534d4150 = 'SMAP'
int $0x15
jc MemMapDone # carry set = unsupported or end of map
addl $20, %edi
cmpl $0, %ebx # EBX = 0 means this was the last descriptor
je MemMapDone
jmp MemMapLoop
MemMapDone:
leal MemoryMap, %eax
subl %eax, %edi # Get the size of the memory map
movl %edi, MemoryMapSize # Save the size of the memory map
xorl %ebx, %ebx
movw %cs, %bx # BX=segment
shll $4, %ebx # BX="linear" address of segment base
leal GDT_BASE(%ebx), %eax # EAX=PHYSICAL address of gdt
movl %eax, gdtr + 2 # Put address of gdt into the gdtr
leal IDT_BASE(%ebx), %eax # EAX=PHYSICAL address of idt
movl %eax, idtr + 2 # Put address of idt into the idtr
leal MemoryMapSize(%ebx), %edx # Physical base address of the memory map
addl $0x1000, %ebx # Source of EFI32
movl %ebx, JUMP+2 # patch the far-jump offset below
addl $0x1000, %ebx
movl %ebx, %esi # Source of EFILDR32
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[162],'b'
# mov ax,cs
# mov es,ax
#
# Enable A20 Gate
#
movw $0x2401, %ax # Enable A20 Gate via BIOS (INT 15h AX=2401h)
int $0x15
jnc A20GateEnabled # Jump if it succeeded
#WIKI -fast A20gate
#in al, 0x92
#or al, 2
#out 0x92, al
#
# If INT 15 Function 2401 is not supported, then attempt to Enable A20 manually.
#
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
outw %ax, $DELAY_PORT # Delay 1 uS
movb $WRITE_DATA_PORT_CMD, %al # 8042 cmd to write output port
outb %al, $KBD_STATUS_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
movb $ENABLE_A20_CMD, %al # gate address bit 20 on
outb %al, $KBD_CONTROL_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
movw $25, %cx # Delay 25 uS for the command to complete on the 8042
Delay25uS:
outw %ax, $DELAY_PORT # Delay 1 uS
loopl Delay25uS
Timeout8042:
A20GateEnabled:
movw $0x0008, %bx # Flat data descriptor (LINEAR_SEL)
#
# DISABLE INTERRUPTS - Entering Protected Mode
#
cli
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[164],'c'
# mov ax,cs
# mov es,ax
.byte 0x66 # operand-size override: 32-bit base in lgdt from 16-bit mode
lgdt gdtr
.byte 0x66
lidt idtr
movl %cr0, %eax
orb $1, %al # set CR0.PE (protected mode enable)
movl %eax, %cr0
JUMP:
# jmp far 0010:00020000
.byte 0x66 # hand-assembled 32-bit far jump; offset patched above
.byte 0xea
.long 0x00020000
.word 0x0010 # selector 0x10 = LINEAR_CODE_SEL
# ----------------------------------------------------------------------------
# Empty8042InputBuffer - poll the 8042 status port until its input buffer
# empties or CX wraps (65536 polls). On return ZF=1 means the buffer
# emptied; ZF=0 means timeout (callers test with JNZ). Clobbers AL, CX.
# ----------------------------------------------------------------------------
Empty8042InputBuffer:
movw $0, %cx # 65536 iterations before giving up
Empty8042Loop:
outw %ax, $DELAY_PORT # Delay 1us
inb $KBD_STATUS_PORT, %al # Read the 8042 Status Port
andb $0x2, %al # Check the Input Buffer Full Flag
loopnz Empty8042Loop # Loop until the input buffer is empty or a timeout of 65536 uS
ret
##############################################################################
# data
##############################################################################
.p2align 1
gdtr: .word GDT_END - GDT_BASE - 1 # GDT limit (size - 1)
.long 0 # (GDT base gets set above)
##############################################################################
# global descriptor table (GDT)
##############################################################################
.p2align 1
GDT_BASE:
# null descriptor
.equ NULL_SEL, .-GDT_BASE
.word 0 # limit 15:0
.word 0 # base 15:0
.byte 0 # base 23:16
.byte 0 # type
.byte 0 # limit 19:16, flags
.byte 0 # base 31:24
# linear data segment descriptor
.equ LINEAR_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# linear code segment descriptor
.equ LINEAR_CODE_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, execute/read (0x9A, not data)
.byte 0xCF # page-granular, 32-bit
.byte 0
# system data segment descriptor
.equ SYS_DATA_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# system code segment descriptor
.equ SYS_CODE_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, execute/read (0x9A, not data)
.byte 0xCF # page-granular, 32-bit
.byte 0
# spare segment descriptor
.equ SPARE3_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # all-zero: not present, unused
.byte 0
.byte 0
# spare segment descriptor
.equ SPARE4_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # all-zero: not present, unused
.byte 0
.byte 0
# spare segment descriptor
.equ SPARE5_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # all-zero: not present, unused
.byte 0
.byte 0
GDT_END:
.p2align 1
idtr: .word IDT_END - IDT_BASE - 1 # IDT limit (size - 1)
.long 0 # (IDT base gets set above)
##############################################################################
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
##############################################################################
#idt_tag db "IDT",0
.p2align 1
IDT_BASE:
# divide by zero (INT 0)
.equ DIV_ZERO_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# debug exception (INT 1)
.equ DEBUG_EXCEPT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# NMI (INT 2)
.equ NMI_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# soft breakpoint (INT 3)
.equ BREAKPOINT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# overflow (INT 4)
.equ OVERFLOW_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# bounds check (INT 5)
.equ BOUNDS_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid opcode (INT 6)
.equ INVALID_OPCODE_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# device not available (INT 7)
.equ DEV_NOT_AVAIL_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# double fault (INT 8)
.equ DOUBLE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Coprocessor segment overrun - reserved (INT 9)
.equ RSVD_INTR_SEL1, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid TSS (INT 0ah)
.equ INVALID_TSS_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# segment not present (INT 0bh)
.equ SEG_NOT_PRESENT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# stack fault (INT 0ch)
.equ STACK_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# general protection (INT 0dh)
.equ GP_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# page fault (INT 0eh)
.equ PAGE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Intel reserved - do not use (INT 0fh)
.equ RSVD_INTR_SEL2, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# floating point error (INT 10h)
.equ FLT_POINT_ERR_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# alignment check (INT 11h)
.equ ALIGNMENT_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# machine check (INT 12h)
.equ MACHINE_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# SIMD floating-point exception (INT 13h)
.equ SIMD_EXCEPTION_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# 84 unspecified descriptors, First 12 of them are reserved, the rest are avail
.fill 84 * 8, 1, 0
# IRQ 0 (System timer) - (INT 68h)
.equ IRQ0_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 1 (8042 Keyboard controller) - (INT 69h)
.equ IRQ1_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
.equ IRQ2_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 3 (COM 2) - (INT 6bh)
.equ IRQ3_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 4 (COM 1) - (INT 6ch)
.equ IRQ4_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 5 (LPT 2) - (INT 6dh)
.equ IRQ5_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 6 (Floppy controller) - (INT 6eh)
.equ IRQ6_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 7 (LPT 1) - (INT 6fh)
.equ IRQ7_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 8 (RTC Alarm) - (INT 70h)
.equ IRQ8_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 9 - (INT 71h)
.equ IRQ9_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 10 - (INT 72h)
.equ IRQ10_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 11 - (INT 73h)
.equ IRQ11_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 12 (PS/2 mouse) - (INT 74h)
.equ IRQ12_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 13 (Floating point error) - (INT 75h)
.equ IRQ13_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 14 (Secondary IDE) - (INT 76h)
.equ IRQ14_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 15 (Primary IDE) - (INT 77h)
.equ IRQ15_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.fill 8, 1, 0
IDT_END:
.p2align 1
MemoryMapSize: .long 0
MemoryMap: .long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.org 0x0fe0
# MyStack is the initial stack top ('start' sets SP here; the stack grows
# downward, away from the IVT redirect stubs that follow).
MyStack:
# below is the pieces of the IVT that is used to redirect INT 68h - 6fh
# back to INT 08h - 0fh when in real mode... It is 'org'ed to a
# known low address (20f00) so it can be set up by PlMapIrqToVect in
# 8259.c
int $8
iret
int $9
iret
int $10
iret
int $11
iret
int $12
iret
int $13
iret
int $14
iret
int $15
iret
.org 0x0ffe
BlockSignature:
.word 0xaa55 # mandatory boot sector signature at offset 0x1fe... of this 4K image's last 512-byte page
|
al3xtjames/Clover
| 24,619
|
CloverEFI/BootSector/BootDuet.S
|
/*
* BootDuet - Replacement boot program for DUET.
* Copyright 2011 Miguel Lopes Santos Ramos <mail@miguel.ramos.name>.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* See:
* - README for a description of this project
* - INSTALL for installation instructions
*/
/*
* The Master Boot Record (MBR) will have loaded this program into physical
* address 0x7c00 and jumped into 0000:7c00 just as the BIOS would.
* The environment that EFILDR expects us to have prepared is as follows:
*
* 0x015000 - 0x019000 EFIVAR.BIN loaded here, if the file exists
* 0x019000 - 0x019004 The serial number of the FAT boot volume
* 0x019004 - 0x019005 One byte indicating the following conditions:
* - 0 - EFIVAR.BIN was loaded
* - 1 - EFIVAR.BIN does not exist
* - 2 - EFIVAR.BIN exists but is not exactly 16k
* 0x020000 - 0x0a0000 EFILDR loaded here
*
* Given those constraints, we define our memory map as follows. Note that we
* work mostly in the lower 64k of physical memory with segment registers set
* to zero.
*
* 0x006000 - 0x007000 Room for several FAT sectors (4k) (FAT12)
* 0x007000 - 0x007c00 We have 3k here for stack and local variables
* 0x007c00 - 0x007e00 Where BootDuet is loaded (512b)
* 0x007e00 - 0x008000 Room for one FAT sector (FAT32 and FAT16)
* 0x008000 - 0x010000 Room for root directory entries (32k at most)
* 0x010000 - 0x015000 Unused space (20k)
* 0x015000 - 0x019000 Room for EFIVAR.BIN (16k at most)
* 0x019000 - 0x019005 Parameters for EFILDR (5 bytes)
* 0x019005 - 0x020000 Unused space (almost 4k)
* 0x020000 - 0x0a0000 Room for EFILDR (512k at most)
*/
.equ BaseAddress, 0x7c00 # Offset address for our own code and data
#if FAT == 32 || FAT == 16
.equ FatOffset, 0x7e00 # Offset address for the FAT buffer
#else
.equ FatOffset, 0x6000 # Offset address for the FAT buffer
#endif
.equ RootOffset, 0x8000 # Offset address for the root dir buffer
/*
* Constants everyone knows about.
*/
.equ DirEntrySize, 32 # Size of directory entry for all types of FAT
.equ DirEntryShift, 5 # Shift factor for directory entry
#if defined(WITH_LBA_64BIT)
.equ SizeOfLBA, 8 # Bytes in a 64-bit LBA variable
#else
.equ SizeOfLBA, 4 # Bytes in a 32-bit LBA variable
#endif
#if FAT == 32
.equ SizeOfCluster, 4 # Bytes in a cluster pointer variable
#elif FAT == 16 || FAT == 12
.equ SizeOfCluster, 2 # Bytes in a cluster pointer variable
#endif
/*
 * This is a 16-bit code segment and we're in real mode.
 */
.code16
/*
 * This jump is canonical, don't change the target.
 */
.global _start
_start: jmp main
nop
/*
 * BIOS Parameter Block (BPB)
 *
 * The BPB is the same for all types of FAT filesystems.
 * The following labels are placeholders that make writing the code that
 * accesses this data easier.
 * The real data is what is filled in by the format program.
 * The intention is that you only overwrite the part of your volume boot
 * sector that contains the code and leave this part unchanged.
 */
.space 8, 0x20 # OEM ID waste (OS or format program)
.word 512 # Bytes per sector (will anything other than 512 work?)
.byte 0 # Sectors per cluster
.word 0 # Reserved sectors before first FAT including boot
.byte 0 # Number of FATs (usually 1 or 2)
.word 0 # Root directory entries (only FAT12 or FAT16)
.word 0 # Number of sectors or 0 (not used)
.byte 0 # Media descriptor (irrelevant)
.word 0 # Sectors per FAT (for FAT12 or FAT16)
.word 0 # Sectors per track (irrelevant for LBA)
.word 0 # Number of heads (irrelevant for LBA)
.long 0 # Hidden sectors before partition
.long 0 # Number of sectors (if wSectors = 0)
/*
 * The next symbols allow us to reference exactly that same data in the BPB,
 * but relatively to our main stack frame.
 * Addressing relative to the frame pointer saves us a byte on each memory
 * reference, however, GNU as makes it hard for us to do it.
 *
 * The values are the standard byte offsets of each BPB field from the start
 * of the sector; main sets BP to the stack top at BaseAddress ("stack goes
 * down from 7c00" per main), so main_xxx(%bp) addresses the loaded BPB.
 */
.equ main_wBps, 0x0b
.equ main_bSpc, 0x0d
.equ main_wReserved, 0x0e
.equ main_bFats, 0x10
.equ main_wRootEntries, 0x11
.equ main_wSectsPerFat, 0x16
.equ main_lHidden, 0x1c
#if FAT == 32
/*
 * Extended BIOS Parameter Block (EBPB) for FAT32.
 */
.long 0 # Sectors per FAT (for FAT32)
.word 0 # FAT flags (irrelevant)
.word 0 # FAT32 version (only known is 0)
.long 0 # First cluster of the root directory
.word 0 # Sector number of FSINFO (irrelevant)
.word 0 # Backup boot sector number or 0 for no backup
.space 12, 0 # another 12 bytes gone to waste
bDrive: .byte 0 # BIOS drive number (we use what comes in DL)
.byte 0 # another byte gone to waste
.byte 0 # EBPB signature (should be 0x29)
.long 0 # Volume ID (serial number)
.space 11, 0x20# Volume label (8.3) (irrelevant)
.space 8, 0x20 # another 8 bytes gone to waste ("FAT32 ")
/*
 * As before, symbols for memory references relative to the main stack frame.
 */
.equ main_lSectsPerFat, 0x24
.equ main_lRootCluster, 0x2c
.equ main_bDrive, 0x40
.equ main_bSignature, 0x42
.equ main_lVolId, 0x43
.equ main_FsType, 0x52
#elif FAT == 16 || FAT == 12
/*
 * Extended BIOS Parameter Block (EBPB) for recent versions of FAT12 and FAT16.
 */
bDrive: .byte 0 # BIOS drive number (we use what comes in DL)
.byte 0 # another byte gone to waste
.byte 0 # EBPB signature (should be 0x29)
.long 0 # Volume ID (serial number)
.space 11, 0x20# Volume label (8.3) (irrelevant)
.space 8, 0x20 # File system type ("FAT12   " or "FAT16   ")
/*
 * As before, symbols for memory references relative to the main stack frame.
 */
.equ main_bDrive, 0x24
.equ main_bSignature, 0x26
.equ main_lVolId, 0x27
.equ main_FsType, 0x36
#endif
/*
 * And we finally start our main function.
 * We have 420 bytes left for the boot code, those wasted bytes would have
 * come in handy.
 * We'll probably have DL set with the BIOS boot disk number. If this is an MBR
 * disk, we may even have SI pointing to the right partition entry, if it is GPT
 * maybe a boot loader faked a partition entry. Anyway, we don't trust that.
 */
main:
/*
 * Initialize our memory model with all segment registers at 0000.
 * After this prologue BP = BaseAddress, so the main_xxx offsets defined
 * above address the BPB fields of this very sector, and local variables
 * are pushed below BP.
 */
cli # no stack, no interrupts
pushw %cs
popw %ss # SS = CS = 0000
movw $BaseAddress,%sp # stack goes down from 7c00
sti # got stack, got interrupts
cld # we work upward
pushw %cs
popw %ds # DS = CS = 0000
movw %sp,%bp # yes, we use a frame pointer
#if !defined(WITH_HARDCODED_DRIVE)
movb %dl,main_bDrive(%bp) # store drive number, which the
# MBR should have passed us on
# DL, to the EBPB.
#endif
#if defined(WITH_VALIDATION) && !defined(DEBUG)
/*
 * fsck - boot sector validation. This validation is only to help troubleshoot.
 * On tighter versions, such as FAT12 with LBA 64 bit, we skip this, there's no
 * space.
 */
fsck:
/*
 * - check that file system signature is 0x29
 * - check that hidden sectors isn't zero
 * Either failure falls into fsck.1, which prints "Bad" and halts forever.
 */
cmpb $0x29,main_bSignature(%bp)
jne fsck.1
cmpl $0,main_lHidden(%bp)
jne fsck.2
/*
 * Something went wrong with the installation, tell the user and halt.
 */
fsck.1:
movw $Invalid,%si # SI = error message
movb $InvalidLen,%cl # CL = length
call print
fsck.h: jmp fsck.h # Hot halt
fsck.2:
/*
 * Fall through to fsinit.
 */
#endif
/*
 * init - initializes variables on the main stack frame.
 *
 * Each local variable is created simply by pushing its initial value, so
 * SP always matches the current value of the main_frame offset counter.
 *
 * returns:
 * sFat, sRoot, wRootSects and sData variables are set,
 * wRootEnd, bEfiLdr, bEfiVar and cFatCache are set to harmless defaults.
 *
 * registers trashed: EAX, EBX, ECX, EDX
 */
init:
/*
 * The following assembler symbol is a neat trick that helps us not
 * getting lost when computing frame offsets of local variables.
 */
main_frame = 0
/*
 * Compute the LBA of the first sector of the first FAT.
 */
main_frame = main_frame - SizeOfLBA
main_sFat = main_frame # LBA at which FAT starts
#if !defined(WITH_LBA_64BIT)
movzwl main_wReserved(%bp),%eax
addl main_lHidden(%bp),%eax # EAX = hidden + reserved
pushl %eax # store to sFat
#else
movzwl main_wReserved(%bp),%eax
xorl %edx,%edx
addl main_lHidden(%bp),%eax
adcl lHiddenHigh,%edx # EDX:EAX = hidden + reserved
pushl %edx # store to sFat
pushl %eax
#endif
/*
 * Compute the LBA of the first sector after all copies of FAT. This
 * will be, on FAT12 and FAT16, the first sector of the root directory
 * and on FAT32 the first sector of the first cluster (the data region).
 */
#if FAT == 32
main_frame = main_frame - SizeOfLBA
main_sData = main_frame # LBA at which data starts
movl main_lSectsPerFat(%bp),%ecx
#elif FAT == 16 || FAT == 12
main_frame = main_frame - SizeOfLBA
main_sRoot = main_frame # LBA at which root starts
movzwl main_wSectsPerFat(%bp),%ecx
#endif
movzbl main_bFats(%bp),%eax # EAX = number of FATs
mull %ecx # EDX:EAX = total FAT sectors
#if !defined(WITH_LBA_64BIT)
addl main_sFat(%bp),%eax # EAX = sFat + total FAT
pushl %eax # store to sData/sRoot
#else
addl main_sFat(%bp),%eax
adcl main_sFat+4(%bp),%edx # EDX:EAX = sFat + total FAT
pushl %edx # store to sData/sRoot
pushl %eax
#endif
#if FAT == 12 || FAT == 16
/*
 * Compute the LBA of the first sector of the root directory, which
 * comes immediately after all copies of the FAT.
 * wRootSects = wRootEntries >> (log2(wBps) - DirEntryShift), i.e. the
 * entry count divided by directory entries per sector (BSR finds
 * log2(wBps) for us).
 */
main_frame = main_frame - 2
main_wRootSects = main_frame # number of sectors in root
main_frame = main_frame - SizeOfLBA
main_sData = main_frame # LBA at which data starts
movzwl main_wRootEntries(%bp),%eax
bsrw main_wBps(%bp),%cx
subw $DirEntryShift,%cx
shrw %cl,%ax
pushw %ax # store to wRootSects
#if !defined(WITH_LBA_64BIT)
addl main_sRoot(%bp),%eax # EAX = sRoot + wRootSects
pushl %eax # store to sData
#else
xorl %edx,%edx
addl main_sRoot(%bp),%eax
adcl main_sRoot+4(%bp),%edx # EDX:EAX = sRoot + wRootSects
pushl %edx # store to sData
pushl %eax
#endif
#endif
/*
 * Other variables that must be initialized.
 * cEfiVar must come lower in memory than cEfiLdr. Together, they form
 * a sorted array of two elements.
 */
main_frame = main_frame - 2
main_wRootEnd = main_frame # ptr to end off root buffer
main_frame = main_frame - 1
main_bEfiLdr = main_frame # EFILDR present?
main_frame = main_frame - 1
main_bEfiVar = main_frame # EFIVAR.BIN present?
main_frame = main_frame - SizeOfCluster
main_cFatCache = main_frame # loaded/cached FAT sector
pushw %cs # zero wRootEnd
pushw $1 # bEfiLdr = 0, bEfiVar = 1
#if FAT == 32
pushw %cs # set cFatCache to funny value
pushw $0xffff
#elif FAT == 16 || FAT == 12
pushw $0xffff # set cFatCache to funny value
#endif
/*
 * Fall through to readroot.
 */
/*
 * readroot - reads the root directory into the predetermined buffer.
 *
 * returns:
 * wRootEnd variable is set
 *
 * registers trashed: EAX, EBX, ECX, EDX, SI
 */
readroot:
/*
 * We read the root directory to a predifined buffer and hope that it
 * won't cross the boundary of the first 64k.
 */
pushw %cs
popw %es # ES = CS = 0000
movw $RootOffset,%di # ES:DI = root buffer
#if FAT == 32
/*
 * On FAT32, root is a normal file with the start cluster registered on
 * the EBPB. fread leaves DI just past the data it read.
 */
movl main_lRootCluster(%bp),%eax
call fread # read the root directory
movw %di,main_wRootEnd(%bp) # store DI to wRootEnd
#elif FAT == 16 || FAT == 12
/*
 * On FAT16 and FAT12, we must compute the number of sectors taken by
 * the root directory and read them from the disk.
 */
movw main_wRootSects(%bp),%cx
movl main_sRoot(%bp),%eax # EDX:EAX = start LBA of root
#if !defined(WITH_LBA_64BIT)
xorl %edx,%edx
#else
movl main_sRoot+4(%bp),%edx # EDX:EAX = start LBA of root
#endif
/*
 * Read all sectors on the root directory.
 */
call read
/*
 * After reading, advance DI by the number of bytes read and return.
 * We don't advance ES. Even if we did, the next routine would have
 * a problem if the root directory was superimposed by the files
 * that it reads.
 */
movw main_wBps(%bp),%ax
mulw %cx # DX:AX = bytes read
addw %ax,%di # advance DI by AX bytes
movw %di,main_wRootEnd(%bp) # store DI to wRootEnd
#endif
/*
 * Fall through to scanroot.
 */
/*
 * scanroot - scans the root directory for EFILDR and EFIVAR.BIN, if any of
 * these files is found, it is read and bEfiLdr or bEfiVar variables are
 * set appropriately
 *
 * returns:
 * bEfiLdr and bEfiVar variables are set
 *
 * registers trashed: EAX, EBX, ECX, EDX, SI, DI, ES
 */
scanroot:
movw $RootOffset,%si # DS:SI = root buffer
scanroot.1:
/*
 * Check if this directory entry is EFILDR
 */
pushw %cs
popw %es # ES = CS
movw $EfiLdr,%di # ES:DI = &EfiLdr
call fncmp
jne scanroot.2 # jump if this isn't EFILDR
movb $1,main_bEfiLdr(%bp) # bEfiLdr = 1, we have it
pushw $0x2000 # read EFILDR to 2000:0000
jmp scanroot.r
scanroot.2:
/*
 * Check if this directory entry is EFIVAR.BIN and, if so, check its
 * size to see if it is 16k and then set the bHaveVar variable
 * accordingly.
 * If the size is right, then proceed to reading it.
 * Offset 0x1c in a directory entry is the 32-bit file size.
 */
movw $EfiVar,%di # ES:DI = &EfiVar (assume ES=CS)
call fncmp
jne scanroot.n # jump if this isn't EFIVAR.BIN
movb $2,main_bEfiVar(%bp) # bEfiVar = 2, maybe size is bad
#if FAT == 12
cmpl $0,0x1c(%si) # is zero size?
je scanroot.n # it is, skip it
#else
cmpl $0x4000,0x1c(%si) # compare size to 16k
jne scanroot.n # size is not exactly 16k...
#endif
movb $0,main_bEfiVar(%bp) # bEfiVar = 0, we have it
pushw $0x1500 # read EFIVAR.BIN to 1500:0000
scanroot.r:
/*
 * Found interesting file, print its name, so the user knows something
 * if we halt for some reason, and read it to ES:0000, where ES is the
 * word pushed above (0x2000 for EFILDR, 0x1500 for EFIVAR.BIN).
 * SI still points at the directory entry, whose first 11 bytes are the
 * 8.3 name, so print can use it directly.
 */
movb $FilenameLen,%cl # CL = number of chars
call print # print the file name
popw %es # restore ES from the stack
xorw %di,%di # ES:DI = xxxx:0000
#if FAT == 32
pushw 0x14(%si) # push high 16 bits of cluster
pushw 0x1a(%si) # push low 16 bits of cluster
popl %eax # EAX = 32-bit cluster number
#elif FAT == 16 || FAT == 12
movw 0x1a(%si),%ax # AX = 16-bit cluster number
#endif
call fread # read file EAX into ES:DI
scanroot.n:
/*
 * Proceed to the next directory entry.
 */
addw $DirEntrySize,%si # advance SI
cmpw main_wRootEnd(%bp),%si # have we reached the end?
jb scanroot.1 # if not, loop
/*
 * Fall through to setup.
 */
/*
 * setup - sets up environment to what EFILDR expects to find
 */
setup:
/*
 * Check that EFILDR was found and that we have something to jump into.
 * bEfiVar and bEfiLdr are adjacent bytes on the frame, so one word load
 * gets both: AL = bEfiVar, AH = bEfiLdr.
 */
movw main_bEfiVar(%bp),%ax # AL = bEfiVar, AH = bEfiLdr
or %ah,%ah # is bEfiLdr set?
jnz setup.1
/*
 * Print error message and halt.
 */
movw $Missing,%si # SI = Missing
movb $MissingLen,%cl # CL = length
call print
setup.h: jmp setup.h # Hot halt
setup.1:
#if !defined(DEBUG)
/*
 * Copy the volume id (serial number) of the FAT file system to the
 * physical address 19000 and the value of bEfiVar to 19004.
 */
pushw $0x1900
popw %es # ES = 1900
movb %al,%es:(4) # store AL to 1900:0004
movl main_lVolId(%bp),%eax # EAX = volume serial number
movl %eax,%es:(0) # store EAX to 1900:0000
/*
 * Jump into EFILDR at 2000:0200, the second sector of start.com.
 */
ljmp $0x2000,$0x0200
#endif
#if defined(DEBUG)
/*
 * printn - print a 32-bit number on the screen in decimal and halt
 *
 * Digits are generated least-significant first and stored backwards
 * (SI is decremented before each store), overwriting the volume label
 * and id area just below main_FsType, so when the loop ends SI points
 * at the most significant digit and CX holds the digit count - exactly
 * what print expects.
 *
 * parameters:
 * EAX number to print
 */
printn:
leaw main_FsType(%bp),%si # Overwrite volume label and id
xorl %ebx,%ebx
movb $10,%bl # EBX = decimal base
xorw %cx,%cx # CX will count digits
printn.1:
xorl %edx,%edx # satisfy the DIV instruction
divl %ebx # EAX = EAX / 10, EDX = EAX % 10
decw %si # --SI
incw %cx # ++CX
addb $'0',%dl # DL = decimal digit to print
movb %dl,(%si) # *SI == DL
testl %eax,%eax # did we reach zero?
jnz printn.1
call print
printn.h: jmp printn.h # halt
#endif
/*
 * print - print a message on the screen
 *
 * Writes straight into the color text-mode video memory at b800:0000
 * (top-left corner of the screen), interleaving each text byte with the
 * attribute byte 0x07 (white on black, non-blinking).
 *
 * parameters:
 * DS:SI pointer to the message being written
 * CL number of characters to write
 *
 * registers trashed: AL, CX, DI, ES
 */
print:
pushw %si
pushw $0xb800
popw %es
xorb %ch,%ch # high 16 bits of counter = 0
xorw %di,%di # ES:DI = b800:0000
movb $0x07,%al # AL = 0x07 (white, non-blink)
print.1:
movsb # move one byte of text
stosb # store one byte of attributes
loop print.1
popw %si
ret
/*
 * fncmp - compares two 8.3 style filenames
 *
 * parameters:
 * DS:SI pointer to one filename
 * ES:DI pointer the other filename
 *
 * returns:
 * zero flag set if filenames are equal
 *
 * registers trashed: CX, DI
 */
fncmp:
pushw %si
movw $FilenameLen,%cx # compare all 11 characters
repe cmpsb
popw %si # pop won't affect flags
ret
/*
 * fread - reads a file given its first cluster
 *
 * Walks the cluster chain: reads each cluster into ES:DI, then looks up
 * the next cluster in the FAT (one FAT sector is cached at FatOffset;
 * three sectors for FAT12 so a 12-bit entry never straddles the cache).
 * Returns when the next cluster number is out of the valid data range
 * (end-of-chain, bad cluster, etc.).
 *
 * parameters:
 * EAX first cluster to read (AX on FAT12/FAT16)
 * ES:DI destination buffer
 *
 * returns:
 * ES:DI pointer past the used portion of the destination buffer
 *
 * registers trashed: EAX, EBX, ECX, EDX
 */
fread:
/*
 * First we establish a stack frame here. We will assume, for
 * size optimization, that the stack frame above is the main stack
 * frame. That means this function won't work if it's not called in that
 * context.
 * The margin between frames contains IP and BP only (4 bytes).
 */
pushw %bp
movw %sp,%bp
fread_frame = 0
fread_wBps = main_wBps - main_frame + 4
fread_bSpc = main_bSpc - main_frame + 4
fread_sFat = main_sFat - main_frame + 4
fread_sData = main_sData - main_frame + 4
fread_cFatCache = main_cFatCache - main_frame + 4
fread.1:
/*
 * Check if the cluster is something crazy, such as the last cluster in
 * file or a bad sector or anything strange.
 */
#if FAT == 32
cmpl $2,%eax
jb fread.2
cmpl $0x0ffffff0,%eax
jb fread.3
#elif FAT == 16
cmpw $2,%ax
jb fread.2
cmpw $0xfff0,%ax
jb fread.3
#elif FAT == 12
cmpw $2,%ax
jb fread.2
cmpw $0x0ff0,%ax
jb fread.3
#endif
fread.2:
/*
 * If the cluster number is crazy, then we return and that's it.
 */
leave
ret
fread.3:
/*
 * Clean the cluster number, on FAT12 and FAT16, we may have trash
 * on the upper 16 bits of EAX.
 * Then save the cluster number on a local variable.
 */
fread_frame = fread_frame - 4 # 4 bytes even for FAT12/16
fread_cluster = fread_frame # new local var cluster
#if FAT == 16 || FAT == 12
movzwl %ax,%eax # zero extend AX
#endif
pushl %eax # store to var cluster
/*
 * Compute the start LBA for this cluster, first the offset from the
 * data region, then the LBA.
 * ECX is kept with the number of sectors per cluster.
 */
#if FAT == 32
subl $2,%eax
#elif FAT == 16 || FAT == 12
decw %ax # subtract 2 (two one-byte decs are
decw %ax # smaller than sub $2)
#endif
movzbl fread_bSpc(%bp),%ecx # ECX = sectors per cluster
mull %ecx # EDX:EAX = (cluster - 2) * ECX
addl fread_sData(%bp),%eax # EDX:EAX = start LBA
#if defined(WITH_LBA_64BIT)
adcl fread_sData+4(%bp),%edx
#endif
/*
 * Read the cluster, the number of sectors to read is bSpc (in CX).
 */
call read
/*
 * After reading, advance DI by the number of bytes read and on overflow
 * advance ES too. The segment is advanced by editing the copy of ES
 * that pushw just placed at fread_frame-2(%bp), then popping it back.
 */
movw fread_wBps(%bp),%ax
mulw %cx # AX = bytes per cluster (bSpc * wBps)
addw %ax,%di # advance DI by AX bytes
jnc fread.4 # check for carry
pushw %es # advance ES by 64k
addw $0x1000,fread_frame-2(%bp)
popw %es
fread.4:
/*
 * Now we must locate the next cluster in the file, and for that, first
 * locate the FAT sector that we must read by obtaining the quotient and
 * remainder of the division of the cluster number by the number of
 * cluster pointers per FAT sector.
 */
popl %eax # restore from fread_cluster
fread_frame = fread_frame + 4 # fread_cluster goes away
fread_frame = fread_frame - 2
fread_index = fread_frame # new local var index
#if FAT == 32
movzwl fread_wBps(%bp),%ebx # EBX = bytes per sector
shrw $2,%bx # EBX = ptrs per sector
xorl %edx,%edx
divl %ebx # EAX = FAT sector, EDX = ptr
pushw %dx # save cluster pointer index
#elif FAT == 16
movw fread_wBps(%bp),%bx # BX = bytes per sector
shrw $1,%bx # BX = ptrs per sector
xorw %dx,%dx
divw %bx # AX = FAT sector, DX = ptr
pushw %dx # save cluster pointer index
#elif FAT == 12
movw fread_wBps(%bp),%bx # BX = bytes per sector
shlw $1,%bx # BX = ptrs per trio
xorw %dx,%dx
divw %bx # AX = FAT trio, DX = ptr
pushw %dx # save cluster pointer index
movw $3,%bx
mulw %bx # AX = first sector of trio
#endif
/*
 * Check if this FAT sector was already loaded and, if so, skip to the
 * next section.
 */
#if FAT == 32
cmpl fread_cFatCache(%bp),%eax
#elif FAT == 16 || FAT == 12
cmpw fread_cFatCache(%bp),%ax
#endif
je fread.5
/*
 * We have to load the relevant sector of FAT. We keep the sector that
 * we will load in cFatCache (if the read fails we will abort anyway).
 */
#if FAT == 32
movl %eax,fread_cFatCache(%bp)
#elif FAT == 16 || FAT == 12
movw %ax,fread_cFatCache(%bp)
#endif
xorl %edx,%edx
#if !defined(WITH_LBA_64BIT)
addl fread_sFat(%bp),%eax # EDX:EAX = FAT LBA
#else
addl fread_sFat(%bp),%eax
adcl fread_sFat+4(%bp),%edx # EDX:EAX = FAT LBA
#endif
/*
 * Now read that sector into the buffer for FAT sectors (0000:7e00).
 */
pushw %es
pushw %di
pushw %cs
popw %es
movw $FatOffset,%di # ES:DI = 0000:xxxx
#if FAT == 12
movw $3,%cx # on FAT12, read 3 sectors
#else
movw $1,%cx
#endif
call read # read the FAT sector
popw %di
popw %es
fread.5:
/*
 * Finally we read the cluster pointer in this FAT sector and repeat the
 * whole thing.
 */
popw %bx # restore fread_index to BX
fread_frame = fread_frame + 2 # fread_index goes away
#if FAT == 32
shlw $2,%bx # BX = index * 4
movl FatOffset(%bx),%eax # EAX = next cluster in file
jmp fread.1
#elif FAT == 16
shlw $1,%bx # BX = index * 2
movw FatOffset(%bx),%ax # AX = next cluster in file
jmp fread.1
#elif FAT == 12
xorw %cx,%cx # CX = default shift factor 0
movw %bx,%ax # AX = index too
testw $1,%ax # is it even or odd?
jz fread.6 # if even, CL (shift factor) = 0
movb $4,%cl # if odd, CL (shift factor) = 4
fread.6:
shrw $1,%ax
addw %ax,%bx # BX = 3 * (index / 2) + odd?
movw FatOffset(%bx),%ax # low or high 12 of AX is ptr
shrw %cl,%ax # shift right by CL
andw $0x0fff,%ax # trim high bits
jmp fread.1
#endif
/*
 * read - reads sectors by 64-bit Logical Block Address (LBA)
 *
 * parameters:
 * EDX:EAX LBA of first sector to read
 * CX number of sectors to read
 * ES:DI destination buffer
 *
 * registers trashed: AX, DL
 */
read:
pushw %si
/*
 * Fill in Disk Address Packet (DAP) structure. This is filled on the
 * stack from top to bottom. After the pushes, memory from SP upward is
 * exactly the 16-byte DAP layout required by int 13h AH=42h:
 *   +0  size (0x10) / reserved (0x00)
 *   +2  sector count (CX)
 *   +4  buffer offset (DI), +6 buffer segment (ES)
 *   +8  64-bit LBA (EAX low, EDX high)
 */
pushl %edx
pushl %eax
pushw %es
pushw %di
pushw %cx
pushw $16
/*
 * Call BIOS int 13h AH=42h: Extended Read Sectors From Drive.
 */
movb $0x42,%ah
movb bDrive,%dl
movw %sp,%si # SI = SP (base of DAP)
int $0x13
/*
 * Return on success or halt on error (an indicative message would have
 * already been written on the screen).
 */
read.h: jc read.h # Halt if carry
addw $16,%sp # discard the DAP
popw %si
ret
/*
 * String constants for the whole program and their length.
 */
#if defined(WITH_VALIDATION) && !defined(DEBUG)
.equ InvalidLen, 3
Invalid: .ascii "Bad"
#endif
.equ MissingLen, 19 # "Missing " + 8.3 filename
Missing: .ascii "Missing "
.equ FilenameLen, 11 # 8.3 filename (11)
#if FAT == 32
EfiLdr: .ascii "EFILDR20   "
#elif FAT == 16
EfiLdr: .ascii "EFILDR16   "
#elif FAT == 12
EfiLdr: .ascii "EFILDR     "
#endif
EfiVar: .ascii "EFIVAR  BIN"
/*
 * Since the BPB only has room for a 32-bit number of hidden sectors, if we need
 * 64-bit LBA, we need room for storing the high-order 32 bits of hidden
 * sectors.
 */
#if defined(WITH_LBA_64BIT)
.org 0x01fa
lHiddenHigh: .long 0
#endif
/*
 * The boot sector signature at the end of the sector.
 */
.org 0x01fe
wSignature: .word 0xaa55
/*
 * ---- file boundary ------------------------------------------------------
 * Everything above is the FAT volume boot sector source; everything below
 * is a second file, CloverEFI/BootSector/st32_64.S from repo
 * al3xtjames/Clover (43,644 bytes). The raw concatenation markers that
 * stood here were not valid assembly and have been turned into this
 * comment.
 * -------------------------------------------------------------------------
 */
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2012, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* st32_64.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
/* NOTE(review): ".stack:" and ".486p:" below look like leftovers from the
 * MASM directives ".stack"/".486p" of the original st32_64.asm that became
 * plain (unused) labels when the file was converted to GAS syntax - TODO
 * confirm they are intentional. */
.stack:
.486p:
.code16
.equ FAT_DIRECTORY_ENTRY_SIZE, 0x020
.equ FAT_DIRECTORY_ENTRY_SHIFT, 5
.equ BLOCK_SIZE, 0x0200
.equ BLOCK_MASK, 0x01ff
.equ BLOCK_SHIFT, 9 # log2(BLOCK_SIZE)
.org 0x0
.global _start
_start:
Ia32Jump:
jmp BootSectorEntryPoint # JMP inst - 3 bytes
nop
OemId: .ascii "INTEL   " # OemId - 8 bytes
SectorSize: .word 0 # Sector Size - 2 bytes
SectorsPerCluster: .byte 0 # Sector Per Cluster - 1 byte
ReservedSectors: .word 0 # Reserved Sectors - 2 bytes
NoFats: .byte 0 # Number of FATs - 1 byte
RootEntries: .word 0 # Root Entries - 2 bytes
Sectors: .word 0 # Number of Sectors - 2 bytes
Media: .byte 0 # Media - 1 byte
SectorsPerFat16: .word 0 # Sectors Per FAT for FAT12/FAT16 - 2 byte
SectorsPerTrack: .word 0 # Sectors Per Track - 2 bytes
Heads: .word 0 # Heads - 2 bytes
HiddenSectors: .long 0 # Hidden Sectors - 4 bytes
LargeSectors: .long 0 # Large Sectors - 4 bytes
#******************************************************************************
#
#The structure for FAT32 starting at offset 36 of the boot sector. (At this point,
#the BPB/boot sector for FAT12 and FAT16 differs from the BPB/boot sector for FAT32.)
#
#******************************************************************************
SectorsPerFat32: .long 0 # Sectors Per FAT for FAT32 - 4 bytes
ExtFlags: .word 0 # Mirror Flag - 2 bytes
FSVersion: .word 0 # File System Version - 2 bytes
RootCluster: .long 0 # 1st Cluster Number of Root Dir - 4 bytes
FSInfo: .word 0 # Sector Number of FSINFO - 2 bytes
BkBootSector: .word 0 # Sector Number of Bk BootSector - 2 bytes
Reserved: .fill 12,1,0 # Reserved Field - 12 bytes
PhysicalDrive: .byte 0 # Physical Drive Number - 1 byte
Reserved1: .byte 0 # Reserved Field - 1 byte
Signature: .byte 0 # Extended Boot Signature - 1 byte
VolId: .ascii "    " # Volume Serial Number - 4 bytes
FatLabel: .ascii "           " # Volume Label - 11 bytes
FileSystemType: .ascii "FAT32   " # File System Type - 8 bytes
BootSectorEntryPoint:
# ASSUME ds:@code
# ASSUME ss:@code
# ds = 1000, es = 2000 + x (size of first cluster >> 4)
# cx = Start Cluster of EfiLdr
# dx = Start Cluster of Efivar.bin
# Re use the BPB data stored in Boot Sector
# BP = 7c00 so label(%bp) addresses the BPB fields of this sector,
# which is loaded at 0000:7c00.
movw $0x7c00,%bp
pushw %cx
# Read Efivar.bin
# 1000:dx = DirectoryEntry of Efivar.bin -> BS.com has filled already
movw $0x1900,%ax
movw %ax,%es
testw %dx,%dx
jnz CheckVarStoreSize
movb $1,%al
NoVarStore:
pushw %es
# Set the 5th byte start @ 0:19000 to non-zero indicating we should init var store header in DxeIpl
movb %al, %es:(4)
jmp SaveVolumeId
CheckVarStoreSize:
movw %dx,%di
cmpl $0x4000, %ds:2(%di) # Efivar.bin must be exactly 16 KiB
movb $2,%al # AL = 2: found but size is wrong
jne NoVarStore
LoadVarStore:
movb $0,%al # AL = 0: var store is good
movb %al, %es:(4)
movw (%di), %cx # CX = start cluster of Efivar.bin
# ES:DI = 1500:0
xorw %di,%di
pushw %es
movw $0x1500,%ax
movw %ax,%es
call ReadFile
SaveVolumeId:
popw %es
movw VolId(%bp), %ax
movw %ax, %es:(0) # Save Volume Id to 0:19000. we will find the correct volume according to this VolumeId
movw VolId+2(%bp), %ax
movw %ax, %es:(2)
# Read Efildr
popw %cx
# cx = Start Cluster of Efildr -> BS.com has filled already
# ES:DI = 2000:0, first cluster will be read again
xorw %di,%di # di = 0
movw $0x2000,%ax
movw %ax,%es
call ReadFile
# Patch JumpSegment below with our own segment before the far jump.
movw %cs,%ax
movw %ax, %cs:JumpSegment
CheckEm64T:
# Check CPUID leaf 80000001h EDX bit 29 (long mode / EM64T support);
# halt with a message if the CPU is 32-bit only.
movl $0x80000001,%eax
# cpuid
.word 0xA20F # cpuid opcode 0F A2, hand-encoded
btl $29,%edx
jc CheckEm64TPass
pushw %cs
popw %ds
leaw Em64String,%si
movw $18,%cx
jmp PrintStringAndHalt
CheckEm64TPass:
JumpFarInstruction:
.byte 0xea # far jmp 2000:0200 (segment patched above)
JumpOffset:
.word 0x200
JumpSegment:
.word 0x2000
# ****************************************************************************
# ReadFile
#
# Arguments:
# CX = Start Cluster of File
# ES:DI = Buffer to store file content read from disk
#
# Return:
# (ES << 4 + DI) = end of file content Buffer
#
# Walks the FAT32 cluster chain, coalescing runs of consecutive clusters
# into single ReadBlocks calls. One FAT sector pair is cached at DS:0000.
#
# ****************************************************************************
ReadFile:
# si = NumberOfClusters
# cx = ClusterNumber
# dx = CachedFatSectorNumber
# ds:0000 = CacheFatSectorBuffer
# es:di = Buffer to load file
# bx = NextClusterNumber
pusha
movw $1,%si # NumberOfClusters = 1
pushw %cx # Push Start Cluster onto stack
movw $0xfff,%dx # CachedFatSectorNumber = 0xfff
FatChainLoop:
movw %cx,%ax # ax = ClusterNumber
andw $0xfff8,%ax # ax = ax & 0xfff8
cmpw $0xfff8,%ax # See if this is the last cluster
je FoundLastCluster # Jump if last cluster found
movw %cx,%ax # ax = ClusterNumber
shlw $2, %ax # FatOffset = ClusterNumber * 4 (FAT32 entries are 4 bytes)
pushw %si # Save si
movw %ax,%si # si = FatOffset
shrw $BLOCK_SHIFT, %ax # ax = FatOffset >> BLOCK_SHIFT
addw ReservedSectors(%bp), %ax # ax = FatSectorNumber = ReservedSectors + (FatOffset >> BLOCK_SHIFT)
andw $BLOCK_MASK, %si # si = FatOffset & BLOCK_MASK
cmpw %dx,%ax # Compare FatSectorNumber to CachedFatSectorNumber
je SkipFatRead
movw $2,%bx
pushw %es
pushw %ds
popw %es
call ReadBlocks # Read 2 blocks starting at AX storing at ES:DI
popw %es
movw %ax,%dx # CachedFatSectorNumber = FatSectorNumber
SkipFatRead:
movw (%si), %bx # bx = NextClusterNumber
# NOTE(review): only the low 16 bits of the 32-bit FAT32 entry are
# read here; this assumes all cluster numbers fit in 16 bits - confirm
movw %cx,%ax # ax = ClusterNumber
popw %si # Restore si
decw %bx # bx = NextClusterNumber - 1
cmpw %cx,%bx # See if (NextClusterNumber-1)==ClusterNumber
jne ReadClusters
incw %bx # bx = NextClusterNumber
incw %si # NumberOfClusters++
movw %bx,%cx # ClusterNumber = NextClusterNumber
jmp FatChainLoop
ReadClusters:
incw %bx
popw %ax # ax = StartCluster
pushw %bx # StartCluster = NextClusterNumber
movw %bx,%cx # ClusterNumber = NextClusterNumber
subw $2,%ax # ax = StartCluster - 2
xorb %bh,%bh
movb SectorsPerCluster(%bp), %bl # bx = SectorsPerCluster
mulw %bx # ax = (StartCluster - 2) * SectorsPerCluster
addw (%bp), %ax # ax = FirstClusterLBA + (StartCluster-2)*SectorsPerCluster
pushw %ax # save start sector
movw %si,%ax # ax = NumberOfClusters
mulw %bx # ax = NumberOfClusters * SectorsPerCluster
movw %ax,%bx # bx = Number of Sectors
popw %ax # ax = Start Sector
call ReadBlocks
movw $1,%si # NumberOfClusters = 1
jmp FatChainLoop
FoundLastCluster:
popw %cx
popa
ret
# ****************************************************************************
# ReadBlocks - Reads a set of blocks from a block device
#
# AX = Start LBA
# BX = Number of Blocks to Read
# ES:DI = Buffer to store sectors read from disk
#
# Converts LBA to CHS and reads via BIOS int 13h AH=02h, splitting the
# transfer at track ends and at 64 KiB buffer boundaries.
# Assumes the words at 0:7bfc (MaxSector) and 0:7bfe (MaxHead+1) hold the
# drive geometry - presumably left there by BS.com; TODO confirm.
# ****************************************************************************
# cx = Blocks
# bx = NumberOfBlocks
# si = StartLBA
ReadBlocks:
pusha
addl LBAOffsetForBootSector(%bp), %eax # Add LBAOffsetForBootSector to Start LBA
addl HiddenSectors(%bp), %eax # Add HiddenSectors to Start LBA
movl %eax,%esi # esi = Start LBA
movw %bx,%cx # cx = Number of blocks to read
ReadCylinderLoop:
movw $0x7bfc,%bp # bp = 0x7bfc
movl %esi,%eax # eax = Start LBA
xorl %edx,%edx # edx = 0
movzwl (%bp), %ebx # bx = MaxSector
divl %ebx # ax = StartLBA / MaxSector
incw %dx # dx = (StartLBA % MaxSector) + 1
movw (%bp), %bx # bx = MaxSector
subw %dx,%bx # bx = MaxSector - Sector
incw %bx # bx = MaxSector - Sector + 1
cmpw %bx,%cx # Compare (Blocks) to (MaxSector - Sector + 1)
jg LimitTransfer
movw %cx,%bx # bx = Blocks
LimitTransfer:
pushw %ax # save ax
movw %es,%ax # ax = es
shrw $(BLOCK_SHIFT-4), %ax # ax = Number of blocks into mem system
andw $0x7f,%ax # ax = Number of blocks into current seg
addw %bx,%ax # ax = End Block number of transfer
cmpw $0x80,%ax # See if it crosses a 64K boundry
jle NotCrossing64KBoundry # Branch if not crossing 64K boundry
subw $0x80,%ax # ax = Number of blocks past 64K boundry
subw %ax,%bx # Decrease transfer size by block overage
NotCrossing64KBoundry:
popw %ax # restore ax
pushw %cx
movb %dl,%cl # cl = (StartLBA % MaxSector) + 1 = Sector
xorw %dx,%dx # dx = 0
divw 2(%bp) # ax = ax / (MaxHead + 1) = Cylinder
# dx = ax % (MaxHead + 1) = Head
pushw %bx # Save number of blocks to transfer
movb %dl,%dh # dh = Head
movw $0x7c00,%bp # bp = 0x7c00
movb PhysicalDrive(%bp), %dl # dl = Drive Number
movb %al,%ch # ch = Cylinder
movb %bl,%al # al = Blocks
movb $2,%ah # ah = Function 2 (CHS read)
movw %di,%bx # es:bx = Buffer address
int $0x13
jc DiskError
popw %bx
popw %cx
movzwl %bx,%ebx
addl %ebx,%esi # StartLBA = StartLBA + NumberOfBlocks
subw %bx,%cx # Blocks = Blocks - NumberOfBlocks
movw %es,%ax
shlw $(BLOCK_SHIFT-4), %bx
addw %bx,%ax
movw %ax,%es # es:di = es:di + NumberOfBlocks*BLOCK_SIZE
cmpw $0,%cx
jne ReadCylinderLoop
popa
ret
DiskError:
pushw %cs
popw %ds
leaw ErrorString,%si
movw $7,%cx
jmp PrintStringAndHalt
PrintStringAndHalt:
# DS:SI = char/attribute pairs, CX = pair count; copy straight into
# color text video memory at b800:00a0 (row 1) and hang.
movw $0xb800,%ax
movw %ax,%es
movw $160,%di
rep
movsw
Halt:
jmp Halt
ErrorString:
# Interleaved character/attribute pairs (0x0c = bright red on black)
.byte 'S', 0x0c, 'E', 0x0c, 'r', 0x0c, 'r', 0x0c, 'o', 0x0c, 'r', 0x0c, '!',0x0c
.org 0x01fa
LBAOffsetForBootSector:
.long 0x0
.org 0x01fe
.word 0xaa55 # boot sector signature
#******************************************************************************
#******************************************************************************
#******************************************************************************
.equ DELAY_PORT, 0x0ed # Port to use for 1uS delay
.equ KBD_CONTROL_PORT, 0x060 # 8042 control port
.equ KBD_STATUS_PORT, 0x064 # 8042 status port
.equ WRITE_DATA_PORT_CMD, 0x0d1 # 8042 command to write the data port
.equ ENABLE_A20_CMD, 0x0df # 8042 command to enable A20
.ifndef CHARACTER_TO_SHOW
.equ CHARACTER_TO_SHOW, 0x37 # '7': default progress character
.endif
.org 0x200 # second 512-byte sector (start.com part)
jmp start
Em64String:
.byte 'E', 0x0c, 'm', 0x0c, '6', 0x0c, '4', 0x0c, 'T', 0x0c, ' ', 0x0c, 'U', 0x0c, 'n', 0x0c, 's', 0x0c, 'u', 0x0c, 'p', 0x0c, 'p', 0x0c, 'o', 0x0c, 'r', 0x0c, 't', 0x0c, 'e', 0x0c, 'd', 0x0c, '!', 0x0c
start:
# Flat-out real-mode setup: all segments = CS, private stack, then
# gather the E820 memory map, enable A20, load the GDT and switch to
# 32-bit protected mode.
movw %cs,%ax
movw %ax,%ds
movw %ax,%es
movw %ax,%ss
movw $MyStack, %sp
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[160],'a'
# mov ax,cs
# mov es,ax
movl $0,%ebx
leal MemoryMap, %edi
MemMapLoop:
# int 15h AX=E820h: fetch one 20-byte memory range descriptor per call;
# EBX carries the continuation value, EBX=0 after the last one.
movl $0xe820,%eax
movl $20,%ecx
movl $0x534d4150, %edx # SMAP
int $0x15
jc MemMapDone
addl $20,%edi
cmpl $0,%ebx
je MemMapDone
jmp MemMapLoop
MemMapDone:
leal MemoryMap, %eax
subl %eax,%edi # Get the address of the memory map
movl %edi, MemoryMapSize # Save the size of the memory map
xorl %ebx,%ebx
movw %cs,%bx # BX=segment
shll $4,%ebx # BX="linear" address of segment base
leal GDT_BASE(%ebx), %eax # EAX=PHYSICAL address of gdt
movl %eax, (gdtr + 2) # Put address of gdt into the gdtr
leal IDT_BASE(%ebx), %eax # EAX=PHYSICAL address of idt
movl %eax, (idtr + 2) # Put address of idt into the idtr
leal MemoryMapSize(%ebx), %edx # Physical base address of the memory map
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[162],'b'
# mov ax,cs
# mov es,ax
#
# Enable A20 Gate
#
movw $0x2401,%ax # Enable A20 Gate
int $0x15
jnc A20GateEnabled # Jump if it suceeded
#
# If INT 15 Function 2401 is not supported, then attempt to Enable A20 manually.
#
/*
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
outw %ax, $DELAY_PORT # Delay 1 uS
movb $WRITE_DATA_PORT_CMD, %al # 8042 cmd to write output port
outb %al, $KBD_STATUS_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
movb $ENABLE_A20_CMD, %al # gate address bit 20 on
outb %al, $KBD_CONTROL_PORT # Send command to thre 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
movw $25,%cx # Delay 25 uS for the command to complete on the 8042
Delay25uS:
outw %ax, $DELAY_PORT # Delay 1 uS
loop Delay25uS
Timeout8042:
*/
#WIKI -fast A20gate
inb $0x92, %al
orb $2, %al
outb %al, $0x92
A20GateEnabled:
#
# DISABLE INTERRUPTS - Entering Protected Mode
# 253668.pdf page 401
# First show a progress character via int 10h teletype output.
movl $0x000F, %ebx
movl $(0x0E00 | (CHARACTER_TO_SHOW & 255)), %eax
movl $0x0010, %ecx
int $0x10
cli
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[164],'c'
# mov ax,cs
# mov es,ax
# Patch the two far-jump target offsets with their physical addresses
# (this code is assumed loaded at physical 0x20000, offset 0x6?? -
# hence the 0x20000+0x6 fixup).
leal OffsetIn32BitProtectedMode, %eax
addl $0x20000+0x6,%eax
movl %eax, OffsetIn32BitProtectedMode
leal OffsetInLongMode, %eax
addl $0x20000+0x6,%eax
movl %eax, OffsetInLongMode
#
# load GDT
#
.byte 0x66 # operand-size prefix: 32-bit lgdt in 16-bit mode
lgdt gdtr
#
# Enable Protect Mode (set CR0.PE=1)
#
movl %cr0, %eax # Read CR0.
orb $0x1,%al # Set PE=1
movl %eax, %cr0 # Write CR0.
.byte 0x66
.byte 0xea # jmp far 16:32
OffsetIn32BitProtectedMode:
.long 0x0000000 # offset $+8 (In32BitProtectedMode)
.word 0x10 # selector (flat CS)
In32BitProtectedMode:
#
# Entering Long Mode
#
# Many instructions below are hand-encoded as .byte sequences because
# the assembler is in .code16/32 mode; each sequence is decoded in the
# accompanying "# ..." comment.
#
.byte 0x66
movw $8,%ax
movw %ax,%ds
movw %ax,%es
movw %ax,%ss
#
# Disable paging before activate long mode (set CR0.PG=0)
#
/* movl %cr0, %eax # Read CR0.
.byte 0xf
.byte 0xba
.byte 0xf8
.byte 0x1f
# btc eax, 31 ; Set PG=0.
movl %eax, %cr0 # Write CR0.
Cl: jmp Cl */
#
# Enable the 64-bit page-translation-table entries by
# setting CR4.PAE=1 (this is _required_ before activating
# long mode). Paging is not enabled until after long mode
# is enabled.
#
.byte 0xf
.byte 0x20
.byte 0xe0
# mov eax, cr4
btsl $5,%eax
.byte 0xf
.byte 0x22
.byte 0xe0
# mov cr4, eax
#
# This is the Trapolean Page Tables that are guarenteed
# under 4GB.
#
# Address Map:
# 10000 ~ 12000 - efildr (loaded)
# 20000 ~ 21000 - start64.com
# 21000 ~ 22000 - efi64.com
# 22000 ~ 90000 - efildr
# 90000 ~ 96000 - 4G pagetable (will be reload later)
#
.byte 0xb8
.ifdef USE_LOW_EBDA
.long 0x88000
# mov eax, 88000h
.else
.long 0x90000
# mov eax, 90000h
.endif
movl %eax, %cr3
#
# Enable long mode (set EFER.LME=1).
#
.byte 0xb9
.long 0xc0000080
# mov ecx, 0c0000080h ; EFER MSR number.
.byte 0xf
.byte 0x32
# rdmsr ; Read EFER.
.byte 0xf
.byte 0xba
.byte 0xe8
.byte 0x8
# bts eax, 8 ; Set LME=1.
.byte 0xf
.byte 0x30
# wrmsr ; Write EFER.
#
# Enable paging to activate long mode (set CR0.PG=1)
#
movl %cr0, %eax # Read CR0.
.byte 0xf
.byte 0xba
.byte 0xe8
.byte 0x1f
# bts eax, 31 ; Set PG=1.
movl %eax, %cr0 # Write CR0.
# jmp GoToLongMode
#GoToLongMode:
.byte 0x67
.byte 0xea # Far Jump $+9:Selector to reload CS
OffsetInLongMode:
.long 00000000 # $+9 Offset is ensuing instruction boundary
.word 0x38 # Selector is our code selector, 38h
InLongMode:
# Now in 64-bit mode: reload data segments, set up parameters and
# jump to start64.com at 21000h via push/ret.
.byte 0x66
movw $0x30,%ax
movw %ax,%ds
.byte 0x66
movw $0x18,%ax
movw %ax,%es
movw %ax,%ss
movw %ax,%ds
.byte 0xbd
.long 0x400000
# mov ebp,000400000h ; Destination of EFILDR32
.byte 0xbb
.long 0x70000
# mov ebx,000070000h ; Length of copy
#
# load idt later
#
.byte 0x48
.byte 0x33
.byte 0xc0
# xor rax, rax
.byte 0x66
movw $idtr, %ax
.byte 0x48
.byte 0x5
.long 0x20000
# add rax, 20000h
.byte 0xf
.byte 0x1
.byte 0x18
# lidt fword ptr [rax]
.byte 0x48
.byte 0xc7
.byte 0xc0
.long 0x21000
# mov rax, 21000h
.byte 0x50
# push rax
# ret
.byte 0xc3
#------------------------------------------------------------------------------
# Empty8042InputBuffer — poll the 8042 keyboard controller until its input
# buffer is empty or ~65536 polls have elapsed.
# Out:     ZF set   = input buffer empty (success)
#          ZF clear = timed out with buffer still full
# Clobbers: AL, CX, flags. (The OUT to DELAY_PORT writes whatever is in AX;
#          the value is irrelevant — the port access itself is the ~1us delay.)
#------------------------------------------------------------------------------
Empty8042InputBuffer:
        movw $0,%cx                     # CX=0 -> loopnz iterates 65536 times max
Empty8042Loop:
        outw %ax, $DELAY_PORT           # Dummy I/O write: ~1us delay
        inb $KBD_STATUS_PORT, %al       # Read the 8042 Status Port (0x64)
        andb $0x2,%al                   # Bit 1 = Input Buffer Full flag
        loopnz Empty8042Loop            # Loop until the input buffer is empty or a timeout of ~65536 us
        ret
##############################################################################
# data
##############################################################################
        .p2align 1
# GDT pseudo-descriptor: 16-bit limit followed by a 32-bit linear base.
# Loaded via "lgdt" with a 0x66 operand-size prefix while still in 16-bit
# mode, so exactly this word+dword layout is consumed. The base dword is
# patched at runtime with the physical address of GDT_BASE ("set above",
# i.e. earlier in this file, outside this chunk).
gdtr:   .word GDT_END - GDT_BASE - 1    # GDT limit (bytes - 1)
        .long 0                         # GDT base (patched at runtime)
##############################################################################
# global descriptor table (GDT)
##############################################################################
        .p2align 1
GDT_BASE:
# null descriptor — selector 0 must exist and is never loadable
        .equ NULL_SEL, .-GDT_BASE       # Selector [0x0]
        .word 0                         # limit 15:0
        .word 0                         # base 15:0
        .byte 0                         # base 23:16
        .byte 0                         # access byte
        .byte 0                         # limit 19:16, flags
        .byte 0                         # base 31:24
# linear data segment descriptor (flat 4 GB, base 0)
        .equ LINEAR_SEL, .-GDT_BASE     # Selector [0x8]
        .word 0xFFFF                    # limit 15:0 (full limit 0xFFFFF pages)
        .word 0                         # base 0
        .byte 0
        .byte 0x92                      # present, ring 0, data, expand-up, writable
        .byte 0xCF                      # page-granular, 32-bit, limit 19:16 = 0xF
        .byte 0
# linear code segment descriptor (flat 4 GB, base 0)
        .equ LINEAR_CODE_SEL, .-GDT_BASE # Selector [0x10]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x9A                      # present, ring 0, CODE, non-conforming, readable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# system data segment descriptor
        .equ SYS_DATA_SEL, .-GDT_BASE   # Selector [0x18]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x92                      # present, ring 0, data, expand-up, writable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# system code segment descriptor
        .equ SYS_CODE_SEL, .-GDT_BASE   # Selector [0x20]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x9A                      # present, ring 0, CODE, non-conforming, readable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# spare segment descriptor (all zero — an unusable/null entry, kept so the
# following selectors land on fixed values)
        .equ SPARE3_SEL, .-GDT_BASE     # Selector [0x28]
        .word 0
        .word 0
        .byte 0
        .byte 0                         # not present
        .byte 0
        .byte 0
#
# 64-bit data segment descriptor (DS/ES/SS while in long mode; base/limit
# are ignored by the CPU for data segments in 64-bit mode)
#
        .equ SYS_DATA64_SEL, .-GDT_BASE # Selector [0x30]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x92                      # present, ring 0, data, expand-up, writable
        .byte 0xCF                      # page-granular; limit 19:16 = 0xF
        .byte 0
#
# 64-bit code segment descriptor — the L bit makes this a long-mode segment
#
        .equ SYS_CODE64_SEL, .-GDT_BASE # Selector [0x38]
        .word 0xFFFF                    # limit 0xFFFFF (ignored in 64-bit mode)
        .word 0                         # base 0
        .byte 0
        .byte 0x9A                      # present, ring 0, CODE, non-conforming, readable
        .byte 0xAF                      # G=1, D=0, L=1 (64-bit), limit 19:16 = 0xF
        .byte 0
# spare segment descriptor (all zero — unusable placeholder)
        .equ SPARE4_SEL, .-GDT_BASE     # Selector [0x40]
        .word 0
        .word 0
        .byte 0
        .byte 0                         # not present
        .byte 0
        .byte 0
GDT_END:
        .p2align 1
# IDT pseudo-descriptor consumed by the long-mode "lidt fword ptr [rax]"
# earlier in this file. In 64-bit mode lidt expects a 2-byte limit followed
# by an 8-byte base.
# NOTE(review): the limit is declared as .long (4 bytes) here, while the
# matching 32-bit file below declares it as .word; with a dword limit the
# CPU would fetch the base starting 2 bytes into this field. The base is
# said to be patched at runtime ("gets set above", outside this chunk) —
# verify the patch offset agrees with this layout before changing anything.
idtr:   .long IDT_END - IDT_BASE - 1    # IDT limit
        .quad 0                         # IDT base (patched at runtime)
##############################################################################
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
##############################################################################
#idt_tag db "IDT",0
        .p2align 1
IDT_BASE:
# Each 64-bit IDT entry (interrupt gate) is 16 bytes:
#   .word  handler offset 15:0   (0 here; patched at runtime)
#   .word  code-segment selector (SYS_CODE64_SEL)
#   .byte  IST index / zero
#   .byte  type|P (0x0e = 64-bit interrupt gate, 0x80 = present)
#   .word  handler offset 31:16
#   .long  handler offset 63:32
#   .long  reserved (must be 0)
# Every gate that follows in this table repeats this exact layout; only the
# vector position (and hence the .equ offset) differs.
# divide by zero (INT 0)
        .equ DIV_ZERO_SEL, .-IDT_BASE
        .word 0                         # offset 15:0 (patched at runtime)
        .word SYS_CODE64_SEL            # selector
        .byte 0                         # IST index = 0
        .byte 0x0e | 0x80               # type = 64-bit interrupt gate, present
        .word 0                         # offset 31:16
        .long 0                         # offset 63:32
        .long 0                         # reserved
# debug exception (INT 1)
.equ DEBUG_EXCEPT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# NMI (INT 2)
.equ NMI_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# soft breakpoint (INT 3)
.equ BREAKPOINT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# overflow (INT 4)
.equ OVERFLOW_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# bounds check (INT 5)
.equ BOUNDS_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# invalid opcode (INT 6)
.equ INVALID_OPCODE_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# device not available (INT 7)
.equ DEV_NOT_AVAIL_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# double fault (INT 8)
.equ DOUBLE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Coprocessor segment overrun - reserved (INT 9)
.equ RSVD_INTR_SEL1, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# invalid TSS (INT 0ah)
.equ INVALID_TSS_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# segment not present (INT 0bh)
.equ SEG_NOT_PRESENT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# stack fault (INT 0ch)
.equ STACK_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# general protection (INT 0dh)
.equ GP_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# page fault (INT 0eh)
.equ PAGE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Intel reserved - do not use (INT 0fh)
.equ RSVD_INTR_SEL2, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# floating point error (INT 10h)
.equ FLT_POINT_ERR_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# alignment check (INT 11h)
.equ ALIGNMENT_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# machine check (INT 12h)
.equ MACHINE_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# SIMD floating-point exception (INT 13h)
.equ SIMD_EXCEPTION_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# 84 unspecified descriptors, First 12 of them are reserved, the rest are avail
.fill 84 * 16, 1, 0 # db (84 * 16) dup(0)
# IRQ 0 (System timer) - (INT 68h)
.equ IRQ0_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 1 (8042 Keyboard controller) - (INT 69h)
.equ IRQ1_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
.equ IRQ2_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 3 (COM 2) - (INT 6bh)
.equ IRQ3_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 4 (COM 1) - (INT 6ch)
.equ IRQ4_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 5 (LPT 2) - (INT 6dh)
.equ IRQ5_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 6 (Floppy controller) - (INT 6eh)
.equ IRQ6_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 7 (LPT 1) - (INT 6fh)
.equ IRQ7_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 8 (RTC Alarm) - (INT 70h)
.equ IRQ8_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 9 - (INT 71h)
.equ IRQ9_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 10 - (INT 72h)
.equ IRQ10_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 11 - (INT 73h)
.equ IRQ11_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 12 (PS/2 mouse) - (INT 74h)
.equ IRQ12_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 13 (Floating point error) - (INT 75h)
.equ IRQ13_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 14 (Secondary IDE) - (INT 76h)
.equ IRQ14_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 15 (Primary IDE) - (INT 77h)
.equ IRQ15_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
.fill 16, 1, 0
IDT_END:
        .p2align 1
# Size in bytes of the E820 data stored at MemoryMap. Written by the
# INT 15h/E820 scan earlier in this file (outside this chunk) and handed
# to the loaded image; see the matching code in the 32-bit variant below.
MemoryMapSize:  .long 0
# Raw INT 15h, AX=E820h memory-map buffer. Reserved space here is
# (30*8 + 27) dwords = 1068 bytes; at 20 bytes per entry that is room for
# ~53 entries. The buffer runs up to the .org 0x0fe0 (MyStack) boundary.
MemoryMap:      .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0
        .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
        .org 0x0fe0
# MyStack is both the initial stack top (SP is set to this offset; the stack
# grows downward into the MemoryMap area above) and the anchor for the IVT
# redirect stubs below.
MyStack:
# Below are the pieces of the IVT that are used to redirect INT 68h - 6Fh
# back to INT 08h - 0Fh when in real mode. Each stub is "int imm8; iret"
# (3 bytes); the operands are DECIMAL, so $8..$15 = vectors 08h..0Fh.
# It is 'org'ed to a known low address so it can be set up by
# PlMapIrqToVect in 8259.c.
# NOTE(review): the original comment said "20f00", but this segment is
# loaded at 0x20000 and the stubs sit at offset 0x0fe0, i.e. 0x20fe0 —
# verify against 8259.c.
        int $8
        iret
        int $9
        iret
        int $10
        iret
        int $11
        iret
        int $12
        iret
        int $13
        iret
        int $14
        iret
        int $15
        iret
        .org 0x0ffe
# 0xAA55 signature in the last two bytes of this 4 KB image.
BlockSignature:
        .word 0xaa55
# --- (non-assembly residue from a dataset join, neutralized as comments) ---
# |
# al3xtjames/Clover
# | 30,458
# |
# CloverEFI/BootSector/start32H.S
# |
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2007, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* start32.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#define data32 .byte 0x66
#define addr32 .byte 0x67
#.MODEL small
# NOTE(review): ".stack:" and ".486p:" below look like MASM directives
# (".stack", ".486p") carried over from the original .asm during the GAS
# conversion; as written they are merely harmless labels — confirm before
# removing.
.stack:
.486p:
        .code16                         # boot sector executes in 16-bit real mode
# FAT / sector geometry constants
        .equ FAT_DIRECTORY_ENTRY_SIZE, 0x020    # bytes per FAT directory entry
        .equ FAT_DIRECTORY_ENTRY_SHIFT, 5       # log2(FAT_DIRECTORY_ENTRY_SIZE)
        .equ BLOCK_SIZE, 0x0200                 # bytes per sector
        .equ BLOCK_MASK, 0x01ff                 # offset-within-sector mask
        .equ BLOCK_SHIFT, 9                     # log2(BLOCK_SIZE)
        .org 0x0
        .global _start
_start:
# Standard FAT boot-sector layout: a 3-byte jump over the BIOS Parameter
# Block, then the BPB fields. The zero field values here are placeholders —
# the real values are written into the image by the install tooling.
Ia32Jump:
        jmp BootSectorEntryPoint        # JMP inst - 3 bytes
        nop
OemId:              .ascii "INTEL "     # OemId - 8 bytes
SectorSize:         .word 0             # Sector Size - 2 bytes
SectorsPerCluster:  .byte 0             # Sectors Per Cluster - 1 byte
ReservedSectors:    .word 0             # Reserved Sectors - 2 bytes
NoFats:             .byte 0             # Number of FATs - 1 byte
RootEntries:        .word 0             # Root Entries - 2 bytes (FAT12/16 only)
Sectors:            .word 0             # Number of Sectors - 2 bytes
Media:              .byte 0             # Media descriptor - 1 byte
SectorsPerFat16:    .word 0             # Sectors Per FAT for FAT12/FAT16 - 2 bytes
SectorsPerTrack:    .word 0             # Sectors Per Track - 2 bytes
Heads:              .word 0             # Heads - 2 bytes
HiddenSectors:      .long 0             # Hidden Sectors - 4 bytes
LargeSectors:       .long 0             # Large Sectors - 4 bytes
#******************************************************************************
#
# The structure for FAT32 starting at offset 36 of the boot sector. (At this
# point, the BPB/boot sector for FAT12 and FAT16 differs from the BPB/boot
# sector for FAT32.)
#
#******************************************************************************
SectorsPerFat32:    .long 0             # Sectors Per FAT for FAT32 - 4 bytes
ExtFlags:           .word 0             # Mirror Flag - 2 bytes
FSVersion:          .word 0             # File System Version - 2 bytes
RootCluster:        .long 0             # 1st Cluster Number of Root Dir - 4 bytes
FSInfo:             .word 0             # Sector Number of FSINFO - 2 bytes
BkBootSector:       .word 0             # Sector Number of Backup BootSector - 2 bytes
Reserved:           .fill 12,1,0        # Reserved Field - 12 bytes
PhysicalDrive:      .byte 0             # Physical Drive Number - 1 byte
Reserved1:          .byte 0             # Reserved Field - 1 byte
Signature:          .byte 0             # Extended Boot Signature - 1 byte
VolId:              .ascii " "          # Volume Serial Number - 4 bytes
FatLabel:           .ascii "Clover "    # Volume Label - 11 bytes
FileSystemType:     .ascii "HFSPlus "   # File System Type - 8 bytes
BootSectorEntryPoint:
# Reached from the jump at the top of the sector, still in 16-bit real mode.
# Register contract established by the first-stage loader (per original
# notes — not verifiable from this file alone):
#   ds = 1000h,  es = 2000h + (size of first cluster >> 4)
#   cx = start cluster of EfiLdr,  dx = start cluster of Efivar.bin
# The BPB data stored in this boot sector is reused by later stages.
        movw $0x7c00, %bp               # BP -> classic boot-sector load address
# Hand-encoded far jump (opcode 0xEA, then offset, then segment) to the
# second stage loaded at 2000:0200.
JumpFarInstruction:
        .byte 0xea
JumpOffset:
        .word 0x200
JumpSegment:
        .word 0x2000
/*
PrintStringAndHalt:
movw $0xb800, %ax
movw %ax, %es
movw $160, %di
rep
movsw
Halt:
jmp Halt
ErrorString:
.byte 'S', 0x0c, 'E', 0x0c, 'r', 0x0c, 'r', 0x0c, 'o', 0x0c, 'r', 0x0c, '!', 0x0c
*/
# The .org directives below double as overflow guards: if the code above
# grew past these offsets, assembly would fail ("will cause build break").
        .org 0x01fa
# LBA of this boot sector on the target disk; patched by the installer.
LBAOffsetForBootSector:
        .long 0x0
        .org 0x01fe
        .word 0xaa55                    # boot-sector signature at offset 0x1FE
#******************************************************************************
#******************************************************************************
#******************************************************************************
# I/O-port constants for the A20/8042 code in the second sector.
        .equ DELAY_PORT, 0x0ed          # Port to use for ~1us delay (dummy write)
        .equ KBD_CONTROL_PORT, 0x060    # 8042 data port
        .equ KBD_STATUS_PORT, 0x064     # 8042 status/command port
        .equ WRITE_DATA_PORT_CMD, 0x0d1 # 8042 command: write the output port
        .equ ENABLE_A20_CMD, 0x0df      # 8042 output-port value enabling A20
# Second 512-byte sector begins here; this is the target of the far jump
# at BootSectorEntryPoint (2000:0200). The .org fails the build if the
# first sector overflowed.
        .org 0x200
        .code16
        jmp start
#Em64String:
# .byte 'E', 0x0c, 'm', 0x0c, '6', 0x0c, '4', 0x0c, 'T', 0x0c, ' ', 0x0c, 'U', 0x0c, 'n', 0x0c, 's', 0x0c, 'u', 0x0c, 'p', 0x0c, 'p', 0x0c, 'o', 0x0c, 'r', 0x0c, 't', 0x0c, 'e', 0x0c, 'd', 0x0c, '!', 0x0c
Label:  .ascii "Clover "                # Bootloader Label
#------------------------------------------------------------------------------
# start — second-stage real-mode entry. Normalizes all segment registers to
# CS and points SP at MyStack (offset 0x0FE0 in this segment; the stack
# grows downward into the MemoryMap scratch area).
#------------------------------------------------------------------------------
start:
        movw %cs, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %ss
        movw $MyStack, %sp
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[160],'a'
# mov ax,cs
# mov es,ax
# movw $0xb800, %ax
# movw %ax, %es
# movw $0x61, byte ptr %es:[160]
# movw %cs, %ax
# movw %ax, %es
//advanced algo
/*
; use the INT 0x15, eax= 0xE820 BIOS function to get a memory map
; inputs: es:di -> destination buffer for 24 byte entries
; outputs: bp = entry count, trashes all registers except esi
do_e820:
xor ebx, ebx ; ebx must be 0 to start
xor bp, bp ; keep an entry count in bp
mov edx, 0x0534D4150 ; Place "SMAP" into edx
mov eax, 0xe820
mov [es:di + 20], dword 1 ; force a valid ACPI 3.X entry
mov ecx, 24 ; ask for 24 bytes
int 0x15
jc short .failed ; carry set on first call means "unsupported function"
mov edx, 0x0534D4150 ; Some BIOSes apparently trash this register?
cmp eax, edx ; on success, eax must have been reset to "SMAP"
jne short .failed
test ebx, ebx ; ebx = 0 implies list is only 1 entry long (worthless)
je short .failed
jmp short .jmpin
.e820lp:
mov eax, 0xe820 ; eax, ecx get trashed on every int 0x15 call
mov [es:di + 20], dword 1 ; force a valid ACPI 3.X entry
mov ecx, 24 ; ask for 24 bytes again
int 0x15
jc short .e820f ; carry set means "end of list already reached"
mov edx, 0x0534D4150 ; repair potentially trashed register
.jmpin:
jcxz .skipent ; skip any 0 length entries
cmp cl, 20 ; got a 24 byte ACPI 3.X response?
jbe short .notext
test byte [es:di + 20], 1 ; if so: is the "ignore this data" bit clear?
je short .skipent
.notext:
mov ecx, [es:di + 8] ; get lower dword of memory region length
or ecx, [es:di + 12] ; "or" it with upper dword to test for zero
jz .skipent ; if length qword is 0, skip entry
inc bp ; got a good entry: ++count, move to next storage spot
add di, 24
.skipent:
test ebx, ebx ; if ebx resets to 0, list is complete
jne short .e820lp
.e820f:
mov [mmap_ent], bp ; store the entry count
clc ; there is "jc" on end of list to this point, so the carry must be cleared
ret
.failed:
stc ; "function unsupported" error exit
ret
*/
/* xorl %ebx, %ebx
xorl %ebp, %ebp
movl $0x534d4150, %edx # 0x534d4150 = 'SMAP'
movl $0xe820, %eax
leal MemoryMap, %edi
movl $1, %es:20(%di)
movl $24, %ecx
int $0x15
jc MemMapDone
movl $0x534d4150, %edx
cmpl %edx, %eax
jne MemMapDone
testl %ebx, %ebx
je MemMapDone
jmp JumpIn
MemMapLoop:
movl $0xe820, %eax
movl $1, %es:20(%di)
int $0x15
jc MemMapDone
movl $0x534d4150, %edx
JumpIn:
jcxz SkipEntry
cmpb $20, %cl
jbe NoText
# movl 20(%edi), %eax
testb $1, 20(%edi)
je SkipEntry
NoText:
movl %es:8(%di), %ecx
orl %es:12(%di), %ecx
jz SkipEntry
incl %ebp
addl $24, %edi
SkipEntry:
test %ebx, %ebx
jne MemMapLoop
MemMapDone:
movl %ebp, MemoryMapSize
clc
*/
# ---- Build the BIOS memory map via INT 15h, AX=E820h ----
# ES:DI -> MemoryMap (ES was set to CS in start). EBX is the continuation
# value: 0 for the first call, and 0 again on return when the list is done.
# NOTE(review): this loop requests 20-byte entries (no ACPI 3.x extended
# attributes field; the reference algorithm uses 24) and does not re-check
# EAX for the 'SMAP' signature after the call — both appear deliberate but
# should be confirmed against the BIOSes being targeted.
        movl $0, %ebx
        leal MemoryMap, %edi
MemMapLoop:
        movl $0xe820, %eax
        movl $20, %ecx                  # 20-byte entries (WIKI reference uses $24)
        movl $0x534d4150, %edx          # 0x534d4150 = 'SMAP'
        int $0x15
        jc MemMapDone                   # CF set = error / unsupported: stop
        addl $20, %edi                  # accept the entry just written
        cmpl $0, %ebx
        je MemMapDone                   # EBX back to 0 = list complete
        jmp MemMapLoop
MemMapDone:
        leal MemoryMap, %eax
        subl %eax, %edi                 # EDI = bytes written = 20 * entry count
        movl %edi, MemoryMapSize        # Save the size of the memory map
# ---- Convert segment-relative labels to physical addresses ----
# Patch the GDT/IDT pseudo-descriptors (base dword lives at +2) and the
# far-jump target at JUMP+2, since protected mode needs linear addresses.
        xorl %ebx, %ebx
        movw %cs, %bx                   # BX = current segment
        shll $4, %ebx                   # EBX = physical ("linear") base of segment
        leal GDT_BASE(%ebx), %eax       # EAX = physical address of GDT
        movl %eax, gdtr + 2             # Store into gdtr base field
        leal IDT_BASE(%ebx), %eax       # EAX = physical address of IDT
        movl %eax, idtr + 2             # Store into idtr base field
        leal MemoryMapSize(%ebx), %edx  # EDX = physical address of the memory map
                                        #   (left in EDX for the next stage)
        addl $0x1000, %ebx              # EBX = segment base + 0x1000 (EFI32 image)
        movl %ebx, JUMP+2               # Patch the far jump's 32-bit offset
        addl $0x1000, %ebx
        movl %ebx, %esi                 # ESI = source of EFILDR32 (base + 0x2000)
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[162],'b'
# mov ax,cs
# mov es,ax
# movw $0xb800, %ax
# movw %ax, %es
# movw $0x62, byte ptr %es:[162]
# movw %cs, %ax
# movw %ax, %es
#
# Enable A20 Gate
#
# ---- Enable the A20 gate ----
# First try the BIOS service (INT 15h, AX=2401h); if unsupported, fall back
# to programming the 8042 keyboard controller's output port directly.
        movw $0x2401, %ax               # INT 15h: Enable A20 Gate
        int $0x15
        jnc A20GateEnabled              # CF clear = BIOS did it for us
#
# If INT 15 Function 2401 is not supported, then attempt to Enable A20 manually.
#
#New algo from WIKI
/*      cli
        call a20wait
        movb $0xAD, %al
        outb %al, $KBD_STATUS_PORT
        call a20wait
        movb $0xD0, %al
        outb %al, $KBD_STATUS_PORT
        call a20wait2
        inb $KBD_CONTROL_PORT, %al
        pushl %eax
        call a20wait
        movb $0xD1, %al
        outb %al, $KBD_STATUS_PORT
        call a20wait
        popl %eax
        orb $2, %al
        outb %al, $KBD_CONTROL_PORT
        call a20wait
        movb $0xAE, %al
        outb %al, $KBD_STATUS_PORT
        call a20wait
*/
#UEFI/DUET variant: wait-for-ready, send command, send data, wait again.
# Empty8042InputBuffer returns with ZF clear on timeout.
        call Empty8042InputBuffer       # Empty the Input Buffer on the 8042 controller
        jnz Timeout8042                 # Jump if the 8042 timed out
        outw %ax, $DELAY_PORT           # Delay 1 uS
        movb $WRITE_DATA_PORT_CMD, %al  # 8042 cmd to write output port
        outb %al, $KBD_STATUS_PORT      # Send command to the 8042
        call Empty8042InputBuffer       # Empty the Input Buffer on the 8042 controller
        jnz Timeout8042                 # Jump if the 8042 timed out
        movb $ENABLE_A20_CMD, %al       # gate address bit 20 on
        outb %al, $KBD_CONTROL_PORT     # Send data byte to the 8042
        call Empty8042InputBuffer       # Empty the Input Buffer on the 8042 controller
        movw $25, %cx                   # Delay 25 uS for the command to complete on the 8042
Delay25uS:
        outw %ax, $DELAY_PORT           # Delay 1 uS
        loopl Delay25uS
Timeout8042:
# On timeout we simply fall through and hope A20 is already open.
#WIKI - fast A20 gate (port 0x92) alternative, kept for reference:
# inb $0x92, %al
# orb $2, %al
# outb %al, $0x92
A20GateEnabled:
# Set 80x25 text video mode (INT 10h, AX=0002h) — Slice: mode (1) would be
# graphics; (2) is the text mode used here.
        movw $0x0002, %ax
        int $0x10
# Print a progress character via INT 10h teletype (AH=0Eh). AL=0x33 is the
# character '3' (the original "#put char 6" comment appears stale); BX holds
# page/attribute. NOTE(review): CX is not used by AH=0Eh — confirm intent.
        movl $0x000F, %ebx
        movl $0x0E33, %eax
        movl $0x0010, %ecx
        int $0x10
        movw $0x0008, %bx               # BX = flat data selector, preset for the
                                        #   32-bit code jumped to below
#PAUSE1:
# jmp PAUSE1
#
# DISABLE INTERRUPTS - Entering Protected Mode
#
        cli
# lgdt/lidt with a 0x66 operand-size prefix so the full 32-bit base in the
# pseudo-descriptors (patched above) is loaded from 16-bit mode.
        .byte 0x66
        lgdt gdtr
#PAUSE2:
# jmp PAUSE2
# addr32
        data32
        lidt idtr
#PAUSE3:
# jmp PAUSE3
# Set CR0.PE to enter protected mode.
        movl %cr0, %eax
        orb $1, %al
# data32
# or $1, %eax
        movl %eax, %cr0
JUMP:
# Hand-encoded far jump (0xEA) to selector 0x0010 (LINEAR_CODE_SEL), flushing
# the prefetch queue and loading a 32-bit CS. The .long offset at JUMP+2 was
# patched above with the physical address of the EFI32 image (default shown:
# 0x00020000).
        data32
        .byte 0xea
        .long 0x00020000
        .word 0x0010
#------------------------------------------------------------------------------
# Empty8042InputBuffer — poll the 8042 keyboard controller until its input
# buffer is empty or ~65536 polls have elapsed.
# Out:     ZF set   = input buffer empty (success)
#          ZF clear = timed out with buffer still full
# Clobbers: AL, CX, flags.
#------------------------------------------------------------------------------
Empty8042InputBuffer:
        movw $0, %cx                    # CX=0 -> loopnz iterates 65536 times max
Empty8042Loop:
        outw %ax, $DELAY_PORT           # Delay 1us (dummy I/O write)
        inb $KBD_STATUS_PORT, %al       # Read the 8042 Status Port
        andb $0x2, %al                  # Check the Input Buffer Full flag (bit 1)
        loopnz Empty8042Loop            # Loop until the input buffer is empty or a timeout of ~65536 us
        ret
/*
a20wait:
inb $KBD_STATUS_PORT, %al
testb $2, %al
jnz a20wait
ret
a20wait2:
inb $KBD_STATUS_PORT, %al
testb $1, %al
jz a20wait2
ret
*/
/*
PrintStringAndHalt:
leaw ErrorString, %si
movw $7, %cx
movw $0xb800, %ax
movw %ax, %es
movw $160, %di
rep
movsw
Halt:
jmp Halt
ErrorString:
.byte 'S', 0x0c, 'E', 0x0c, 'r', 0x0c, 'r', 0x0c, 'o', 0x0c, 'r', 0x0c, '!', 0x0c
*/
##############################################################################
# data
##############################################################################
        .p2align 1
# GDT pseudo-descriptor (16-bit limit + 32-bit linear base). The base is
# patched at runtime at gdtr+2 with the physical address of GDT_BASE.
gdtr:   .word GDT_END - GDT_BASE - 1    # GDT limit (bytes - 1)
        .long 0                         # GDT base (patched at runtime)
##############################################################################
# global descriptor table (GDT)
##############################################################################
        .p2align 1
GDT_BASE:
# null descriptor — selector 0 must exist and is never loadable
        .equ NULL_SEL, .-GDT_BASE       # Selector [0x00]
        .word 0                         # limit 15:0
        .word 0                         # base 15:0
        .byte 0                         # base 23:16
        .byte 0                         # access byte
        .byte 0                         # limit 19:16, flags
        .byte 0                         # base 31:24
# linear data segment descriptor (flat 4 GB, base 0)
        .equ LINEAR_SEL, .-GDT_BASE     # Selector [0x08]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x92                      # present, ring 0, data, expand-up, writable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# linear code segment descriptor (flat 4 GB, base 0) — the far jump above
# loads this selector (0x10)
        .equ LINEAR_CODE_SEL, .-GDT_BASE # Selector [0x10]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x9A                      # present, ring 0, CODE, non-conforming, readable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# system data segment descriptor
        .equ SYS_DATA_SEL, .-GDT_BASE   # Selector [0x18]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x92                      # present, ring 0, data, expand-up, writable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# system code segment descriptor
        .equ SYS_CODE_SEL, .-GDT_BASE   # Selector [0x20]
        .word 0xFFFF                    # limit 0xFFFFF
        .word 0                         # base 0
        .byte 0
        .byte 0x9A                      # present, ring 0, CODE, non-conforming, readable
        .byte 0xCF                      # page-granular, 32-bit
        .byte 0
# spare segment descriptor (all zero — unusable placeholder keeping the
# selector layout fixed)
        .equ SPARE3_SEL, .-GDT_BASE     # Selector [0x28]
        .word 0
        .word 0
        .byte 0
        .byte 0                         # not present
        .byte 0
        .byte 0
# spare segment descriptor (all zero)
        .equ SPARE4_SEL, .-GDT_BASE     # Selector [0x30]
        .word 0
        .word 0
        .byte 0
        .byte 0                         # not present
        .byte 0
        .byte 0
# spare segment descriptor (all zero)
        .equ SPARE5_SEL, .-GDT_BASE     # Selector [0x38]
        .word 0
        .word 0
        .byte 0
        .byte 0                         # not present
        .byte 0
        .byte 0
GDT_END:
        .p2align 1
# IDT pseudo-descriptor for the 16/32-bit "lidt" above: 2-byte limit
# followed by a 4-byte linear base. The base is patched at runtime at
# idtr+2 with the physical address of IDT_BASE.
idtr:   .word IDT_END - IDT_BASE - 1    # IDT limit (bytes - 1)
        .long 0                         # IDT base (patched at runtime)
##############################################################################
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
##############################################################################
#idt_tag db "IDT",0
.p2align 1
IDT_BASE:
# NOTE: every gate below is assembled with offset fields of 0; startup code
# patches in the real handler addresses before the IDT is used.
# divide by zero (INT 0)
.equ DIV_ZERO_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# debug exception (INT 1)
.equ DEBUG_EXCEPT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# NMI (INT 2)
.equ NMI_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# soft breakpoint (INT 3)
.equ BREAKPOINT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# overflow (INT 4)
.equ OVERFLOW_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# bounds check (INT 5)
.equ BOUNDS_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid opcode (INT 6)
.equ INVALID_OPCODE_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# device not available (INT 7)
.equ DEV_NOT_AVAIL_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# double fault (INT 8)
.equ DOUBLE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Coprocessor segment overrun - reserved (INT 9)
.equ RSVD_INTR_SEL1, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid TSS (INT 0ah)
.equ INVALID_TSS_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# segment not present (INT 0bh)
.equ SEG_NOT_PRESENT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# stack fault (INT 0ch)
.equ STACK_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# general protection (INT 0dh)
.equ GP_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# page fault (INT 0eh)
.equ PAGE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Intel reserved - do not use (INT 0fh)
.equ RSVD_INTR_SEL2, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# floating point error (INT 10h)
.equ FLT_POINT_ERR_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# alignment check (INT 11h)
.equ ALIGNMENT_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# machine check (INT 12h)
.equ MACHINE_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# SIMD floating-point exception (INT 13h)
.equ SIMD_EXCEPTION_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# 84 unspecified descriptors (vectors 14h-67h); the first 12 are reserved, the rest are available
.fill 84 * 8, 1, 0
# IRQ 0 (System timer) - (INT 68h)
.equ IRQ0_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 1 (8042 Keyboard controller) - (INT 69h)
.equ IRQ1_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
.equ IRQ2_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 3 (COM 2) - (INT 6bh)
.equ IRQ3_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 4 (COM 1) - (INT 6ch)
.equ IRQ4_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 5 (LPT 2) - (INT 6dh)
.equ IRQ5_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 6 (Floppy controller) - (INT 6eh)
.equ IRQ6_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 7 (LPT 1) - (INT 6fh)
.equ IRQ7_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 8 (RTC Alarm) - (INT 70h)
.equ IRQ8_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 9 - (INT 71h)
.equ IRQ9_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 10 - (INT 72h)
.equ IRQ10_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 11 - (INT 73h)
.equ IRQ11_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 12 (PS/2 mouse) - (INT 74h)
.equ IRQ12_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 13 (Floating point error) - (INT 75h)
.equ IRQ13_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 14 (Secondary IDE) - (INT 76h)
.equ IRQ14_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 15 (Primary IDE) - (INT 77h)
.equ IRQ15_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# one final empty (8-byte) descriptor slot
.fill 8, 1, 0
IDT_END:
.p2align 1
# Scratch storage for the firmware memory map; zero at assembly time.
# Presumably filled in by the loader at runtime — TODO confirm against caller.
MemoryMapSize: .long 0
MemoryMap: .long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.org 0x0fe0
MyStack:
# below are the pieces of the IVT used to redirect INT 68h - 6fh
# back to INT 08h - 0fh when in real mode... It is 'org'ed to a
# known low address (20f00) so it can be set up by PlMapIrqToVect in
# 8259.c
# NOTE(review): .org here is 0x0fe0; with the apparent 0x20000 load base that
# is 0x20fe0, not 20f00 as the comment above claims — confirm against 8259.c.
# Each stub is 3 bytes: "int imm8" (2 bytes) followed by "iret" (1 byte).
int $8
iret
int $9
iret
int $10
iret
int $11
iret
int $12
iret
int $13
iret
int $14
iret
int $15
iret
.org 0x0ffe
BlockSignature:
.word 0xaa55 # standard PC boot-block signature at end of the 4 KB block
# ---------------------------------------------------------------------------
# (extraction artifact neutralized — non-assembly metadata rows preserved below
#  as comments so the file remains assemblable:
#  repo: al3xtjames/Clover | size: 42,105 | path: CloverEFI/BootSector/efi64.S)
# ---------------------------------------------------------------------------
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2012, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* efi64.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
##############################################################################
# Now in 64-bit long mode.
##############################################################################
.486:
.stack:
.code:
# NOTE(review): the three labels above (.486 / .stack / .code) look like MASM
# directives carried over during conversion to GAS syntax; as written they are
# ordinary labels with no directive effect.
.org 0x21000
.global _start
_start:
# Size of one default interrupt-handler stub (two 2-byte push-imm8 plus the
# 5-byte near jmp emitted by jmpCommonIdtEntry = 9 bytes); used to step the
# patched handler offsets when the IDT is initialized.
.equ DEFAULT_HANDLER_SIZE, INT1 - INT0
.macro jmpCommonIdtEntry
# jmp commonIdtEntry - this must be hand coded to keep the assembler from
# using an 8 bit relative jump when the entries are
# within 255 bytes of the common entry. This must
# be done to maintain the consistency of the size
# of entry points...
.byte 0xe9 # 0xE9 = near jmp with 32-bit relative displacement
.long commonIdtEntry - . - 4 # offset to jump to
.endm
Start:
# btcl $31,%eax
movl $0x001fffe8,%esp # make final stack aligned
# set OSFXSR and OSXMMEXCPT because some code will use XMM register
.byte 0xf
.byte 0x20
.byte 0xe0
# mov rax, cr4  (hand-encoded: 0F 20 E0)
btsl $9,%eax # set CR4.OSFXSR
btsl $0xa,%eax # set CR4.OSXMMEXCPT
.byte 0xf
.byte 0x22
.byte 0xe0
# mov cr4, rax  (hand-encoded: 0F 22 E0)
# call ClearScreen
# movl $0xb8000, %edi
# movl $String1, %esi
# call PrintString
#Cyc: jmp Cyc
# .byte 0x90, 0x90, 0x90, 0x90, 0x90
# Populate IDT with meaningful offsets for exception handlers...
movl $Idtr, %eax
sidt (%eax) # get fword address of IDT
movl $Halt, %eax
movl %eax,%ebx # use bx to copy 15..0 to descriptors
shrl $16,%eax # use ax to copy 31..16 to descriptors
# 63..32 of descriptors is 0
movl $0x78,%ecx # 78h IDT entries to initialize with unique entry points (exceptions)
movl $(Idtr + 2), %esi
movl (%esi),%edi # edi = IDT base address read back from Idtr
LOOP_1: # loop through all IDT entries exception handlers and initialize to default handler
movw %bx, (%edi) # write bits 15..0 of offset
movw $0x38, 2(%edi) # SYS_CODE64_SEL (0x38) from GDT
movw $(0x0e00 | 0x8000), 4(%edi) # type = 386 interrupt gate, present
movw %ax, 6(%edi) # write bits 31..16 of offset
movl $0, 8(%edi) # write bits 63..32 of offset
addl $16, %edi # move up to next descriptor
addw $DEFAULT_HANDLER_SIZE, %bx # move to next entry point
loopl LOOP_1 # loop back through again until all descriptors are initialized
## at this point edi contains the offset of the descriptor for INT 20
## and bx contains the low 16 bits of the offset of the default handler
## so initialize all the rest of the descriptors with these two values...
# mov ecx, 101 ; there are 100 descriptors left (INT 20 (14h) - INT 119 (77h)
#@@: ; loop through all IDT entries exception handlers and initialize to default handler
# mov word ptr [edi], bx ; write bits 15..0 of offset
# mov word ptr [edi+2], 38h ; SYS_CODE64_SEL from GDT
# mov word ptr [edi+4], 0e00h OR 8000h ; type = 386 interrupt gate, present
# mov word ptr [edi+6], ax ; write bits 31..16 of offset
# mov dword ptr [edi+8], 0 ; write bits 63..32 of offset
# add edi, 16 ; move up to next descriptor
# loop @b ; loop back through again until all descriptors are initialized
## DUMP location of IDT and several of the descriptors
# mov ecx, 8
# mov eax, [offset Idtr + 2]
# mov eax, [eax]
# mov edi, 0b8000h
# call PrintQword
# mov esi, eax
# mov edi, 0b80a0h
# jmp OuterLoop
##
## just for fun, let's do a software interrupt to see if we correctly land in the exception handler...
# mov eax, 011111111h
# mov ebx, 022222222h
# mov ecx, 033333333h
# mov edx, 044444444h
# mov ebp, 055555555h
# mov esi, 066666666h
# mov edi, 077777777h
# push 011111111h
# push 022222222h
# push 033333333h
# int 119
# --- Relocate EFILDR (a PE32+ image) section by section, then jump to it ---
movl $0x22000,%esi # esi = 22000
movl 0x14(%esi),%eax # eax = [22014]
addl %eax,%esi # esi = 22000 + [22014] = Base of EFILDR.C
movl 0x3c(%esi),%ebp # ebp = [22000 + [22014] + 3c] = NT Image Header for EFILDR.C
addl %esi,%ebp
movl 0x30(%ebp),%edi # edi = [NT header + 30] = ImageBase (PE32+; 63..32 is zero, ignore)
movl 0x28(%ebp),%eax # eax = [NT header + 28] = AddressOfEntryPoint
addl %edi,%eax # eax = ImageBase + EntryPoint
movl $EfiLdrOffset, %ebx
movl %eax, (%ebx) # patch the imm32 of the "mov rax" trampoline below with the entry point
movw 6(%ebp), %bx # bx = Number of sections
xorl %eax,%eax
movw 0x14(%ebp), %ax # ax = Optional Header Size
addl %eax,%ebp
addl $0x18,%ebp # ebp = Start of 1st Section
SectionLoop:
pushl %esi # Save Base of EFILDR.C
pushl %edi # Save ImageBase
addl 0x14(%ebp),%esi # esi = Base of EFILDR.C + PointerToRawData
addl 0x0c(%ebp),%edi # edi = ImageBase + VirtualAddress
movl 0x10(%ebp),%ecx # ecx = SizeOfRawData
cld
shrl $2,%ecx # byte count -> dword count for rep movsl
rep
movsl
popl %edi # Restore ImageBase
popl %esi # Restore Base of EFILDR.C
addw $0x28,%bp # ebp = ebp + 028h = Pointer to next section record
.byte 0x66
.byte 0xff
.byte 0xcb
# dec bx  (hand-encoded: 66 FF CB)
cmpw $0,%bx
jne SectionLoop
movl $Idtr, %edx # get size of IDT
movzxw (%edx), %eax # NOTE(review): non-standard mnemonic; GAS spelling is movzwl — confirm toolchain accepts it
.byte 0xff
.byte 0xc0
# inc eax  (hand-encoded: FF C0)
addl 2(%edx), %eax # add to base of IDT to get location of memory map...
xorl %ecx,%ecx
movl %eax,%ecx # put argument to RCX
#Cyc: jmp Cyc
# Trampoline: mov rax, imm32 (48 C7 C0) — the imm32 at EfiLdrOffset was
# patched above with ImageBase + EntryPoint — then push rax; ret to jump there.
.byte 0x48
.byte 0xc7
.byte 0xc0
EfiLdrOffset:
.long 0x00401000 # Offset of EFILDR (default; overwritten at runtime)
# mov rax, 401000h
.byte 0x50
# push rax
# ret
.byte 0xc3
# db "**** DEFAULT IDT ENTRY ***",0
.p2align 1
# Default exception stubs. Every stub MUST assemble to exactly
# DEFAULT_HANDLER_SIZE (= INT1 - INT0) bytes, because the IDT-patching loop
# above steps handler offsets by that constant. For vectors where the CPU
# pushes a hardware error code, two nops replace the placeholder push so the
# stub size stays identical.
Halt:
INT0:
pushl $0x0 # push error code place holder on the stack
pushl $0x0
jmpCommonIdtEntry
# db 0e9h ; jmp, 32-bit relative
# dd commonIdtEntry - $ - 4 ; offset to jump to
INT1:
pushl $0x0 # push error code place holder on the stack
pushl $0x1
jmpCommonIdtEntry
INT2:
pushl $0x0 # push error code place holder on the stack
pushl $0x2
jmpCommonIdtEntry
INT3:
pushl $0x0 # push error code place holder on the stack
pushl $0x3
jmpCommonIdtEntry
INT4:
pushl $0x0 # push error code place holder on the stack
pushl $0x4
jmpCommonIdtEntry
INT5:
pushl $0x0 # push error code place holder on the stack
pushl $0x5
jmpCommonIdtEntry
INT6:
pushl $0x0 # push error code place holder on the stack
pushl $0x6
jmpCommonIdtEntry
INT7:
pushl $0x0 # push error code place holder on the stack
pushl $0x7
jmpCommonIdtEntry
INT8:
# Double fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $0x8
jmpCommonIdtEntry
INT9:
pushl $0x0 # push error code place holder on the stack
pushl $0x9
jmpCommonIdtEntry
INT10:
# Invalid TSS causes an error code to be pushed so no phony push necessary
nop
nop
pushl $10
jmpCommonIdtEntry
INT11:
# Segment Not Present causes an error code to be pushed so no phony push necessary
nop
nop
pushl $11
jmpCommonIdtEntry
INT12:
# Stack fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $12
jmpCommonIdtEntry
INT13:
# GP fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $13
jmpCommonIdtEntry
INT14:
# Page fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $14
jmpCommonIdtEntry
INT15:
pushl $0x0 # push error code place holder on the stack
pushl $15
jmpCommonIdtEntry
INT16:
pushl $0x0 # push error code place holder on the stack
pushl $16
jmpCommonIdtEntry
INT17:
# Alignment check causes an error code to be pushed so no phony push necessary
nop
nop
pushl $17
jmpCommonIdtEntry
INT18:
pushl $0x0 # push error code place holder on the stack
pushl $18
jmpCommonIdtEntry
INT19:
pushl $0x0 # push error code place holder on the stack
pushl $19
jmpCommonIdtEntry
INTUnknown:
# The following segment repeats (0x78 - 20) times:
# No. 1
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 2
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 3
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 4
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 5
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 6
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 7
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 8
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 9
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 10
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 11
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 12
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 13
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 14
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 15
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 16
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 17
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 18
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 19
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 20
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 21
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 22
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 23
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 24
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 25
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 26
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 27
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 28
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 29
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 30
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 31
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 32
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 33
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 34
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 35
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 36
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 37
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 38
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 39
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 40
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 41
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 42
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 43
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 44
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 45
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 46
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 47
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 48
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 49
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 50
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 51
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 52
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 53
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 54
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 55
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 56
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 57
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 58
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 59
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 60
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 61
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 62
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 63
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 64
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 65
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 66
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 67
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 68
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 69
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 70
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 71
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 72
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 73
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 74
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 75
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 76
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 77
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 78
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 79
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 80
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 81
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 82
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 83
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 84
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 85
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 86
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 87
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 88
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 89
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 90
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 91
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 92
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 93
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 94
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 95
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 96
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 97
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 98
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 99
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
# No. 100
pushl $0x0 # push error code place holder on the stack
# push xxh ; push vector number
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
jmpCommonIdtEntry
commonIdtEntry:
## ---------------------------------------------------------------------------
## Common exception/interrupt handler. Each IDT stub pushed an error code
## (or 0) and its vector number before jumping here.
## NOTE: this code executes in 64-bit long mode although it is assembled as
## 32-bit code; the stray ".byte 0x48"/".byte 0x41" values are hand-coded
## REX.W / REX.B prefixes, and the ".byte 0x41,0x5x" pairs are push/pop of
## r8-r15. Do not reorder or resize instructions - encodings are deliberate.
## Dumps all GP registers, the error code, RFLAGS, a stack window and a code
## window around RIP to VGA text memory, then halts.
## ---------------------------------------------------------------------------
## Save the eight legacy registers (as their 64-bit forms, since we are in
## long mode a 32-bit push is actually a 64-bit push).
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
pushl %esp
pushl %ebp
pushl %esi
pushl %edi
## Save r8-r15 (hand-encoded: 41 50..41 57 = push r8..push r15).
.byte 0x41
.byte 0x50
# push r8
.byte 0x41
.byte 0x51
# push r9
.byte 0x41
.byte 0x52
# push r10
.byte 0x41
.byte 0x53
# push r11
.byte 0x41
.byte 0x54
# push r12
.byte 0x41
.byte 0x55
# push r13
.byte 0x41
.byte 0x56
# push r14
.byte 0x41
.byte 0x57
# push r15
## REX.W prefix turns the following mov into "mov rbp, rsp".
.byte 0x48
movl %esp,%ebp
# mov rbp, rsp
##
## At this point the stack looks like this:
##
## Calling SS
## Calling RSP
## rflags
## Calling CS
## Calling RIP
## Error code or 0
## Int num or 0ffh for unknown int num
## rax
## rcx
## rdx
## rbx
## rsp
## rbp
## rsi
## rdi
## r8
## r9
## r10
## r11
## r12
## r13
## r14
## r15 <------- RSP, RBP
##
call ClearScreen
movl $String1, %esi                  ## "*** INT " banner
call PrintString
## Load the vector number (slot 16 counted from r15 at rbp, see map above).
.byte 0x48
movl 16*8(%ebp),%eax ## move Int number into RAX
.byte 0x48
cmpl $19,%eax                        ## vectors 0..19 have descriptive strings
ja PrintDefaultString
PrintExceptionString:
shll $3,%eax ## multiply by 8 to get offset from StringTable to actual string address
addl $StringTable, %eax
movl (%eax),%esi
jmp PrintTheString
PrintDefaultString:
movl $IntUnknownString, %esi
# patch Int number: overwrite the leading "??" with the hex vector number
movl %eax,%edx
call A2C
movb %al,1(%esi)                     ## low nibble -> second char
movl %edx,%eax
shrl $4,%eax
call A2C
movb %al,(%esi)                      ## high nibble -> first char
PrintTheString:
call PrintString
movl $String2, %esi                  ## " HALT!! *** ("
call PrintString
## Print CS:RIP of the faulting instruction.
.byte 0x48
movl 19*8(%ebp),%eax # CS
call PrintQword
movb $':', %al
movb %al, (%edi)
addl $2,%edi                         ## VGA text cells are 2 bytes (char+attr)
.byte 0x48
movl 18*8(%ebp),%eax # RIP
call PrintQword
movl $String3, %esi                  ## ")"
call PrintString
## Register dump - each 0xb8xxx constant is a fixed VGA text-row address.
movl $0xb8140,%edi
movl $StringRax, %esi
call PrintString
.byte 0x48
movl 15*8(%ebp),%eax
call PrintQword
movl $StringRcx, %esi
call PrintString
.byte 0x48
movl 14*8(%ebp),%eax
call PrintQword
movl $StringRdx, %esi
call PrintString
.byte 0x48
movl 13*8(%ebp),%eax
call PrintQword
movl $0xb81e0,%edi
movl $StringRbx, %esi
call PrintString
.byte 0x48
movl 12*8(%ebp),%eax
call PrintQword
movl $StringRsp, %esi
call PrintString
## RSP shown is the interrupted context's RSP saved by the CPU (slot 21),
## not the stale value pushed above.
.byte 0x48
movl 21*8(%ebp),%eax
call PrintQword
movl $StringRbp, %esi
call PrintString
.byte 0x48
movl 10*8(%ebp),%eax
call PrintQword
movl $0xb8280,%edi
movl $StringRsi, %esi
call PrintString
.byte 0x48
movl 9*8(%ebp),%eax
call PrintQword
movl $StringRdi, %esi
call PrintString
.byte 0x48
movl 8*8(%ebp),%eax
call PrintQword
movl $StringEcode, %esi
call PrintString
.byte 0x48
movl 17*8(%ebp),%eax
call PrintQword
movl $0xb8320,%edi
movl $StringR8, %esi
call PrintString
.byte 0x48
movl 7*8(%ebp),%eax
call PrintQword
movl $StringR9, %esi
call PrintString
.byte 0x48
movl 6*8(%ebp),%eax
call PrintQword
movl $StringR10, %esi
call PrintString
.byte 0x48
movl 5*8(%ebp),%eax
call PrintQword
movl $0xb83c0,%edi
movl $StringR11, %esi
call PrintString
.byte 0x48
movl 4*8(%ebp),%eax
call PrintQword
movl $StringR12, %esi
call PrintString
.byte 0x48
movl 3*8(%ebp),%eax
call PrintQword
movl $StringR13, %esi
call PrintString
.byte 0x48
movl 2*8(%ebp),%eax
call PrintQword
movl $0xb8460,%edi
movl $StringR14, %esi
call PrintString
.byte 0x48
movl 1*8(%ebp),%eax
call PrintQword
movl $StringR15, %esi
call PrintString
.byte 0x48
movl 0*8(%ebp),%eax
call PrintQword
movl $StringSs, %esi
call PrintString
.byte 0x48
movl 22*8(%ebp),%eax
call PrintQword
movl $0xb8500,%edi
movl $StringRflags, %esi
call PrintString
.byte 0x48
movl 20*8(%ebp),%eax
call PrintQword
## Dump a 4x4 window of qwords from the interrupted stack (above the saved
## frame, i.e. starting at the caller's SS slot + 1).
movl $0xb8640,%edi
movl %ebp,%esi
addl $23*8,%esi
movl $4,%ecx
OuterLoop:
pushl %ecx
movl $4,%ecx
.byte 0x48
movl %edi,%edx
InnerLoop:
.byte 0x48
movl (%esi),%eax
call PrintQword
addl $8,%esi
movb $0x20, %al # blank character
movb %al,(%edi)
addl $2,%edi
loop InnerLoop
popl %ecx
addl $0xa0,%edx                      ## 0xa0 = one VGA text row (80*2 bytes)
movl %edx,%edi
loop OuterLoop
## Dump 16 qwords of code memory centered-ish on RIP (RIP - 8 qwords).
movl $0xb8960,%edi
.byte 0x48
movl 18*8(%ebp),%eax # RIP
subl $8*8,%eax
.byte 0x48
movl %eax,%esi # esi = rip - 8 QWORD linear (total 16 QWORD)
movl $4,%ecx
OuterLoop1:
pushl %ecx
movl $4,%ecx
movl %edi,%edx
InnerLoop1:
.byte 0x48
movl (%esi),%eax
call PrintQword
addl $8,%esi
movb $0x20, %al # blank character
movb %al,(%edi)
addl $2,%edi
loop InnerLoop1
popl %ecx
addl $0xa0,%edx
movl %edx,%edi
loop OuterLoop1
#wbinvd
## Halt here forever; the restore/iretq path below is intentionally
## unreachable (kept for reference / possible future resume support).
LN_C1:
jmp LN_C1
#
# return
#
movl %ebp,%esp
# mov rsp, rbp
.byte 0x41
.byte 0x5f
# pop r15
.byte 0x41
.byte 0x5e
# pop r14
.byte 0x41
.byte 0x5d
# pop r13
.byte 0x41
.byte 0x5c
# pop r12
.byte 0x41
.byte 0x5b
# pop r11
.byte 0x41
.byte 0x5a
# pop r10
.byte 0x41
.byte 0x59
# pop r9
.byte 0x41
.byte 0x58
# pop r8
popl %edi
popl %esi
popl %ebp
popl %eax # esp
popl %ebx
popl %edx
popl %ecx
popl %eax
## Hand-encoded "add rsp, 16" to discard the error code and vector number.
.byte 0x48
.byte 0x83
.byte 0xc4
.byte 0x10
# add esp, 16 ; error code and INT number
## Hand-encoded iretq (REX.W + iret).
.byte 0x48
.byte 0xcf
# iretq
## ---------------------------------------------------------------------------
## PrintString - write a NUL-terminated ASCII string to VGA text memory.
## In:   ESI/RSI = string address, EDI/RDI = destination VGA cell address
## Out:  EDI advanced past the last character written (2 bytes per cell:
##       only the character byte is written, the attribute byte is skipped)
## Preserves EAX; ESI is left pointing at the terminating NUL.
## ---------------------------------------------------------------------------
PrintString:
pushl %eax
LN_C2:
movb (%esi), %al
cmpb $0,%al                          ## NUL terminator ends the loop
je LN_C3
movb %al, (%edi)
## Hand-encoded "inc esi" (FF C6) - in long mode this zero-extends into rsi.
.byte 0xff
.byte 0xc6
# inc esi
addl $2,%edi                         ## skip the attribute byte of the cell
jmp LN_C2
LN_C3:
popl %eax
ret
## RAX contains qword to print
## RDI contains memory location (screen location) to print it to
## ---------------------------------------------------------------------------
## PrintQword - print RAX as 16 hex digits (most significant nibble first)
## to VGA text memory at RDI, advancing RDI by 2 per digit.
## Runs in long mode: the ".byte 0x48" REX.W prefixes make the mov-rcx and
## the rotate operate on the full 64-bit registers.
## Preserves RAX (value is fully rotated back after 16 x rol 4), RBX, RCX.
## ---------------------------------------------------------------------------
PrintQword:
pushl %ecx
pushl %ebx
pushl %eax
## Hand-encoded "mov rcx, 16" (REX.W + C7 /0 imm32): 16 nibbles to print.
.byte 0x48
.byte 0xc7
.byte 0xc1
.long 16
# mov rcx, 16
looptop:
## REX.W prefix: "rol rax, 4" brings the next-most-significant nibble to AL.
.byte 0x48
roll $4,%eax
movb %al,%bl
andb $0xf,%bl
addb $'0', %bl                       ## 0-9 -> '0'-'9'
cmpb $'9', %bl
jle LN_C4
addb $7,%bl                          ## 10-15 -> 'A'-'F' ('9'+7+1 = 'A')
LN_C4:
movb %bl, (%edi)
addl $2,%edi                         ## 2 bytes per VGA text cell
loop looptop
#wbinvd
popl %eax
popl %ebx
popl %ecx
ret
## ---------------------------------------------------------------------------
## ClearScreen - fill the VGA text screen with blanks, attribute 0x0C
## (light red on black), and leave EDI at the top-left cell (0xb8000).
## NOTE(review): fills 80*24 cells, leaving the 25th row untouched -
## presumably intentional (status row), confirm before changing.
## Preserves EAX, ECX.
## ---------------------------------------------------------------------------
ClearScreen:
pushl %eax
pushl %ecx
movb $0x20, %al # blank character
movb $0xc,%ah                        ## attribute: light red on black
movl $0xb8000,%edi                   ## VGA color text buffer base
movl $80*24,%ecx
LN_C5:
movw %ax, (%edi)                     ## write char+attribute in one store
addl $2,%edi
loop LN_C5
movl $0xb8000,%edi                   ## reset EDI for subsequent printing
popl %ecx
popl %eax
ret
## ---------------------------------------------------------------------------
## A2C - convert the low nibble of AL to its ASCII hex digit ('0'-'9','A'-'F').
## In:  AL (only bits 3:0 used)   Out: AL = ASCII character. Flags clobbered.
## ---------------------------------------------------------------------------
A2C:
andb $0xf,%al
addb $'0', %al
cmpb $'9', %al
jle LN_C6
addb $7,%al                          ## 0x3A..0x3F -> 'A'..'F'
LN_C6:
ret
## ---------------------------------------------------------------------------
## String data used by commonIdtEntry. IntUnknownString's leading "??" is
## patched in place at runtime with the hex vector number.
## ---------------------------------------------------------------------------
String1: .asciz "*** INT "
Int0String: .asciz "00h Divide by 0 -"
Int1String: .asciz "01h Debug exception -"
Int2String: .asciz "02h NMI -"
Int3String: .asciz "03h Breakpoint -"
Int4String: .asciz "04h Overflow -"
Int5String: .asciz "05h Bound -"
Int6String: .asciz "06h Invalid opcode -"
Int7String: .asciz "07h Device not available -"
Int8String: .asciz "08h Double fault -"
Int9String: .asciz "09h Coprocessor seg overrun (reserved) -"
Int10String: .asciz "0Ah Invalid TSS -"
Int11String: .asciz "0Bh Segment not present -"
Int12String: .asciz "0Ch Stack fault -"
Int13String: .asciz "0Dh General protection fault -"
Int14String: .asciz "0Eh Page fault -"
Int15String: .asciz "0Fh (Intel reserved) -"
Int16String: .asciz "10h Floating point error -"
Int17String: .asciz "11h Alignment check -"
Int18String: .asciz "12h Machine check -"
Int19String: .asciz "13h SIMD Floating-Point Exception -"
IntUnknownString: .asciz "??h Unknown interrupt -"
## Each entry is 8 bytes (.long address, .long 0) so the handler can index
## it with vector*8 as if entries were 64-bit pointers.
StringTable: .long Int0String, 0, Int1String, 0, Int2String, 0, Int3String, 0, \
Int4String, 0, Int5String, 0, Int6String, 0, Int7String, 0, \
Int8String, 0, Int9String, 0, Int10String, 0, Int11String, 0, \
Int12String, 0, Int13String, 0, Int14String, 0, Int15String, 0, \
Int16String, 0, Int17String, 0, Int18String, 0, Int19String, 0
String2: .asciz " HALT!! *** ("
String3: .asciz ")"
StringRax: .asciz "RAX="
StringRcx: .asciz " RCX="
StringRdx: .asciz " RDX="
StringRbx: .asciz "RBX="
StringRsp: .asciz " RSP="
StringRbp: .asciz " RBP="
StringRsi: .asciz "RSI="
StringRdi: .asciz " RDI="
StringEcode: .asciz " ECODE="
StringR8: .asciz "R8 ="
StringR9: .asciz " R9 ="
StringR10: .asciz " R10="
StringR11: .asciz "R11="
StringR12: .asciz " R12="
StringR13: .asciz " R13="
StringR14: .asciz " R14="
StringR15: .asciz " R15="
StringSs: .asciz " SS ="
StringRflags: .asciz "RFLAGS="
## NOTE(review): ".float 0" emits 4 zero bytes each (8 bytes total), but a
## 64-bit sidt/lidt operand is 10 bytes (word limit + qword base); the .org
## padding below absorbs any overrun, yet ".word 0 / .quad 0" would state the
## intent more clearly - confirm users of Idtr before changing.
Idtr: .float 0
.float 0
## Pad the image so the 0xAA55 signature lands at the fixed end offset.
.org 0x21ffe
BlockSignature:
.word 0xaa55
|
al3xtjames/Clover
| 22,720
|
CloverEFI/BootSector/start32H2.S
|
# 1 "start32H2.S"
# 1 "start32H2.S" 1
# 1 "<built-in>" 1
# 1 "start32H2.S" 2
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2007, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* start32.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#.MODEL small
## NOTE(review): the three labels below look like MASM directives
## (".stack", ".486p", ".code") that survived a mechanical MASM->GAS
## conversion; as written they merely define unused labels - harmless,
## but confirm before removing.
.stack:
.486p:
.code:
.equ FAT_DIRECTORY_ENTRY_SIZE, 0x020
.equ FAT_DIRECTORY_ENTRY_SHIFT, 5
.equ BLOCK_SIZE, 0x0200
.equ BLOCK_MASK, 0x01ff
.equ BLOCK_SHIFT, 9
## FAT32 BIOS Parameter Block. Field values are zero here; the real values
## are stamped into the boot sector by the disk-imaging tool.
.org 0x0
.global _start
_start:
Ia32Jump:
jmp BootSectorEntryPoint # JMP inst - 3 bytes
nop
OemId: .ascii "INTEL " # OemId - 8 bytes
SectorSize: .word 0 # Sector Size - 2 bytes
SectorsPerCluster: .byte 0 # Sector Per Cluster - 1 byte
ReservedSectors: .word 0 # Reserved Sectors - 2 bytes
NoFats: .byte 0 # Number of FATs - 1 byte
RootEntries: .word 0 # Root Entries - 2 bytes
Sectors: .word 0 # Number of Sectors - 2 bytes
Media: .byte 0 # Media - 1 byte
SectorsPerFat16: .word 0 # Sectors Per FAT for FAT12/FAT16 - 2 byte
SectorsPerTrack: .word 0 # Sectors Per Track - 2 bytes
Heads: .word 0 # Heads - 2 bytes
HiddenSectors: .long 0 # Hidden Sectors - 4 bytes
LargeSectors: .long 0 # Large Sectors - 4 bytes
#******************************************************************************
#The structure for FAT32 starting at offset 36 of the boot sector. (At this point,
#the BPB/boot sector for FAT12 and FAT16 differs from the BPB/boot sector for FAT32.)
#******************************************************************************
SectorsPerFat32: .long 0 # Sectors Per FAT for FAT32 - 4 bytes
ExtFlags: .word 0 # Mirror Flag - 2 bytes
FSVersion: .word 0 # File System Version - 2 bytes
RootCluster: .long 0 # 1st Cluster Number of Root Dir - 4 bytes
FSInfo: .word 0 # Sector Number of FSINFO - 2 bytes
BkBootSector: .word 0 # Sector Number of Bk BootSector - 2 bytes
Reserved: .fill 12,1,0 # Reserved Field - 12 bytes
PhysicalDrive: .byte 0 # Physical Drive Number - 1 byte
Reserved1: .byte 0 # Reserved Field - 1 byte
Signature: .byte 0 # Extended Boot Signature - 1 byte
VolId: .ascii " " # Volume Serial Number - 4 bytes
FatLabel: .ascii "Clover " # Volume Label - 11 bytes
FileSystemType: .ascii "HFSPlus " # File System Type - 8 bytes
BootSectorEntryPoint:
#ASSUME ds:@code
#ASSUME ss:@code
# ds = 1000, es = 2000 + x (size of first cluster >> 4)
# cx = Start Cluster of EfiLdr
# dx = Start Cluster of Efivar.bin
# Re use the BPB data stored in Boot Sector
movw $0x7c00, %bp
## Hand-encoded far jump "jmp 0x2000:0x0200" to the second stage; the
## offset/segment words are labelled so they could be patched if needed.
JumpFarInstruction:
.byte 0xea
JumpOffset:
.word 0x200
JumpSegment:
.word 0x2000
# 107 "start32H2.S"
## Fixed boot-sector layout: LBA hint at 0x1FA, 0xAA55 signature at 0x1FE.
## The .org directives will fail the build if the code above grew too large.
.org 0x01fa # Will cause build break
LBAOffsetForBootSector:
.long 0x0
.org 0x01fe # Will cause build break
.word 0xaa55
#******************************************************************************
#******************************************************************************
#******************************************************************************
## Second stage begins at offset 0x200 (the sector after the boot sector).
.equ DELAY_PORT, 0x0ed # Port to use for 1uS delay
.equ KBD_CONTROL_PORT, 0x060 # 8042 control port
.equ KBD_STATUS_PORT, 0x064 # 8042 status port
.equ WRITE_DATA_PORT_CMD, 0x0d1 # 8042 command to write the data port
.equ ENABLE_A20_CMD, 0x0df # 8042 command to enable A20
.org 0x200 # Will cause build break? lol
.code16
jmp start
#Em64String:
# .byte 'E', 0x0c, 'm', 0x0c, '6', 0x0c, '4', 0x0c, 'T', 0x0c, ' ', 0x0c, 'U', 0x0c, 'n', 0x0c, 's', 0x0c, 'u', 0x0c, 'p', 0x0c, 'p', 0x0c, 'o', 0x0c, 'r', 0x0c, 't', 0x0c, 'e', 0x0c, 'd', 0x0c, '!', 0x0c
Label: .ascii "Clover " # Bootloader Label
## ---------------------------------------------------------------------------
## start - real-mode second stage:
##   1. set all segments = CS and a local stack,
##   2. collect the E820 memory map,
##   3. patch GDT/IDT base addresses and the protected-mode far jump,
##   4. enable the A20 gate,
##   5. switch to 32-bit protected mode and far-jump to EFI32 at 0x20000.
## ---------------------------------------------------------------------------
start:
movw %cs, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
movw $MyStack, %sp
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[160],'a'
# mov ax,cs
# mov es,ax
# movw $0xb800, %ax
# movw %ax, %es
# movw $0x61, byte ptr %es:[160]
# movw %cs, %ax
# movw %ax, %es
# 239 "start32H2.S"
## --- Collect the BIOS E820 memory map into MemoryMap (20 bytes/entry). ---
movl $0, %ebx                        ## EBX=0 starts the E820 enumeration
leal MemoryMap, %edi
MemMapLoop:
movl $0xe820, %eax
movl $20, %ecx #WIKI said $24
movl $0x534d4150, %edx # 0x534d4150 = 'SMAP'
int $0x15
jc MemMapDone                        ## CF set: unsupported or end of map
addl $20, %edi
cmpl $0, %ebx                        ## EBX=0 means last entry returned
je MemMapDone
jmp MemMapLoop
MemMapDone:
leal MemoryMap, %eax
subl %eax, %edi # Get the address of the memory map
movl %edi, MemoryMapSize # Save the size of the memory map
## --- Compute physical addresses and patch descriptors/jump targets. ---
xorl %ebx, %ebx
movw %cs, %bx # BX=segment
shll $4, %ebx # BX="linear" address of segment base
leal GDT_BASE(%ebx), %eax # EAX=PHYSICAL address of gdt
movl %eax, gdtr + 2 # Put address of gdt into the gdtr
leal IDT_BASE(%ebx), %eax # EAX=PHYSICAL address of idt
movl %eax, idtr + 2 # Put address of idt into the idtr
leal MemoryMapSize(%ebx), %edx # Physical base address of the memory map
addl $0x1000, %ebx # Source of EFI32 = $0x21000
movl %ebx, JUMP+2                    ## patch the far-jump offset below
addl $0x1000, %ebx
movl %ebx, %esi # Source of EFILDR32 = $0x22000
# mov ax,0b800h
# mov es,ax
# mov byte ptr es:[162],'b'
# mov ax,cs
# mov es,ax
# movw $0xb800, %ax
# movw %ax, %es
# movw $0x62, byte ptr %es:[162]
# movw %cs, %ax
# movw %ax, %es
# Enable A20 Gate
movw $0x2401, %ax # Enable A20 Gate
int $0x15
jnc A20GateEnabled # Jump if it succeeded
# If INT 15 Function 2401 is not supported, then attempt to Enable A20 manually.
#New algo from WIKI
# 322 "start32H2.S"
#UEFI/DUET
## Fallback: classic 8042 keyboard-controller A20 enable sequence.
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
outw %ax, $DELAY_PORT # Delay 1 uS
movb $WRITE_DATA_PORT_CMD, %al # 8042 cmd to write output port
outb %al, $KBD_STATUS_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
movb $ENABLE_A20_CMD, %al # gate address bit 20 on
outb %al, $KBD_CONTROL_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
movw $25, %cx # Delay 25 uS for the command to complete on the 8042
Delay25uS:
outw %ax, $DELAY_PORT # Delay 1 uS
loopl Delay25uS
Timeout8042:
#WIKI -fast A20gate
# inb $0x92, %al
# orb $2, %al
# outb %al, $0x92
A20GateEnabled:
# movw $0x0002, %ax
# int $0x10
#put char 7
## Print a '7' via INT 10h teletype as a visible progress marker.
movl $0x000F, %ebx
movl $0x0E37, %eax
movl $0x0010, %ecx
int $0x10
#PAUSE1:
# jmp PAUSE1
# DISABLE INTERRUPTS - Entering Protected Mode
movw $0x0008, %bx # Flat data descriptor
cli
## 0x66 operand-size prefix: load the full 32-bit base from gdtr/idtr
## while still in 16-bit code.
.byte 0x66
lgdt gdtr
#PAUSE2:
# jmp PAUSE2
# .byte 0x67
.byte 0x66
lidt idtr
#PAUSE3:
# jmp PAUSE3
movl %cr0, %eax
orb $1, %al                          ## set CR0.PE
# .byte 0x66
# or $1, %eax
movl %eax, %cr0
# now 32-bit protected mode
#.code32
# movl $0x008, %eax # Flat data descriptor
# movl $0x00400000, %ebp # Destination of EFILDR32
# movl $0x00070000, %ebx # Length of copy
## Hand-encoded 32-bit far jump; offset was patched above to the physical
## address of EFI32 (selector 0x10 = LINEAR_CODE_SEL).
JUMP:
# jmp far 0010:00020000
.byte 0x66
.byte 0xea
.long 0x00020000
.word 0x0010
## ---------------------------------------------------------------------------
## Empty8042InputBuffer - poll the 8042 until its input buffer is empty.
## Out: ZF set on success, ZF clear on timeout (~65536 uS); callers test
##      with "jnz Timeout8042". Clobbers AL, CX, flags.
## ---------------------------------------------------------------------------
Empty8042InputBuffer:
movw $0, %cx                         ## CX=0 -> loop counts down 65536 times
Empty8042Loop:
outw %ax, $DELAY_PORT # Delay 1us
inb $KBD_STATUS_PORT, %al # Read the 8042 Status Port
andb $0x2, %al # Check the Input Buffer Full Flag
loopnz Empty8042Loop # Loop until the input buffer is empty or a timeout of 65536 uS
ret
# 431 "start32H2.S"
##############################################################################
# data
##############################################################################
.p2align 1
gdtr: .word GDT_END - GDT_BASE - 1
.long 0 # (GDT base gets set above)
##############################################################################
# global descriptor table (GDT)
# Selector values are the offsets from GDT_BASE (0x08 linear data,
# 0x10 linear code, 0x18 system data, 0x20 system code, ...).
##############################################################################
.p2align 1
GDT_BASE:
# null descriptor
.equ NULL_SEL, .-GDT_BASE
.word 0 # limit 15:0
.word 0 # base 15:0
.byte 0 # base 23:16
.byte 0 # type
.byte 0 # limit 19:16, flags
.byte 0 # base 31:24
# linear data segment descriptor
.equ LINEAR_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# linear code segment descriptor
.equ LINEAR_CODE_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, execute/read
.byte 0xCF # page-granular, 32-bit
.byte 0
# system data segment descriptor
.equ SYS_DATA_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# system code segment descriptor
.equ SYS_CODE_SEL, .-GDT_BASE
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, execute/read
.byte 0xCF # page-granular, 32-bit
.byte 0
# spare segment descriptor (all zero, not present - reserved slot)
.equ SPARE3_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
# spare segment descriptor (all zero, not present - reserved slot)
.equ SPARE4_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
# spare segment descriptor (all zero, not present - reserved slot)
.equ SPARE5_SEL, .-GDT_BASE
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
GDT_END:
.p2align 1
idtr: .word IDT_END - IDT_BASE - 1
.long 0 # (IDT base gets set above)
##############################################################################
# interrupt descriptor table (IDT)
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
# NOTE(review): the offset fields are left as 0 placeholders here and are
# presumably filled in at runtime by the protected-mode loader - confirm
# against the code that walks this table.
##############################################################################
#idt_tag db "IDT",0
.p2align 1
IDT_BASE:
# divide by zero (INT 0)
.equ DIV_ZERO_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# debug exception (INT 1)
.equ DEBUG_EXCEPT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# NMI (INT 2)
.equ NMI_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# soft breakpoint (INT 3)
.equ BREAKPOINT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# overflow (INT 4)
.equ OVERFLOW_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# bounds check (INT 5)
.equ BOUNDS_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid opcode (INT 6)
.equ INVALID_OPCODE_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# device not available (INT 7)
.equ DEV_NOT_AVAIL_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# double fault (INT 8)
.equ DOUBLE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Coprocessor segment overrun - reserved (INT 9)
.equ RSVD_INTR_SEL1, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# invalid TSS (INT 0ah)
.equ INVALID_TSS_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# segment not present (INT 0bh)
.equ SEG_NOT_PRESENT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# stack fault (INT 0ch)
.equ STACK_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# general protection (INT 0dh)
.equ GP_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# page fault (INT 0eh)
.equ PAGE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Intel reserved - do not use (INT 0fh)
.equ RSVD_INTR_SEL2, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# floating point error (INT 10h)
.equ FLT_POINT_ERR_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# alignment check (INT 11h)
.equ ALIGNMENT_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# machine check (INT 12h)
.equ MACHINE_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# SIMD floating-point exception (INT 13h)
.equ SIMD_EXCEPTION_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# 84 unspecified descriptors, First 12 of them are reserved, the rest are avail
.fill 84 * 8, 1, 0
# IRQ 0 (System timer) - (INT 68h)
.equ IRQ0_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 1 (8042 Keyboard controller) - (INT 69h)
.equ IRQ1_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
.equ IRQ2_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 3 (COM 2) - (INT 6bh)
.equ IRQ3_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 4 (COM 1) - (INT 6ch)
.equ IRQ4_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 5 (LPT 2) - (INT 6dh)
.equ IRQ5_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 6 (Floppy controller) - (INT 6eh)
.equ IRQ6_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 7 (LPT 1) - (INT 6fh)
.equ IRQ7_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 8 (RTC Alarm) - (INT 70h)
.equ IRQ8_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 9 - (INT 71h)
.equ IRQ9_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 10 - (INT 72h)
.equ IRQ10_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 11 - (INT 73h)
.equ IRQ11_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 12 (PS/2 mouse) - (INT 74h)
.equ IRQ12_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 13 (Floating point error) - (INT 75h)
.equ IRQ13_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 14 (Secondary IDE) - (INT 76h)
.equ IRQ14_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
# IRQ 15 (Primary IDE) - (INT 77h)
.equ IRQ15_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.fill 8, 1, 0
IDT_END:
.p2align 1
## E820 memory map buffer filled by the loop in "start" above.
## Each E820 entry is 20 bytes; MemoryMapSize receives the total bytes used.
MemoryMapSize: .long 0
MemoryMap: .long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
## Stack grows down from 0x0fe0 (relative to this stage's segment).
.org 0x0fe0
MyStack:
# below is the pieces of the IVT that is used to redirect INT 68h - 6fh
# back to INT 08h - 0fh when in real mode... It is 'org'ed to a
# known low address (20f00) so it can be set up by PlMapIrqToVect in
# 8259.c
## Each pair is a 2-byte trampoline: "int <8..15>" (decimal = 08h..0Fh)
## followed by iret.
int $8
iret
int $9
iret
int $10
iret
int $11
iret
int $12
iret
int $13
iret
int $14
iret
int $15
iret
## 0xAA55 signature at the fixed end offset of this stage.
.org 0x0ffe
BlockSignature:
.word 0xaa55
|
al3xtjames/Clover
| 38,283
|
CloverEFI/BootSector/efi32.S
|
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2011, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* efi32.asm
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
##############################################################################
# Now in 32-bit protected mode.
##############################################################################
## EFI32 stage: entered in 32-bit protected mode at linear 0x21000.
.org 0x21000
.code32
.global _start
_start:
## Size of one default IDT stub; INT0/INT1 are defined later in this file.
## The fixed size lets the IDT-init loop step from one stub to the next.
.equ DEFAULT_HANDLER_SIZE, INT1 - INT0
.macro jmpCommonIdtEntry
# jmp commonIdtEntry - this must be hand coded to keep the assembler from
# using a 8 bit relative jump when the entries are
# within 255 bytes of the common entry. This must
# be done to maintain the consistency of the size
# of entry points...
.byte 0xe9 # jmp rel32 (E9 + 32-bit displacement)
.long commonIdtEntry - . - 4 # offset to jump to
.endm
Start:
	# Load the selector passed in BX into every data segment register.
	# (NOTE(review): assumes the previous stage left a flat data selector
	# in BX - confirm against the real-mode hand-off code.)
	movw	%bx, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
#	.byte	0x66
#	movl	$0x00400000, %ebp
	movl	$0x001ffff0, %esp		# temporary stack just below 2 MB
#	movl	$0x00070000, %ebx
#Slice
#	call	ClearScreen
#	movl	$0xb8000, %edi
#	movl	Int9String, %esi
#	call	PrintString
#	call	PrintString
#	call	PrintString
#	call	PrintString
# Populate IDT with meaningful offsets for exception handlers...
	sidt	Idtr				# store IDTR (16-bit limit + 32-bit base) at Idtr
	leal	Halt, %eax			# eax = address of first default handler
	movl	%eax, %ebx			# use bx to copy 15..0 to descriptors
	shrl	$16, %eax			# use ax to copy 31..16 to descriptors
	movl	$0x78, %ecx			# 78h IDT entries to initialize with unique entry points (exceptions)
	movl	(Idtr + 2), %edi		# edi = IDT base address
LOOP_1:	# loop through all IDT entries exception handlers and initialize to default handler
	movw	%bx, (%edi)			# write bits 15..0 of offset
	movw	$0x20, 2(%edi)			# SYS_CODE_SEL from GDT
	movw	$(0x0e00 | 0x8000), 4(%edi)	# type = 386 interrupt gate, present
	movw	%ax, 6(%edi)			# write bits 31..16 of offset
	addl	$8, %edi			# move up to next descriptor
	addw	$DEFAULT_HANDLER_SIZE, %bx	# move to next entry point (each stub is the same size)
	loop	LOOP_1				# loop back through again until all descriptors are initialized
## at this point edi contains the offset of the descriptor for INT 20
## and bx contains the low 16 bits of the offset of the default handler
## so initialize all the rest of the descriptors with these two values...
#	mov	ecx, 101		; there are 100 descriptors left (INT 20 (14h) - INT 119 (77h)
#@@:					; loop through all IDT entries exception handlers and initialize to default handler
#	mov	word ptr [edi], bx	; write bits 15..0 of offset
#	mov	word ptr [edi+2], 20h	; SYS_CODE_SEL from GDT
#	mov	word ptr [edi+4], 0e00h OR 8000h	; type = 386 interrupt gate, present
#	mov	word ptr [edi+6], ax	; write bits 31..16 of offset
#	add	edi, 8			; move up to next descriptor
#	loop	@b			; loop back through again until all descriptors are initialized
## DUMP    location of IDT and several of the descriptors
#	mov	ecx, 8
#	mov	eax, [offset Idtr + 2]
#	mov	eax, [eax]
#	mov	edi, 0b8000h
#	call	PrintDword
#	mov	esi, eax
#	mov	edi, 0b80a0h
#	jmp	OuterLoop
##
## just for fun, let's do a software interrupt to see if we correctly land in the exception handler...
#	movl	$0x011111111, %eax
#	mov	ebx, 022222222h
#	mov	ecx, 033333333h
#	mov	edx, 044444444h
#	mov	ebp, 055555555h
#	mov	esi, 066666666h
#	mov	edi, 077777777h
#	push	011111111h
#	push	022222222h
#	push	$0x033333333
#	int	$119
#	movl	$0xb8000, %edi
#	movl	Int9String, %esi
#	call	PrintString
#	int	$5
#	jmp	Halt
	# Locate the EFILDR.C image inside the container loaded at 0x22000 and
	# copy its PE sections to their linked addresses.
	movl	$0x22000, %esi			# esi = 22000
	movl	0x14(%esi), %eax		# eax = [22014]
	addl	%eax, %esi			# esi = 22000 + [22014] = Base of EFILDR.C
	movl	0x3c(%esi), %ebp		# ebp = [22000 + [22014] + 3c] = e_lfanew
	addl	%esi, %ebp			# ebp = NT Image Header for EFILDR.C
	movl	0x34(%ebp), %edi		# edi = [NT header + 0x34] = ImageBase (PE32 optional header)
	movl	0x28(%ebp), %eax		# eax = [NT header + 0x28] = AddressOfEntryPoint
	addl	%edi, %eax			# eax = ImageBase + EntryPoint
	movl	%eax, (EfiLdrOffset + 1)	# self-modifying: patch the imm32 of the 'movl' at EfiLdrOffset
	movw	6(%ebp), %bx			# bx = NumberOfSections
	xorl	%eax, %eax
	movw	0x14(%ebp), %ax			# ax = SizeOfOptionalHeader
	addl	%eax, %ebp
	addl	$0x18, %ebp			# ebp = Start of 1st Section (NT hdr + 0x18 + opt hdr size)
SectionLoop:
	pushl	%esi				# Save Base of EFILDR.C
	pushl	%edi				# Save ImageBase
	addl	0x14(%ebp), %esi		# esi = Base of EFILDR.C + PointerToRawData
	addl	0x0c(%ebp), %edi		# edi = ImageBase + VirtualAddress
	movl	0x10(%ebp), %ecx		# ecx = SizeOfRawData (assumed dword-multiple - file aligned)
	cld
	shrl	$2, %ecx			# copy as dwords
	rep
	movsl
	popl	%edi				# Restore ImageBase
	popl	%esi				# Restore Base of EFILDR.C
	addw	$0x28, %bp			# ebp = ebp + 028h = Pointer to next section record
	decw	%bx
	cmpw	$0, %bx
	jne	SectionLoop
#Slice
#	movl	$0xb8000, %edi
#	movl	Int9String, %esi
#	call	PrintString
#
	# The BIOS memory map was stored immediately after the IDT; compute its
	# address (IDT base + limit + 1) and pass it to EFILDR on the stack.
	movzwl	(Idtr), %eax			# get size (limit) of IDT
	incl	%eax
	addl	(Idtr + 2), %eax		# add to base of IDT to get location of memory map...
	pushl	%eax				# push memory map location on stack for call to EFILDR...
	pushl	%eax				# push return address (useless, just for stack balance)
EfiLdrOffset:
	movl	$0x00401000, %eax		# Offset of EFILDR (imm32 patched above with the real entry point)
	jmpl	*%eax				# tail-jump into EFILDR; never returns
# db "**** DEFAULT IDT ENTRY ***",0
	.p2align 1
# Exception stubs for vectors 0..19.  Every stub must assemble to exactly
# DEFAULT_HANDLER_SIZE (9) bytes because the IDT setup loop in Start steps
# the handler offset by that constant.  Vectors for which the CPU pushes a
# real error code (8, 10-14, 17) use two NOPs in place of the phony
# error-code push so the stack layout stays uniform.
Halt:
INT0:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x0	# vector number
	jmpCommonIdtEntry
#	db	0e9h			; near jmp, 32-bit relative
#	dd	commonIdtEntry - $ - 4	; offset to jump to
INT1:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x1
	jmpCommonIdtEntry
INT2:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x2
	jmpCommonIdtEntry
INT3:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x3
	jmpCommonIdtEntry
INT4:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x4
	jmpCommonIdtEntry
INT5:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x5
	jmpCommonIdtEntry
INT6:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x6
	jmpCommonIdtEntry
INT7:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x7
	jmpCommonIdtEntry
INT8:
#	 Double fault causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$0x8
	jmpCommonIdtEntry
INT9:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$0x9
	jmpCommonIdtEntry
INT10:
#	 Invalid TSS causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$10
	jmpCommonIdtEntry
INT11:
#	 Segment Not Present causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$11
	jmpCommonIdtEntry
INT12:
#	 Stack fault causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$12
	jmpCommonIdtEntry
INT13:
#	 GP fault causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$13
	jmpCommonIdtEntry
INT14:
#	 Page fault causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$14
	jmpCommonIdtEntry
INT15:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$15
	jmpCommonIdtEntry
INT16:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$16
	jmpCommonIdtEntry
INT17:
#	 Alignment check causes an error code to be pushed so no phony push necessary
	nop
	nop
	pushl	$17
	jmpCommonIdtEntry
INT18:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$18
	jmpCommonIdtEntry
INT19:
	pushl	$0x0	# push error code place holder on the stack
	pushl	$19
	jmpCommonIdtEntry
#------------------------------------------------------------------------------
# Default stubs for the remaining vectors 20 (0x14) .. 119 (0x77).
# Each stub is exactly 9 bytes (== DEFAULT_HANDLER_SIZE):
#   2 bytes  pushl $0x0            - error code place holder
#   2 bytes  push  imm8            - vector number, hand-encoded as 0x6a nn so
#                                    the assembler cannot pick a wider form
#   5 bytes  jmpCommonIdtEntry     - hand-encoded 32-bit relative jmp
# The vector number is computed at assembly time from the stub's offset in
# the table: (offset_of_imm8 - 3) / 9 + 20.  The .rept below expands to the
# exact same byte sequence the previous 100 hand-duplicated copies produced;
# '.' is re-evaluated in every expansion, so each stub gets its own vector.
#------------------------------------------------------------------------------
INTUnknown:
.rept (0x78 - 20)
	pushl	$0x0				# push error code place holder on the stack
#	push	$vector				# hand-encoded below to force the imm8 form
	.byte	0x6a
	.byte	( . - INTUnknown - 3 ) / 9 + 20	# vector number
	jmpCommonIdtEntry
.endr
commonIdtEntry:
	pushal					# save eax,ecx,edx,ebx,esp,ebp,esi,edi
	movl	%esp, %ebp			# ebp = base of the saved-register frame
##
##  At this point the stack looks like this (offsets from %ebp):
##
##	eflags					48
##	Calling CS				44
##	Calling EIP				40
##	Error code or 0				36
##	Int num or 0ffh for unknown int num	32
##	eax					28
##	ecx					24
##	edx					20
##	ebx					16
##	esp					12
##	ebp					 8
##	esi					 4
##	edi					 0	<------- ESP, EBP
##
#	call	ClearScreen
	movl	$0xb8000, %edi			# VGA text buffer, row 0
	leal	String1, %esi
	call	PrintString			# "*** INT "
	movl	32(%ebp), %eax			## move Int number into EAX
	cmpl	$19, %eax
	ja	PrintDefaultString		# vectors > 19 have no dedicated name string
PrintExceptionString:
	shll	$2, %eax			## multiply by 4 to get offset from StringTable to actual string address
	movl	StringTable(%eax), %esi
	jmp	PrintTheString
PrintDefaultString:
	leal	IntUnknownString, %esi
	# patch the "??" placeholder in IntUnknownString with the vector in hex
	movl	%eax, %edx			# keep the vector number
	call	A2C				# low nibble -> ASCII hex digit
	movb	%al, 1(%esi)
	movl	%edx, %eax
	shrl	$4, %eax
	call	A2C				# high nibble -> ASCII hex digit
	movb	%al, (%esi)
PrintTheString:
	call	PrintString
	leal	String2, %esi
	call	PrintString			# " HALT!! *** ("
	movl	44(%ebp), %eax			# CS
	call	PrintDword
	movb	$':', (%edi)
	addl	$2, %edi
	movl	40(%ebp), %eax			# EIP
	call	PrintDword
	leal	String3, %esi
	call	PrintString			# ")"
	# Register dump: row 2 of the screen (0xb8140 = 0xb8000 + 2*160).
	movl	$0xb8140, %edi
	leal	StringEax, %esi			# eax
	call	PrintString
	movl	28(%ebp), %eax
	call	PrintDword
	leal	StringEbx, %esi			# ebx
	call	PrintString
	movl	16(%ebp), %eax
	call	PrintDword
	leal	StringEcx, %esi			# ecx
	call	PrintString
	movl	24(%ebp), %eax
	call	PrintDword
	leal	StringEdx, %esi			# edx
	call	PrintString
	movl	20(%ebp), %eax
	call	PrintDword
	leal	StringEcode, %esi		# error code
	call	PrintString
	movl	36(%ebp), %eax
	call	PrintDword
	movl	$0xb81e0, %edi			# row 3
	leal	StringEsp, %esi			# esp
	call	PrintString
	movl	12(%ebp), %eax
	call	PrintDword
	leal	StringEbp, %esi			# ebp
	call	PrintString
	movl	8(%ebp), %eax
	call	PrintDword
	leal	StringEsi, %esi			# esi
	call	PrintString
	movl	4(%ebp), %eax
	call	PrintDword
	leal	StringEdi, %esi			# edi
	call	PrintString
	movl	(%ebp), %eax
	call	PrintDword
	leal	StringEflags, %esi		# eflags
	call	PrintString
	movl	48(%ebp), %eax
	call	PrintDword
	# Dump 8 rows x 8 dwords of the stack, starting just above the
	# exception frame (ebp + 52 is the first dword past saved eflags).
	movl	$0xb8320, %edi			# row 5
	movl	%ebp, %esi
	addl	$52, %esi
	movl	$8, %ecx			# 8 screen rows
OuterLoop:
	pushl	%ecx
	movl	$8, %ecx			# 8 dwords per row
	movl	%edi, %edx			# remember start of this screen row
InnerLoop:
	movl	(%esi), %eax
	call	PrintDword
	addl	$4, %esi
	movb	$' ', (%edi)
	addl	$2, %edi
	loop	InnerLoop
	popl	%ecx
	addl	$0xa0, %edx			# next screen row (160 bytes per text row)
	movl	%edx, %edi
	loop	OuterLoop
	# Dump 8 rows x 8 dwords of code around the faulting EIP.
	movl	$0xb8960, %edi			# row 15
	movl	40(%ebp), %eax			# EIP
	subl	$32*4, %eax
	movl	%eax, %esi			# esi = eip - 32 DWORD linear (total 64 DWORD)
	movl	$8, %ecx
OuterLoop1:
	pushl	%ecx
	movl	$8, %ecx
	movl	%edi, %edx
InnerLoop1:
	movl	(%esi), %eax
	call	PrintDword
	addl	$4, %esi
	movb	$' ', (%edi)
	addl	$2, %edi
	loop	InnerLoop1
	popl	%ecx
	addl	$0xa0, %edx
	movl	%edx, %edi
	loop	OuterLoop1
#	wbinvd	; this instruction is not supported on pre-486 architectures
LN_C1:
	jmp	LN_C1				# hang forever - the dump above is final
#
# return path (unreachable - kept for reference only)
#
	movl	%ebp, %esp
	popal
	addl	$8, %esp			# error code and INT number
	iretl
#------------------------------------------------------------------------------
# PrintString - copy a NUL-terminated string to VGA text memory.
# In:    esi = string (advanced to the terminating NUL on return)
#        edi = screen position (advanced past the last character cell)
# Saves: eax.  Attribute bytes in the text buffer are left untouched.
#------------------------------------------------------------------------------
PrintString:
	pushl	%eax
PS_next:
	movb	(%esi), %al		# fetch next character
	testb	%al, %al		# reached the NUL terminator?
	jz	PS_done
	movb	%al, (%edi)		# write character byte only
	incl	%esi
	addl	$2, %edi		# skip over the attribute byte
	jmp	PS_next
PS_done:
	popl	%eax
	ret
## EAX contains dword to print
## EDI contains memory location (screen location) to print it to
PrintDword:
pushl %ecx
pushl %ebx
pushl %eax
movl $8, %ecx
looptop:
roll $4, %eax
movb %al, %bl
andb $0xf, %bl
addb $'0', %bl
cmpb $'9', %bl
jle LN_C4
addb $7, %bl
LN_C4:
movb %bl, (%edi)
addl $2, %edi
loop looptop
#wbinvd
popl %eax
popl %ebx
popl %ecx
ret
# ClearScreen - fill the text-mode screen with NUL characters.
# Every cell is written with attribute 0x0C (bright red on black).
# On return EDI points at the top-left cell (0xB8000); EAX/ECX preserved.
# NOTE(review): only 80*24 cells are cleared, leaving the last text row
# untouched - presumably intentional, but worth confirming.
ClearScreen:
pushl %eax
pushl %ecx
movb $0x00, %al # character = NUL
movb $0xc, %ah # attribute = bright red on black
movl $0xb8000, %edi # start of color text-mode frame buffer
movl $80*24, %ecx
LN_C5:
movw %ax, (%edi) # store character+attribute pair
addl $2, %edi
loop LN_C5
movl $0xb8000, %edi # leave EDI at the top-left cell
popl %ecx
popl %eax
ret
# A2C - convert the low nibble of AL to its ASCII hex digit ('0'-'9','A'-'F').
# Only AL is modified.
A2C:
andb $0xf, %al # keep low nibble only
addb $'0', %al # 0-9 -> '0'-'9'
cmpb $'9', %al
jle LN_C6
addb $7, %al # 0xA-0xF -> 'A'-'F'
LN_C6:
ret
#
# Static data: exception-name strings, the per-vector message lookup table
# and the register-dump labels used by the fault handler above.
#
String1: .asciz "*** INT "
Int0String: .asciz "00h Divide by 0 -"
Int1String: .asciz "01h Debug exception -"
Int2String: .asciz "02h NMI -"
Int3String: .asciz "03h Breakpoint -"
Int4String: .asciz "04h Overflow -"
Int5String: .asciz "05h Bound -"
Int6String: .asciz "06h Invalid opcode -"
Int7String: .asciz "07h Device not available -"
Int8String: .asciz "08h Double fault -"
Int9String: .asciz "09h Coprocessor seg overrun (reserved) -"
Int10String: .asciz "0Ah Invalid TSS -"
Int11String: .asciz "0Bh Segment not present -"
Int12String: .asciz "0Ch Stack fault -"
Int13String: .asciz "0Dh General protection fault -"
Int14String: .asciz "0Eh Page fault -"
Int15String: .asciz "0Fh (Intel reserved) -"
Int16String: .asciz "10h Floating point error -"
Int17String: .asciz "11h Alignment check -"
Int18String: .asciz "12h Machine check -"
Int19String: .asciz "13h SIMD Floating-Point Exception -"
IntUnknownString: .asciz "??h Unknown interrupt -"
# Message pointers indexed by exception vector number (0-19).
StringTable: .long Int0String, Int1String, Int2String, Int3String
.long Int4String, Int5String, Int6String, Int7String
.long Int8String, Int9String, Int10String, Int11String
.long Int12String, Int13String, Int14String, Int15String
.long Int16String, Int17String, Int18String, Int19String
String2: .asciz " HALT!! *** ("
String3: .asciz ")"
StringEax: .asciz "EAX="
StringEbx: .asciz " EBX="
StringEcx: .asciz " ECX="
StringEdx: .asciz " EDX="
StringEcode: .asciz " ECODE="
StringEsp: .asciz "ESP="
StringEbp: .asciz " EBP="
StringEsi: .asciz " ESI="
StringEdi: .asciz " EDI="
StringEflags: .asciz " EFLAGS="
.p2align 1
# 6-byte scratch image for SIDT/LIDT (limit:word, base:dword).
Idtr: .skip 6
# Pad to the fixed end of the block and place the boot signature.
.org 0x21ffe
BlockSignature:
.word 0xaa55
|
al3xtjames/Clover
| 13,838
|
CloverEFI/BootSector/cdboot.s
|
; Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
;
; @APPLE_LICENSE_HEADER_START@
;
; Portions Copyright (c) 2003 Apple Computer, Inc. All Rights
; Reserved. This file contains Original Code and/or Modifications of
; Original Code as defined in and that are subject to the Apple Public
; Source License Version 2.0 (the "License"). You may not use this file
; except in compliance with the License. Please obtain a copy of the
; License at http://www.apple.com/publicsource and read it before using
; this file.
;
; The Original Code and all software distributed under the License are
; distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
; EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
; INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
; FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the
; License for the specific language governing rights and limitations
; under the License.
;
; @APPLE_LICENSE_HEADER_END@
; nasm cdboot.s -o cdboot
;
; This version of cdboot loads 256k of data.
;
; Modifications by Tamas Kosarszky on 2008-10-20
;
;
; Set to 1 to enable obscure debug messages.
;
DEBUG EQU 0
;
; Set to 1 to enable unused code.
;
UNUSED EQU 0
;
; Set to 1 to enable verbose mode.
;
VERBOSE EQU 1
;
; Various constants.
;
NULL EQU 0
CR EQU 0x0D
LF EQU 0x0A
;
; Macros.
;
; Helper macros. The Debug*/Print* wrappers below expand to real code only
; when DEBUG is non-zero; otherwise they expand to nothing (see %if below).
%macro jmpabs 1 ; absolute near jump via push/ret
push WORD %1
ret
%endmacro
%macro DebugCharMacro 1 ; print a character, then wait for a key
pushad
mov al, %1
call print_char
call getc
popad
%endmacro
%macro DebugPauseMacro 0 ; wait for a key press
push ax
call getc
pop ax
%endmacro
%macro PrintCharMacro 1 ; print a character, preserving registers
pushad
mov al, %1
call print_char
popad
%endmacro
%macro PutCharMacro 1 ; print the character already in AL
call print_char
%endmacro
%macro PrintHexMacro 1 ; (disabled) print EAX in hex
; call print_hex
%endmacro
%macro PrintString 1 ; print the NUL-terminated string at %1
mov si, %1
call print_string
%endmacro
%macro LogString 1 ; print 'cdboot: ' prefix plus the string at %1
mov di, %1
call log_string
%endmacro
%if DEBUG
%define DebugChar(x) DebugCharMacro x
%define DebugPause(x) DebugPauseMacro
%define PrintChar(x) PrintCharMacro x
%define PutChar(x) PutCharMacro
%define PrintHex(x) ;PrintHexMacro x
%else
%define DebugChar(x)
%define DebugPause(x)
%define PrintChar(x)
%define PutChar(x)
%define PrintHex(x)
%endif
maxSectorCount EQU 8 ; maximum sector count for readSectors
CDBootSizeMagic EQU 0xDEADFACE ; indicates if the size field was not specificed
; at build time.
kSectorBytes EQU 2048 ; sector size in bytes
kBoot2Size EQU 65024 ; default load size for boot2
kBoot2MaxSize EQU (472*1024-512) ;458240 ; max size for boot2
kBoot2Address EQU 0x0200 ; boot2 load address
kBoot2Segment EQU 0x2000 ; boot2 load segment
kBoot0Stack EQU 0xFFF0 ; boot0 stack pointer
kReadBuffer EQU 0x1000 ; disk data buffer address
kVolSectorOffset EQU 0x47 ; offset in buffer of sector number
; in volume descriptor
kBootSectorOffset EQU 0x28 ; offset in boot catalog
; of sector number to load boot file
kBootCountOffset EQU 0x26 ; offset in boot catalog
; of number of sectors to read
kCDBootSizeOffset EQU 2048 - 4 ; the file size can be found at the
; last dword of this sector.
;--------------------------------------------------------------------------
; Start of text segment.
SEGMENT .text
ORG 0x7C00
;--------------------------------------------------------------------------
; Boot code is loaded at 0:7C00h.
;
; Entry point. Interrupts off, then a far jump to normalize CS to 0
; before the real initialization in start1.
start:
cli
jmp 0:start1
times 8-($-$$) nop ; Put boot information table at offset 8
; El Torito boot information table, filled in by the
; mkisofs -boot-info-table option, if used.
bi_pvd: dd 0 ; LBA of primary volume descriptor
bi_file: dd 0 ; LBA of boot file
bi_length: dd 0 ; Length of boot file
bi_csum: dd 0 ; Checksum of boot file
bi_reserved: times 10 dd 0 ; Reserved
; Main flow: set up segments/stack, locate the El Torito boot catalog,
; then load the rest of the booter (boot2) and jump to it.
start1:
xor ax, ax ; zero %ax
mov ss, ax ; setup the
mov sp, kBoot0Stack ; stack
sti
cld ; increment SI after each lodsb call
mov ds, ax ; setup the
mov es, ax ; data segments
%if VERBOSE
LogString(init_str)
%endif
;; BIOS boot drive is in DL
mov [gBIOSDriveNumber], dl ; save BIOS drive number
DebugChar('!')
DebugPause()
%if 0 ;DEBUG
mov eax, [kBoot2LoadAddr]
call print_hex
call getc
%endif
;;
;; The BIOS likely didn't load the rest of the booter,
;; so we have to fetch it ourselves.
;;
mov edx, kReadBuffer
mov al, 1 ; one 2048-byte sector
mov ecx, 17 ; LBA 17 = El Torito boot record volume descriptor
call readLBA
jc NEAR error
DebugChar('A')
; Fetch the LBA of the boot catalog from the boot record volume descriptor.
mov ecx, [kReadBuffer + kVolSectorOffset]
%if 0 ;DEBUG
mov eax, ecx
call print_hex
DebugPause()
%endif
mov al, 1
call readLBA ; read the boot catalog
jc NEAR error
;; Now we have the boot catalog in the buffer.
;; Really we should look at the validation entry, but oh well.
DebugChar('B')
; Read the first sector of the boot file (this very code) to obtain the
; size field stored in its last dword (kCDBootSizeOffset).
mov ecx, [kReadBuffer + kBootSectorOffset]
mov al, 1
call readLBA ; reading this boot sector
inc ecx ; skip the first sector which is what we are in
%if 0 ;DEBUG
mov eax, ecx
call print_hex
DebugPause()
%endif
;
; Testing cdboot size
;
mov eax, [kReadBuffer + kCDBootSizeOffset]
or eax, eax
jz .useDefaultSize ; use the default size if zero
cmp eax, CDBootSizeMagic
je .useDefaultSize ; use the default size if equals to magic
cmp eax, kBoot2MaxSize
jbe .calcSectors ; use the actual size
.useDefaultSize:
mov eax, kBoot2Size
%if VERBOSE
LogString(defaultsize_str)
%endif
.calcSectors:
%if VERBOSE
LogString(size_str)
; call print_hex
%endif
add eax, kSectorBytes - 1 ; adjust size before unit conversion
shr eax, 11 ; convert file size to CD sector unit
%if VERBOSE
LogString(read_str)
; call print_hex
%endif
%if VERBOSE
LogString(loading_str)
%endif
; AX = sector count, ECX = start LBA, EDX = linear load address.
mov edx, (kBoot2Segment << 4) + kBoot2Address
call readSectors
jc error
DebugChar('C')
%if 0 ;DEBUG
mov eax, [es:kBoot2Address]
call print_hex
DebugPause()
%endif
DebugChar('X')
DebugPause()
;; Jump to newly-loaded booter
%if VERBOSE
LogString(done_str)
%endif
%if UNUSED
LogString(keypress_str)
call getc
%endif
mov dl, [gBIOSDriveNumber] ; load BIOS drive number
jmp kBoot2Segment:kBoot2Address
error:
%if VERBOSE
LogString(error_str)
%endif
.loop:
hlt ; wait for an interrupt, then loop forever
jmp .loop
;;
;; Support functions
;;
;--------------------------------------------------------------------------
; readSectors - Reads more than 127 sectors using LBA addressing.
;
; Splits the request into chunks of at most maxSectorCount sectors and
; issues one readLBA per chunk, advancing LBA and target address.
;
; Arguments:
; AX = number of 2048-byte sectors to read (valid from 1-320).
; EDX = pointer to where the sectors should be stored.
; ECX = sector offset in partition
;
; Returns:
; CF = 0 success
; 1 error
;
readSectors:
pushad
mov bx, ax ; BX = sectors remaining
.loop:
mov al, '.' ; progress indicator
call print_char
xor eax, eax ; EAX = 0
mov al, bl ; assume we reached the last block.
cmp bx, maxSectorCount ; check if we really reached the last block
jb .readBlock ; yes, BX < MaxSectorCount
mov al, maxSectorCount ; no, read MaxSectorCount
.readBlock:
call readLBA
jc .exit ; propagate the error (CF set)
sub bx, ax ; decrease remaining sectors with the read amount
jz .exit ; exit if no more sectors left to be loaded (CF clear)
add ecx, eax ; adjust LBA sector offset
shl eax, 11 ; convert CD sectors to bytes
add edx, eax ; adjust target memory location
jmp .loop ; read remaining sectors
.exit:
popad
ret
;--------------------------------------------------------------------------
; readLBA - Read sectors from a partition using LBA addressing.
;
; Arguments:
; AL = number of 512-byte sectors to read (valid from 1-127).
; NOTE(review): on this CD-ROM path INT13/F42 operates in 2048-byte
; sectors; the 512-byte wording looks inherited from the hard-disk
; loader - confirm against callers, which pass CD sector counts.
; EDX = pointer to where the sectors should be stored.
; ECX = sector offset in partition
; [gBIOSDriveNumber] = drive number (0x80 + unit number)
;
; Returns:
; CF = 0 success
; 1 error
;
readLBA:
pushad ; save all registers
push es ; save ES
mov bp, sp ; save current SP
;
; Convert EDX to segment:offset model and set ES:BX
;
; Some BIOSes do not like offset to be negative while reading
; from hard drives. This usually leads to "boot1: error" when trying
; to boot from hard drive, while booting normally from USB flash.
; The routines, responsible for this are apparently different.
; Thus we split linear address slightly differently for these
; capricious BIOSes to make sure offset is always positive.
;
mov bx, dx ; save offset to BX
and bh, 0x0f ; keep low 12 bits
shr edx, 4 ; adjust linear address to segment base
xor dl, dl ; mask low 8 bits
mov es, dx ; save segment to ES
;
; Create the Disk Address Packet structure for the
; INT13/F42 (Extended Read Sectors) on the stack.
;
; push DWORD 0 ; offset 12, upper 32-bit LBA
push ds ; For sake of saving memory,
push ds ; push DS register, which is 0.
push ecx ; offset 8, lower 32-bit LBA
push es ; offset 6, memory segment
push bx ; offset 4, memory offset
xor ah, ah ; offset 3, must be 0
push ax ; offset 2, number of sectors
push WORD 16 ; offset 0-1, packet size
;
; INT13 Func 42 - Extended Read Sectors
;
; Arguments:
; AH = 0x42
; [gBIOSDriveNumber] = drive number (0x80 + unit number)
; DS:SI = pointer to Disk Address Packet
;
; Returns:
; AH = return status (success is 0)
; carry = 0 success
; 1 error
;
; Packet offset 2 indicates the number of sectors read
; successfully.
;
mov dl, [gBIOSDriveNumber] ; load BIOS drive number
mov si, sp ; DS:SI -> packet just built on the stack
mov ah, 0x42
int 0x13
jnc .exit
DebugChar('R') ; indicate INT13/F42 error
;
; Issue a disk reset on error.
; Should this be changed to Func 0xD to skip the diskette controller
; reset?
;
%if VERBOSE
LogString(readerror_str)
mov eax, ecx
; call print_hex
%endif
xor ax, ax ; Func 0
int 0x13 ; INT 13
stc ; set carry to indicate error
.exit:
mov sp, bp ; restore SP (discards the packet)
pop es ; restore ES
popad
ret
;--------------------------------------------------------------------------
; Write a string with 'cdboot: ' prefix to the console.
;
; Arguments:
; DI pointer (in DS) to a NULL terminated string.
;
; Clobber list:
; none (PUSHAD/POPAD preserves all general registers)
;
log_string:
pushad
push di
mov si, log_title_str
call print_string ; print the CR/LF + 'cdboot: ' prefix
pop si ; SI = caller's DI (message pointer)
call print_string ; print the message itself
popad
ret
;-------------------------------------------------------------------------
; Write a string to the console via INT10 teletype output.
;
; Arguments:
; DS:SI pointer to a NULL terminated string.
;
; Clobber list:
; AX, BX, SI
;
print_string:
mov bx, 1 ; BH=0 (page 0), BL=1 (blue)
.loop:
lodsb ; load a byte from DS:SI into AL
cmp al, 0 ; Is it a NULL?
je .exit ; yes, all done
mov ah, 0xE ; INT10 Func 0xE
int 0x10 ; display byte in tty mode
jmp .loop
.exit:
ret
;%if DEBUG
;--------------------------------------------------------------------------
; Write the 4-byte value to the console in hex.
; (Currently compiled out - the whole routine sits under %if 0.)
;
; Arguments:
; EAX = Value to be displayed in hex.
;
%if 0
print_hex:
pushad
mov cx, WORD 4 ; 4 bytes to print
bswap eax ; most-significant byte first
.loop:
push ax
ror al, 4
call print_nibble ; display upper nibble
pop ax
call print_nibble ; display lower nibble
ror eax, 8 ; rotate next byte into AL
loop .loop
%if UNUSED
mov al, 10 ; line feed
call print_char
mov al, 13 ; carriage return
call print_char
%endif ; UNUSED
popad
ret
; Print the low nibble of AL as an upper-case hex digit.
print_nibble:
and al, 0x0f
add al, '0'
cmp al, '9'
jna .print_ascii
add al, 'A' - '9' - 1
.print_ascii:
call print_char
ret
%endif
;--------------------------------------------------------------------------
; getc - wait for a key press
;
; INT16/F0 blocks until a key is available; the key itself is discarded
; (PUSHAD/POPAD restores all registers).
;
getc:
pushad
mov ah, 0 ; INT16 Func 0 - read key press
int 0x16
popad
ret
;--------------------------------------------------------------------------
; Write a ASCII character to the console.
;
; Arguments:
; AL = ASCII character.
;
; All registers are preserved (PUSHAD/POPAD).
;
print_char:
pushad
mov bx, 1 ; BH=0 (page 0), BL=1 (blue)
mov ah, 0x0e ; bios INT 10, Function 0xE
int 0x10 ; display byte in tty mode
popad
ret
;--------------------------------------------------------------------------
; Static data.
;
%if VERBOSE
log_title_str db CR, LF, 'cdboot: ', NULL
init_str db 'init', NULL
defaultsize_str db 'using default size', NULL
size_str db 'file size: ', NULL
read_str db 'reading sectors: ', NULL
loading_str db 'loading', NULL
done_str db 'done', NULL
readerror_str db 'BIOS error at: ', NULL
error_str db 'error', NULL
%endif
%if UNUSED
keypress_str db 'Press any key to continue...', NULL
%endif
;; Pad this file to a size of 2048 bytes (one CD sector).
pad:
times 2044-($-$$) db 0
; Last dword of the sector: boot file size, patched at build time
; (left as the magic value when not patched - see start1).
CDBootSize dd CDBootSizeMagic
;; Location of loaded boot2 code.
kBoot2LoadAddr equ $
;
; Global variables
;
; Uninitialized storage placed right after the disk read buffer.
ABSOLUTE kReadBuffer + kSectorBytes
gBIOSDriveNumber resw 1
; END
|
al3xtjames/Clover
| 29,241
|
CloverEFI/BootSector/st32_64H.S
|
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
#* Copyright (c) 2016, Clover Inc. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* st32_64H.S
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
# Build with
# as -o st32_64H.o st32_64H.S
# ld --oformat=binary -Ttext=0x200 -o st32_64H.com st32_64H.o
#
# To change character displayed use --defsym CHARACTER_TO_SHOW=<int value> parameter to as
#
.code16
/*
.equ DELAY_PORT, 0x0ed # Port to use for 1uS delay
.equ KBD_CONTROL_PORT, 0x060 # 8042 control port
.equ KBD_STATUS_PORT, 0x064 # 8042 status port
.equ WRITE_DATA_PORT_CMD, 0x0d1 # 8042 command to write the data port
.equ ENABLE_A20_CMD, 0x0df # 8042 command to enable A20
*/
.equ FAST_ENABLE_A20_PORT, 0x92 # "fast A20" system control port A
.equ FAST_ENABLE_A20_MASK, 2 # bit 1 = A20 enable
.equ IA32_EFER, 0xC0000080 # extended feature enable register MSR
.ifndef CHARACTER_TO_SHOW
.equ CHARACTER_TO_SHOW, 'T' # progress character printed before the mode switch
.endif
.globl _start
_start:
jmp 1f
.ascii "CLOVERX64 " # signature / eye-catcher
1:
# Normalize the data segments and stack to this module's load segment.
movw %cs,%ax
movw %ax,%ds
movw %ax,%es
movw %ax,%ss
movw $MyStack, %sp
#
# Retrieve Bios Memory Map (INT15/E820, 20-byte entries)
#
xorl %ebx,%ebx # continuation value starts at 0
leal MemoryMap,%edi
MemMapLoop:
movl $0xe820,%eax
movl $20,%ecx
movl $0x534d4150, %edx # SMAP
int $0x15
jc MemMapDone # carry set = end of map (or call unsupported)
addw $20,%di
test %ebx,%ebx # EBX=0 means that was the last entry
jne MemMapLoop
MemMapDone:
subw $MemoryMap,%di # DI = size of the memory map in bytes
movl %edi, MemoryMapSize # Save the size of the memory map
#
# Rebase Self
#
xorl %ebx,%ebx
movw %cs,%bx # BX=segment
shll $4,%ebx # BX="linear" address of segment base
addl %ebx, (gdtr + 2) # Rebase address of GDT
addl %ebx, (idtr + 2) # Rebase address of IDT
addl %ebx, JumpToLongMode # Rebase ljmp Real Mode -> Long Mode
#
# Enable A20 Gate
#
movw $0x2401,%ax # Enable A20 Gate
int $0x15
jnc A20GateEnabled # Jump if it succeeded
#
# If INT 15 Function 2401 is not supported, then attempt to Enable A20 manually.
#
/*
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
outw %ax, $DELAY_PORT # Delay 1 uS
movb $WRITE_DATA_PORT_CMD, %al # 8042 cmd to write output port
outb %al, $KBD_STATUS_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
jnz Timeout8042 # Jump if the 8042 timed out
movb $ENABLE_A20_CMD, %al # gate address bit 20 on
outb %al, $KBD_CONTROL_PORT # Send command to the 8042
call Empty8042InputBuffer # Empty the Input Buffer on the 8042 controller
movw $25,%cx # Delay 25 uS for the command to complete on the 8042
Delay25uS:
outw %ax, $DELAY_PORT # Delay 1 uS
loop Delay25uS
Timeout8042:
*/
# Fast A20 gate via System Control Port A (port 0x92)
inb $FAST_ENABLE_A20_PORT, %al
orb $FAST_ENABLE_A20_MASK, %al
outb %al, $FAST_ENABLE_A20_PORT
A20GateEnabled:
#
# Create Page Table
#
call CreatePageTable
#
# DISABLE INTERRUPTS - Entering Protected Mode
# 253668.pdf page 401
#
# Print the progress character first (BIOS is unusable after cli).
movw $0x000F, %bx
movw $(0x0E00 | (CHARACTER_TO_SHOW & 255)), %ax
int $0x10
cli
#
# Ready Address of Page Table in EDX
#
movzwl PageTableSegment, %edx
shll $4, %edx
#
# load GDT
#
lgdtl gdtr
#
# Enable the 64-bit page-translation-table entries by
# setting CR4.PAE=1 (this is _required_ before activating
# long mode). Paging is not enabled until after long mode
# is enabled.
#
movl %cr4, %eax
orb $0x20, %al
movl %eax, %cr4
#
# This is the Trampoline Page Table that is guaranteed
# under 4GB.
#
# Address Map:
# 10000 ~ 12000 - efildr (loaded)
# 20000 ~ 21000 - start64.com
# 21000 ~ 22000 - efi64.com
# 22000 ~ 90000 - efildr
# 90000 ~ 96000 - 4G pagetable (will be reload later)
#
movl %edx, %cr3
#
# Enable long mode (set EFER.LME=1).
#
movl $IA32_EFER, %ecx
rdmsr
orw $0x100, %ax
wrmsr # Write EFER.
#
# Enable protected mode and paging to activate long mode (set CR0.PE=1, CR0.PG=1)
#
movl %cr0, %eax # Read CR0.
orl $0x80000001, %eax # Set PE, PG
movl %eax, %cr0 # Write CR0.
# The offset of this ljmp was patched ("Rebase Self" above) to the
# linear address of InLongMode.
.equ JumpToLongMode, . + 2
ljmpl $0x38, $InLongMode # 0x38 is SYS_CODE64_SEL
InLongMode:
.code64
movw $SYS_DATA_SEL,%ax
movw %ax,%ds
movw %ax,%es
movw %ax,%ss
leaq MyStack(%rip), %rsp # Reload RSP
#
# load IDT
#
lidtq idtr(%rip)
jmp BlockSignature + 2 # continue in the code placed right after this block
.code16
/*
Empty8042InputBuffer:
xorw %cx,%cx
Empty8042Loop:
outw %ax, $DELAY_PORT # Delay 1us
inb $KBD_STATUS_PORT, %al # Read the 8042 Status Port
andb $0x2,%al # Check the Input Buffer Full Flag
loopnz Empty8042Loop # Loop until the input buffer is empty or a timeout of 65536 uS
ret
*/
#
# Find place for page table and create it
#
.equ EFILDR_BASE, 0x2000 # Offset to start of EFILDR block
.equ EFILDR_FILE_LENGTH, 8 # Dword in EFILDR_HEADER holding size of block
.equ EBDA_SEG, 0x40 # Segment:Offset for finding the EBDA
.equ EBDA_OFFSET, 0xE
#
# CreatePageTable - build a 6-page long-mode page table (1 root entry,
# 4 second-level entries, 2048 leaf entries of 2MB large pages = first
# 4GB identity-mapped) in the first page-aligned gap between the end of
# the EFILDR block and the bottom of the EBDA.
# Out: PageTableSegment = 16-bit real-mode segment of the table.
# Aborts via PageTableError (never returns) if the table does not fit.
#
CreatePageTable:
movl (EFILDR_BASE + EFILDR_FILE_LENGTH), %edx # Size of EFILDR block -> EDX
addl $(EFILDR_BASE + 15), %edx # Add base
shrl $4, %edx # And round up to multiple of 16
movw %ds, %ax
addw %ax, %dx # Add in linear base
addw $255, %dx
xorb %dl, %dl # And round up to page size
# DX holds 16-bit segment of page table
movw %ds, %cx # Save DS
movw $EBDA_SEG, %ax
addb $6, %dh # Need 6 pages for table
movw %ax, %ds
movw EBDA_OFFSET, %ax # EBDA 16-bit segment now in AX
movw %cx, %ds # Restore DS
cmpw %dx, %ax # Does page table fit under EBDA?
jae 1f # Yes, continue
jmp PageTableError # No, abort
1:
subb $6, %dh # Restore DX to start segment of page table
movw %dx, PageTableSegment # Stash it for client
pushw %es
pushw %di # Save ES:DI used to build page table
movw %dx, %es
xorw %di, %di # ES:DI points to start of page table
incb %dh # Bump DX to next page
#
# Set up page table root page (only 1 entry)
#
xorl %eax, %eax
movw %dx, %ax
incb %dh # Bump DX to next page
shll $4, %eax # segment -> physical address
orb $3, %al # flags: present + writable
stosl
xorl %eax, %eax
movw $2046, %cx
rep stosw # Wipe rest of 1st page
#
# Set up page table 2nd page (depth 1 - 4 entries)
#
movw $4, %cx
2:
movw %dx, %ax
incb %dh # Bump DX to next page
shll $4, %eax
orb $3, %al # flags: present + writable
stosl
xorl %eax, %eax
stosl # upper 32 bits of the entry = 0
loop 2b
movw $2032, %cx # Wipe rest of 2nd page
rep stosw
#
# Set up pages 3 - 6 (depth 2 - 2048 entries)
#
xorl %edx, %edx # Start at base of memory
movb $0x83, %dl # Flags at leaf nodes mark large pages (2MB each)
movw $2048, %cx
3:
movl %edx, %eax
addl $0x200000, %edx # Bump EDX to next large page
stosl
xorl %eax, %eax
stosl # upper 32 bits of the entry = 0
loop 3b
#
# Done - restore ES:DI and return
#
popw %di
popw %es
ret
#
# Get here if not enough space between boot file
# and bottom of the EBDA - print error and halt
#
PageTableError:
addw $2, %sp # Clear return address of CreatePageTable
movw $15, %bx # page 0, attribute 15 (white)
movw $PageErrorMsg, %si
1:
lodsb # next message byte
testb %al, %al
jz 2f # NUL terminator - done
movb $14, %ah # INT10 teletype output
int $16
jmp 1b
2:
hlt # halt forever
jmp 2b
##############################################################################
# data
##############################################################################
.p2align 1
PageTableSegment: .word 0 # filled in by CreatePageTable
PageErrorMsg: .asciz "Unable to Allocate Memory for Page Table"
.p2align 1
gdtr: .word GDT_END - GDT_BASE - 1 # GDT limit
.long GDT_BASE # (GDT base gets adjusted above, in "Rebase Self")
##############################################################################
# global descriptor table (GDT)
##############################################################################
.p2align 1
GDT_BASE:
# null descriptor
.equ NULL_SEL, .-GDT_BASE # Selector [0x0]
.word 0 # limit 15:0
.word 0 # base 15:0
.byte 0 # base 23:16
.byte 0 # type
.byte 0 # limit 19:16, flags
.byte 0 # base 31:24
# linear data segment descriptor
.equ LINEAR_DATA_SEL, .-GDT_BASE # Selector [0x8]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# linear code segment descriptor
.equ LINEAR_CODE_SEL, .-GDT_BASE # Selector [0x10]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, non-conforming, readable
.byte 0xCF # page-granular, 32-bit
.byte 0
# system data segment descriptor
.equ SYS_DATA_SEL, .-GDT_BASE # Selector [0x18]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
# system code segment descriptor
.equ SYS_CODE_SEL, .-GDT_BASE # Selector [0x20]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, non-conforming, readable
.byte 0xCF # page-granular, 32-bit
.byte 0
# spare segment descriptor
.equ SPARE3_SEL, .-GDT_BASE # Selector [0x28]
.word 0 # limit 0
.word 0 # base 0
.byte 0 #
.byte 0 # non-present, ring 0, system, reserved
.byte 0 #
.byte 0
#
# system data segment descriptor
#
.equ SYS_DATA64_SEL, .-GDT_BASE # Selector [0x30]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x92 # present, ring 0, data, expand-up, writable
.byte 0xCF # page-granular, 32-bit
.byte 0
#
# system code segment descriptor (used by the ljmp into long mode)
#
.equ SYS_CODE64_SEL, .-GDT_BASE # Selector [0x38]
.word 0xFFFF # limit 0xFFFFF
.word 0 # base 0
.byte 0
.byte 0x9A # present, ring 0, code, non-conforming, readable
.byte 0xAF # page-granular, 64-bit (L bit set)
.byte 0
# spare segment descriptor
.equ SPARE4_SEL, .-GDT_BASE # Selector [0x40]
.word 0 # limit 0
.word 0 # base 0
.byte 0
.byte 0 # non-present, ring 0, system, reserved
.byte 0 #
.byte 0
GDT_END:
.p2align 1
idtr: .word IDT_END - IDT_BASE - 1 # IDT limit
.quad IDT_BASE # (IDT base gets adjusted above, in "Rebase Self")
##############################################################################
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
##############################################################################
.p2align 1
IDT_BASE:
# divide by zero (INT 0)
.equ DIV_ZERO_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# debug exception (INT 1)
.equ DEBUG_EXCEPT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# NMI (INT 2)
.equ NMI_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# soft breakpoint (INT 3)
.equ BREAKPOINT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# overflow (INT 4)
.equ OVERFLOW_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# bounds check (INT 5)
.equ BOUNDS_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# invalid opcode (INT 6)
.equ INVALID_OPCODE_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# device not available (INT 7)
.equ DEV_NOT_AVAIL_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# double fault (INT 8)
.equ DOUBLE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Coprocessor segment overrun - reserved (INT 9)
.equ RSVD_INTR_SEL1, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# invalid TSS (INT 0ah)
.equ INVALID_TSS_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# segment not present (INT 0bh)
.equ SEG_NOT_PRESENT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# stack fault (INT 0ch)
.equ STACK_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# general protection (INT 0dh)
.equ GP_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# page fault (INT 0eh)
.equ PAGE_FAULT_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Intel reserved - do not use (INT 0fh)
.equ RSVD_INTR_SEL2, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# floating point error (INT 10h)
.equ FLT_POINT_ERR_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# alignment check (INT 11h)
.equ ALIGNMENT_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# machine check (INT 12h)
.equ MACHINE_CHECK_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# SIMD floating-point exception (INT 13h)
.equ SIMD_EXCEPTION_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# 84 unspecified descriptors, First 12 of them are reserved, the rest are avail
.fill 84 * 16, 1, 0 # db (84 * 16) dup(0)
# IRQ 0 (System timer) - (INT 68h)
.equ IRQ0_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 1 (8042 Keyboard controller) - (INT 69h)
.equ IRQ1_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
.equ IRQ2_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 3 (COM 2) - (INT 6bh)
.equ IRQ3_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 4 (COM 1) - (INT 6ch)
.equ IRQ4_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 5 (LPT 2) - (INT 6dh)
.equ IRQ5_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 6 (Floppy controller) - (INT 6eh)
.equ IRQ6_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 7 (LPT 1) - (INT 6fh)
.equ IRQ7_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 8 (RTC Alarm) - (INT 70h)
.equ IRQ8_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 9 - (INT 71h)
.equ IRQ9_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 10 - (INT 72h)
.equ IRQ10_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 11 - (INT 73h)
.equ IRQ11_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 12 (PS/2 mouse) - (INT 74h)
.equ IRQ12_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 13 (Floating point error) - (INT 75h)
.equ IRQ13_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 14 (Secondary IDE) - (INT 76h)
.equ IRQ14_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 15 (Primary IDE) - (INT 77h)
.equ IRQ15_SEL, .-IDT_BASE
.word 0 # offset 15:0
.word SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.word 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
.fill 16, 1, 0
IDT_END:
.p2align 1
MemoryMapSize: .long 0 # byte count of the E820 map gathered in _start
MemoryMap: .fill 267, 4, 0 # E820 entry buffer (20-byte entries)
.org 0x0de0
MyStack: # real-mode stack top; stack grows down from here
# below is the pieces of the IVT that is used to redirect INT 68h - 6fh
# back to INT 08h - 0fh when in real mode... It is 'org'ed to a
# known low address (20f00) so it can be set up by PlMapIrqToVect in
# 8259.c
int $8
iret
int $9
iret
int $10
iret
int $11
iret
int $12
iret
int $13
iret
int $14
iret
int $15
iret
.org 0x0dfe
BlockSignature:
.word 0xaa55 # block signature; long-mode code jumps to BlockSignature + 2
|
al3xtjames/Clover
| 31,360
|
CloverEFI/CpuDxe/Ia32/CpuInterrupt.S
|
#------------------------------------------------------------------------------
#*
#* Copyright 2006, Intel Corporation
#* All rights reserved. This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* CpuInterrupt.S
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#PUBLIC SystemTimerHandler
#PUBLIC SystemExceptionHandler
#EXTERNDEF mExceptionCodeSize:DWORD
#EXTERN TimerHandler: NEAR
#EXTERN ExceptionHandler: NEAR
#EXTERN mTimerVector: DWORD
# .data
# ASM_GLOBAL ASM_PFX(mExceptionCodeSize)
#ASM_PFX(mExceptionCodeSize): .long 9
# .text
#------------------------------------------------------------------------------
# VOID InitDescriptor (VOID)
# Patches the runtime addresses of GDT_BASE/IDT_BASE into the gdtr/idtr
# pseudo-descriptors defined in .data below, then loads both into the CPU.
# Clobbers: eax, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InitDescriptor)
ASM_PFX(InitDescriptor):
movl $GDT_BASE,%eax # EAX=PHYSICAL address of gdt
movl %eax, gdtr + 2 # Put address of gdt into the gdtr (base field at offset 2)
lgdt gdtr
movl $IDT_BASE,%eax # EAX=PHYSICAL address of idt
movl %eax, idtr + 2 # Put address of idt into the idtr (base field at offset 2)
lidt idtr
ret
# VOID
# EFIAPI
# InstallInterruptHandler (
# UINTN Vector,
# VOID (*Handler)(VOID)
# )
ASM_GLOBAL ASM_PFX(InstallInterruptHandler)
ASM_PFX(InstallInterruptHandler):
# VOID InstallInterruptHandler (UINTN Vector, VOID (*Handler)(VOID))
# Writes Handler's offset into IDT entry number Vector of the live IDT
# (located via sidt). Only offset bits 15:0 and 31:16 are written; the
# selector/type bytes of the entry are left as initialized in IDT_BASE.
# Vector:DWORD @ 4(%esp)
# Handler:DWORD @ 8(%esp)
push %edi
pushf # save eflags
cli # turn off interrupts while the entry is half-written
subl $6,%esp # open 6 bytes for the sidt pseudo-descriptor (limit:16, base:32)
movl %esp,%edi
sidt (%edi) # get fword address of IDT
movl 2(%edi), %edi # move base (offset 2 of pseudo-descriptor) of IDT into EDI
addl $6,%esp # correct stack
movl 12(%esp),%eax # Get vector number (12 = 4 arg offset + edi + eflags pushes)
shl $3,%eax # multiply by 8 (size of a 32-bit IDT gate) to get offset
addl %eax,%edi # add to IDT base to get entry
movl 16(%esp),%eax # load new handler address
movw %ax,(%edi) # write bits 15..0 of offset
shrl $16,%eax # use ax to copy 31..16 to descriptor
movw %ax,6(%edi) # write bits 31..16 of offset
popf # restore flags (possibly re-enabling interrupts)
pop %edi
ret
.macro JmpCommonIdtEntry
# jmp commonIdtEntry - this must be hand coded to keep the assembler from
# using an 8-bit relative jump when the entries are
# within 255 bytes of the common entry. This must
# be done to maintain the consistency of the size
# of entry points... (the INTUnknown stubs compute
# their vector number from that fixed size)
.byte 0xe9 # jmp rel32 opcode, hand-encoded (always 5 bytes)
.long commonIdtEntry - . - 4 # 32-bit displacement to commonIdtEntry
.endm
# Exception entry stubs for vectors 0-31. Every stub pushes a dummy error
# code (unless the CPU pushes a real one for that vector), pushes the vector
# number, then jumps to commonIdtEntry. Each stub MUST be exactly 9 bytes:
# the INTUnknown stubs below compute their own vector number from
# ( . - INTUnknown - 3 ) / 9 + 20, so the fixed size is load-bearing.
.p2align 1
ASM_GLOBAL ASM_PFX(SystemExceptionHandler)
ASM_PFX(SystemExceptionHandler):
INT0:
pushl $0x0 # push error code place holder on the stack
pushl $0x0
JmpCommonIdtEntry
# db 0e9h # jmp rel32
# dd commonIdtEntry - $ - 4 # offset to jump to
INT1:
pushl $0x0 # push error code place holder on the stack
pushl $0x1
JmpCommonIdtEntry
INT2:
pushl $0x0 # push error code place holder on the stack
pushl $0x2
JmpCommonIdtEntry
INT3:
pushl $0x0 # push error code place holder on the stack
pushl $0x3
JmpCommonIdtEntry
INT4:
pushl $0x0 # push error code place holder on the stack
pushl $0x4
JmpCommonIdtEntry
INT5:
pushl $0x0 # push error code place holder on the stack
pushl $0x5
JmpCommonIdtEntry
INT6:
pushl $0x0 # push error code place holder on the stack
pushl $0x6
JmpCommonIdtEntry
INT7:
pushl $0x0 # push error code place holder on the stack
pushl $0x7
JmpCommonIdtEntry
INT8:
# Double fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $0x8
JmpCommonIdtEntry
INT9:
pushl $0x0 # push error code place holder on the stack
pushl $0x9
JmpCommonIdtEntry
INT10:
# Invalid TSS causes an error code to be pushed so no phony push necessary
nop
nop
pushl $10
JmpCommonIdtEntry
INT11:
# Segment Not Present causes an error code to be pushed so no phony push necessary
nop
nop
pushl $11
JmpCommonIdtEntry
INT12:
# Stack fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $12
JmpCommonIdtEntry
INT13:
# GP fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $13
JmpCommonIdtEntry
INT14:
# Page fault causes an error code to be pushed so no phony push necessary
nop
nop
pushl $14
JmpCommonIdtEntry
INT15:
pushl $0x0 # push error code place holder on the stack
pushl $15
JmpCommonIdtEntry
INT16:
pushl $0x0 # push error code place holder on the stack
pushl $16
JmpCommonIdtEntry
INT17:
# Alignment check causes an error code to be pushed so no phony push necessary
nop
nop
pushl $17
JmpCommonIdtEntry
INT18:
pushl $0x0 # push error code place holder on the stack
pushl $18
JmpCommonIdtEntry
INT19:
pushl $0x0 # push error code place holder on the stack
pushl $19
JmpCommonIdtEntry
INTUnknown:
# The following segment repeats (32 - 20) times; the vector number is
# derived from the stub's byte offset (stub size must stay 9 bytes):
# No. 1
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 2
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 3
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 4
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 5
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 6
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 7
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 8
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 9
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 10
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 11
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 12
pushl $0x0 # push error code place holder on the stack
# push imm8 (vector number), hand-encoded:
.byte 0x6a
.byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
ASM_GLOBAL ASM_PFX(SystemTimerHandler)
ASM_PFX(SystemTimerHandler):
# Timer-interrupt entry: dummy error code + vector number, like the stubs above.
pushl $0
pushl $0 # vector placeholder — $ASM_PFX(mTimerVector); presumably patched at runtime by the CPU driver, confirm in Cpu.c
JmpCommonIdtEntry
commonIdtEntry:
# Common dispatcher for all exception/interrupt stubs. Builds an
# EFI_SYSTEM_CONTEXT_IA32 on the stack, calls TimerHandler for vectors >= 32
# or ExceptionHandler otherwise, then restores the context and irets.
# Stack on entry (pushed by CPU, the stub, and the 'push %ebp' below):
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + Vector Number +
# +---------------------+
# + EBP +
# +---------------------+ <-- EBP
cli
push %ebp
movl %esp,%ebp
#
# Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
# is 16-byte aligned
#
andl $0xfffffff0,%esp
subl $12,%esp
## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax#
push %eax
push %ecx
push %edx
push %ebx
leal 6*4(%ebp),%ecx # interrupted ESP = address just above the CPU frame
push %ecx # ESP
push (%ebp) # EBP (the value saved at function entry)
push %esi
push %edi
## UINT32 Gs, Fs, Es, Ds, Cs, Ss#
movw %ss,%ax
push %eax
movzwl 4*4(%ebp),%eax # CS from the CPU-pushed frame, zero-extended
push %eax
movw %ds,%ax
push %eax
movw %es,%ax
push %eax
movw %fs,%ax
push %eax
movw %gs,%ax
push %eax
## UINT32 Eip#
pushl 3*4(%ebp)
## UINT32 Gdtr[2], Idtr[2]#
subl $8,%esp
sidt (%esp)
subl $8,%esp
sgdt (%esp)
## UINT32 Ldtr, Tr#
xorl %eax, %eax
str %ax
push %eax
sldt %eax
push %eax
## UINT32 EFlags#
pushl 5*4(%ebp)
## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4#
mov %cr4,%eax
orl $0x208,%eax # set OSFXSR|OSXMMEXCPT so fxsave/fxrstor below are usable
mov %eax,%cr4
push %eax
mov %cr3,%eax
push %eax
mov %cr2,%eax
push %eax
xor %eax, %eax # CR1 does not exist; store 0
push %eax
mov %cr0,%eax
push %eax
## UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7#
mov %dr7,%eax
push %eax
## clear Dr7 while executing debugger itself
xor %eax, %eax
mov %eax,%dr7
mov %dr6,%eax
push %eax
## insure all status bits in dr6 are clear...
xor %eax, %eax
mov %eax,%dr6
mov %dr3,%eax
push %eax
mov %dr2,%eax
push %eax
mov %dr1,%eax
push %eax
mov %dr0,%eax
push %eax
## FX_SAVE_STATE_IA32 FxSaveState;
sub $512,%esp
mov %esp,%edi
fxsave (%edi) # esp was 16-byte aligned above, as fxsave requires
## UINT32 ExceptionData;
pushl 2*4(%ebp)
## Prepare parameters (cdecl: vector, &context) and dispatch
mov %esp,%edx
push %edx
mov 1*4(%ebp),%eax # vector number from the stub
push %eax
cmpl $32,%eax
jb 1f # CallException — vectors 0-31 are exceptions
call ASM_PFX(TimerHandler)
jmp 2f # ExceptionDone
#CallException:
1:
call ASM_PFX(ExceptionHandler)
#ExceptionDone:
2:
addl $8,%esp # drop the two call arguments
cli
## UINT32 ExceptionData;
addl $4,%esp
## FX_SAVE_STATE_IA32 FxSaveState;
mov %esp,%esi
fxrstor (%esi)
addl $512,%esp
#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
pop %eax
mov %eax,%dr0
pop %eax
mov %eax,%dr1
pop %eax
mov %eax,%dr2
pop %eax
mov %eax,%dr3
## skip restore of dr6. We cleared dr6 during the context save.
addl $4,%esp
pop %eax
mov %eax,%dr7
## UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
pop %eax
mov %eax,%cr0
addl $4,%esp # not for Cr1
pop %eax
mov %eax,%cr2
pop %eax
mov %eax,%cr3
pop %eax
mov %eax,%cr4
## UINT32 EFlags — written back into the CPU frame so iret restores it
popl 5*4(%ebp)
## UINT32 Ldtr, Tr;
## UINT32 Gdtr[2], Idtr[2];
## Best not let anyone mess with these particular registers...
addl $24,%esp
## UINT32 Eip — written back into the CPU frame so iret uses it
popl 3*4(%ebp)
## UINT32 Gs, Fs, Es, Ds, Cs, Ss;
## NOTE - modified segment registers could hang the debugger... We
## could attempt to insulate ourselves against this possibility,
## but that poses risks as well.
##
pop %gs
pop %fs
pop %es
pop %ds
popl 4*4(%ebp) # CS back into the CPU frame
pop %ss
## UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
pop %edi
pop %esi
addl $4,%esp # not for ebp
addl $4,%esp # not for esp
pop %ebx
pop %edx
pop %ecx
pop %eax
mov %ebp,%esp
pop %ebp
addl $8,%esp # drop vector number + error code pushed by the stub
iret
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# data
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
.data
.p2align 2
# GDT pseudo-descriptor loaded by lgdt in InitDescriptor.
gdtr: .short GDT_END - GDT_BASE - 1 # GDT limit (size - 1)
.long 0 # GDT base — patched at runtime by InitDescriptor
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# global descriptor table (GDT)
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# Global descriptor table: flat 4 GB code/data segments plus spares.
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
.p2align 2
GDT_BASE:
# null descriptor (selector 0x00)
NULL_SEL = .-GDT_BASE
.short 0 # limit 15:0
.short 0 # base 15:0
.byte 0 # base 23:16
.byte 0 # type
.byte 0 # limit 19:16, flags
.byte 0 # base 31:24
# linear data segment descriptor (selector 0x08)
LINEAR_SEL = .-GDT_BASE
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x092 # present, ring 0, data, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# linear code segment descriptor (selector 0x10)
LINEAR_CODE_SEL = .-GDT_BASE
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x09A # present, ring 0, code, execute/read (0x9A is a code type, not data)
.byte 0x0CF # page-granular, 32-bit
.byte 0
# system data segment descriptor (selector 0x18)
SYS_DATA_SEL = .-GDT_BASE
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x092 # present, ring 0, data, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# system code segment descriptor (selector 0x20) — used by the IDT entries below
SYS_CODE_SEL = .-GDT_BASE
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x09A # present, ring 0, code, execute/read (0x9A is a code type, not data)
.byte 0x0CF # page-granular, 32-bit
.byte 0
# spare (unused, all-zero) segment descriptor (selector 0x28)
SPARE3_SEL = .-GDT_BASE
.short 0 # limit 0
.short 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
# spare (unused, all-zero) segment descriptor (selector 0x30)
SPARE4_SEL = .-GDT_BASE
.short 0 # limit 0
.short 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
# spare (unused, all-zero) segment descriptor (selector 0x38)
SPARE5_SEL = .-GDT_BASE
.short 0 # limit 0
.short 0 # base 0
.byte 0
.byte 0 # not present
.byte 0
.byte 0
GDT_END:
.p2align 2
# IDT pseudo-descriptor loaded by lidt in InitDescriptor.
#idtr: .short IDT_END - IDT_BASE - 1 # IDT limit (equivalent to IDT_LEN)
idtr: .short IDT_LEN # IDT limit (size - 1); IDT_LEN is .set after IDT_END
.long 0 # IDT base — patched at runtime by InitDescriptor
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
#idt_tag .byte "IDT",0
# 256-entry IDT skeleton. Every used gate is a 386 interrupt gate through
# SYS_CODE_SEL with a zero offset; the offsets are filled in at runtime by
# InstallInterruptHandler. Layout: vectors 0-31 (exceptions), 32-103
# (unspecified, zero-filled), 0x68-0x77 (the 16 legacy PC/AT IRQs as mapped
# by the 8259 driver), then one final zero-filled entry.
.p2align 2
IDT_BASE:
# divide by zero (INT 0)
DIV_ZERO_SEL = .-IDT_BASE
.short 0 # offset 15:0 — patched at runtime
.short SYS_CODE_SEL # selector
.byte 0 # unused word (no IST in 32-bit gates)
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16 — patched at runtime
# debug exception (INT 1)
DEBUG_EXCEPT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# NMI (INT 2)
NMI_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# soft breakpoint (INT 3)
BREAKPOINT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# overflow (INT 4)
OVERFLOW_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# bounds check (INT 5)
BOUNDS_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# invalid opcode (INT 6)
INVALID_OPCODE_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# device not available (INT 7)
DEV_NOT_AVAIL_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# double fault (INT 8)
DOUBLE_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# Coprocessor segment overrun - reserved (INT 9)
RSVD_INTR_SEL1 = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# invalid TSS (INT 0ah)
INVALID_TSS_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# segment not present (INT 0bh)
SEG_NOT_PRESENT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# stack fault (INT 0ch)
STACK_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# general protection (INT 0dh)
GP_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# page fault (INT 0eh)
PAGE_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# Intel reserved - do not use (INT 0fh)
RSVD_INTR_SEL2 = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# floating point error (INT 0x10)
FLT_POINT_ERR_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# alignment check (INT 0x11)
ALIGNMENT_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# machine check (INT 0x12)
MACHINE_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# SIMD floating-point exception (INT 0x13)
SIMD_EXCEPTION_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# The following segment repeats (32 - 20) times, covering vectors 20-31:
# No. 1
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 2
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 3
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 4
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 5
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 6
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 7
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 8
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 9
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 10
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 11
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# No. 12
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# 72 unspecified descriptors (vectors 32 through 0x67), zero-filled
.fill 72 * 8, 1, 0
# IRQ 0 (System timer) - (INT 0x68)
IRQ0_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 1 (8042 Keyboard controller) - (INT 0x69)
IRQ1_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
IRQ2_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 3 (COM 2) - (INT 6bh)
IRQ3_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 4 (COM 1) - (INT 6ch)
IRQ4_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 5 (LPT 2) - (INT 6dh)
IRQ5_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 6 (Floppy controller) - (INT 6eh)
IRQ6_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 7 (LPT 1) - (INT 6fh)
IRQ7_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 8 (RTC Alarm) - (INT 0x70)
IRQ8_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 9 - (INT 0x71)
IRQ9_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 10 - (INT 0x72)
IRQ10_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 11 - (INT 0x73)
IRQ11_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 12 (PS/2 mouse) - (INT 0x74)
IRQ12_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 13 (Floating point error) - (INT 0x75)
IRQ13_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 14 (Secondary IDE) - (INT 0x76)
IRQ14_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# IRQ 15 (Primary IDE) - (INT 0x77)
IRQ15_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE_SEL # selector
.byte 0
.byte 0x0e | 0x80 # 386 interrupt gate, present
.short 0 # offset 31:16
# one final zero-filled entry
.fill 1 * 8, 1, 0
IDT_END:
.set IDT_LEN, .-IDT_BASE - 1
#ASM_FUNCTION_REMOVE_IF_UNREFERENCED
|
al3xtjames/Clover
| 37,132
|
CloverEFI/CpuDxe/X64/CpuInterrupt.S
|
#------------------------------------------------------------------------------
#*
#* Copyright 2006 - 2010, Intel Corporation
#* All rights reserved. This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* CpuInterrupt.S
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#PUBLIC SystemTimerHandler
#PUBLIC SystemExceptionHandler
#EXTERNDEF mExceptionCodeSize:DWORD
#EXTERN TimerHandler: NEAR
#EXTERN ExceptionHandler: NEAR
#EXTERN mTimerVector: DWORD
#.text
#------------------------------------------------------------------------------
# VOID InitDescriptor (VOID) — x64 variant.
# Patches the runtime addresses of GDT_BASE/IDT_BASE into the gdtr/idtr
# pseudo-descriptors (RIP-relative, PIC-safe), loads both, and reloads
# fs/gs with the 0x18 data selector.
# Clobbers: rax, fs, gs, flags.
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InitDescriptor)
ASM_PFX(InitDescriptor):
leaq GDT_BASE(%rip),%rax # RAX=PHYSICAL address of gdt
movq %rax, (gdtr + 2)(%rip) # Put address of gdt into the gdtr (base field at offset 2)
lgdt gdtr(%rip)
movl $0x18, %eax # selector 0x18 — presumably the flat data segment in GDT_BASE; confirm against the GDT layout
movl %eax, %gs
movl %eax, %fs
leaq IDT_BASE(%rip),%rax # RAX=PHYSICAL address of idt
movq %rax, (idtr + 2)(%rip) # Put address of idt into the idtr (base field at offset 2)
lidt idtr(%rip)
ret
# VOID
# EFIAPI
# InstallInterruptHandler (
# UINTN Vector,
# VOID (*Handler)(VOID)
# )
ASM_GLOBAL ASM_PFX(InstallInterruptHandler)
ASM_PFX(InstallInterruptHandler):
# VOID InstallInterruptHandler (UINTN Vector, VOID (*Handler)(VOID))
# Writes Handler's 64-bit offset into IDT entry number Vector of the live
# IDT (located via sidt). Args arrive in rcx (Vector) and rdx (Handler),
# i.e. the Microsoft x64 calling convention used by UEFI.
push %rbx
pushfq # save rflags
cli # turn off interrupts while the entry is half-written
subq $0x10, %rsp # open 16 bytes for the sidt pseudo-descriptor (limit:16, base:64)
movq %rsp, %rbx
sidt (%rbx) # get pseudo-descriptor of IDT
movq 2(%rbx), %rbx # move base (offset 2 of pseudo-descriptor) of IDT into RBX
addq $0x10, %rsp # correct stack
movq %rcx, %rax # Get vector number
shlq $4, %rax # multiply by 16 (size of a 64-bit IDT gate) to get offset
addq %rax, %rbx # add to IDT base to get entry
movq %rdx, %rax # load new handler address
movw %ax, (%rbx) # write bits 15..0 of offset
shrq $16, %rax
movw %ax, 6(%rbx) # write bits 31..16 of offset
shrq $16, %rax
movl %eax, 8(%rbx) # write bits 63..32 of offset
popfq # restore flags (possibly re-enabling interrupts)
pop %rbx
ret
.macro JmpCommonIdtEntry
# jmp commonIdtEntry - this must be hand coded to keep the assembler from
# using an 8-bit relative jump when the entries are
# within 255 bytes of the common entry. This must
# be done to maintain the consistency of the size
# of entry points...
.byte 0xe9 # jmp rel32 opcode, hand-encoded (always 5 bytes)
.long commonIdtEntry - . - 4 # 32-bit displacement to commonIdtEntry
.endm
# Exception entry stubs for vectors 0-31 (x64). Every stub pushes a dummy
# error code (unless the CPU pushes a real one for that vector), pushes the
# vector number, then jumps to commonIdtEntry.
.p2align 1
ASM_GLOBAL ASM_PFX(SystemExceptionHandler)
ASM_PFX(SystemExceptionHandler):
INT0:
push $0x0 # push error code place holder on the stack
push $0x0
JmpCommonIdtEntry
# db 0e9h # jmp rel32
# dd commonIdtEntry - $ - 4 # offset to jump to
INT1:
push $0x0 # push error code place holder on the stack
push $0x1
JmpCommonIdtEntry
INT2:
push $0x0 # push error code place holder on the stack
push $0x2
JmpCommonIdtEntry
INT3:
push $0x0 # push error code place holder on the stack
push $0x3
JmpCommonIdtEntry
INT4:
push $0x0 # push error code place holder on the stack
push $0x4
JmpCommonIdtEntry
INT5:
push $0x0 # push error code place holder on the stack
push $0x5
JmpCommonIdtEntry
INT6:
push $0x0 # push error code place holder on the stack
push $0x6
JmpCommonIdtEntry
INT7:
push $0x0 # push error code place holder on the stack
push $0x7
JmpCommonIdtEntry
INT8:
# Double fault causes an error code to be pushed so no phony push necessary
nop
nop
push $0x8
JmpCommonIdtEntry
INT9:
push $0x0 # push error code place holder on the stack
push $0x9
JmpCommonIdtEntry
INT10:
# Invalid TSS causes an error code to be pushed so no phony push necessary
nop
nop
push $10
JmpCommonIdtEntry
INT11:
# Segment Not Present causes an error code to be pushed so no phony push necessary
nop
nop
push $11
JmpCommonIdtEntry
INT12:
# Stack fault causes an error code to be pushed so no phony push necessary
nop
nop
push $12
JmpCommonIdtEntry
INT13:
# GP fault causes an error code to be pushed so no phony push necessary
nop
nop
push $13
JmpCommonIdtEntry
INT14:
# Page fault causes an error code to be pushed so no phony push necessary
nop
nop
push $14
JmpCommonIdtEntry
INT15:
push $0x0 # push error code place holder on the stack
push $15
JmpCommonIdtEntry
INT16:
push $0x0 # push error code place holder on the stack
push $16
JmpCommonIdtEntry
INT17:
# Alignment check causes an error code to be pushed so no phony push necessary
nop
nop
push $17
JmpCommonIdtEntry
INT18:
push $0x0 # push error code place holder on the stack
push $18
JmpCommonIdtEntry
INT19:
push $0x0 # push error code place holder on the stack
push $19
JmpCommonIdtEntry
INTUnknown:
# The following segment repeats (32 - 20) times:
# macro .rept isn't used here because Apple GAS compiler doesn't support it.
# No. 1
push $0x0 # push error code place holder on the stack
push $20 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown(%rip) - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 2
push $0x0 # push error code place holder on the stack
push $21 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown(%rip) - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 3
push $0x0 # push error code place holder on the stack
push $22 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 4
push $0x0 # push error code place holder on the stack
push $23 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 5
push $0x0 # push error code place holder on the stack
push $24 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 6
push $0x0 # push error code place holder on the stack
push $25 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 7
push $0x0 # push error code place holder on the stack
push $26 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 8
push $0x0 # push error code place holder on the stack
push $27 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 9
push $0x0 # push error code place holder on the stack
push $28 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 10
push $0x0 # push error code place holder on the stack
push $29 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 11
push $0x0 # push error code place holder on the stack
push $30 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
# No. 12
push $0x0 # push error code place holder on the stack
push $31 # push vector number
# .byte 0x6a
# .byte ( . - INTUnknown - 3 ) / 9 + 20 # vector number
JmpCommonIdtEntry
ASM_GLOBAL ASM_PFX(SystemTimerHandler)
ASM_PFX(SystemTimerHandler):
# Timer-interrupt entry: dummy error code + vector number, like the stubs above.
push $0
push $0 # vector placeholder — $ASM_PFX(mTimerVector), patched at runtime in Cpu.c (per original comment)
JmpCommonIdtEntry
commonIdtEntry:
#------------------------------------------------------------------------------
# Common dispatcher for all exception/interrupt stubs (x64). Builds an
# EFI_SYSTEM_CONTEXT_X64 on the stack, calls TimerHandler for vectors >= 32
# or ExceptionHandler otherwise, then restores the context and iretqs.
#
# FIX: the FXRSTOR on the restore path used (%esi) — a 32-bit address
# register in 64-bit code. The 0x67 address-size override truncates the
# stack pointer to its low 32 bits, so the restore silently reads the wrong
# address whenever the stack lives above 4 GB. It now uses (%rsi), matching
# the fxsave (%rdi) on the save path. Also normalized the lone "mov" to
# %cr8 to "movq" for consistency with every other CR move here (no
# encoding/behavior change).
#
# Stack on entry (pushed by CPU, the stub, and the 'push %rbp' below):
# +---------------------+
# + RFlags +
# +---------------------+
# + CS +
# +---------------------+
# + RIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + Vector Number +
# +---------------------+
# + RBP +
# +---------------------+ <-- RBP
cli
push %rbp
movq %rsp,%rbp
#
# Since here the stack pointer is 16-byte aligned, so
# EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
# is 16-byte aligned
#
## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax#
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15#
push %r15
push %r14
push %r13
push %r12
push %r11
push %r10
push %r9
push %r8
push %rax
push %rcx
push %rdx
push %rbx
push 6*8(%rbp) # interrupted RSP from the CPU frame
push (%rbp) # RBP (the value saved at function entry)
push %rsi
push %rdi
## UINT64 Gs, Fs, Es, Ds, Cs, Ss# insure high 16 bits of each is zero
movzwq 7*8(%rbp), %rax
push %rax # for ss
movzwq 4*8(%rbp), %rax
push %rax # for cs
movl %ds, %eax
push %rax
movl %es, %eax
push %rax
movl %fs, %eax
push %rax
movl %gs, %eax
push %rax
## UINT64 Rip#
push 3*8(%rbp)
## UINT64 Gdtr[2], Idtr[2]#
subq $16, %rsp
sidt (%rsp)
subq $16, %rsp
sgdt (%rsp)
## UINT64 Ldtr, Tr#
xorq %rax, %rax
str %ax
push %rax
sldt %ax
push %rax
## UINT64 RFlags#
push 5*8(%rbp)
## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8#
movq %cr8, %rax
push %rax
movq %cr4, %rax
orq $0x208, %rax # set OSFXSR|OSXMMEXCPT so fxsave/fxrstor below are usable
movq %rax, %cr4
push %rax
movq %cr3, %rax
push %rax
movq %cr2, %rax
push %rax
xorq %rax, %rax # CR1 does not exist; store 0
push %rax
movq %cr0, %rax
push %rax
## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7#
movq %dr7, %rax
push %rax
## clear Dr7 while executing debugger itself
xorq %rax, %rax
movq %rax, %dr7
movq %dr6, %rax
push %rax
## insure all status bits in dr6 are clear...
xorq %rax, %rax
movq %rax, %dr6
movq %dr3, %rax
push %rax
movq %dr2, %rax
push %rax
movq %dr1, %rax
push %rax
movq %dr0, %rax
push %rax
## FX_SAVE_STATE_X64 FxSaveState#
subq $512, %rsp
movq %rsp, %rdi
fxsave (%rdi) # rsp is 16-byte aligned here, as fxsave requires
## UINT64 ExceptionData#
push 2*8 (%rbp)
## call into exception handler
## Prepare parameters (MS x64: rcx = vector, rdx = &context) and dispatch
movq 1*8(%rbp), %rcx
movq %rsp, %rdx
#
# Per X64 calling convention, allocate maximum parameter stack space
# and make sure RSP is 16-byte aligned
#
subq $(4*8+8), %rsp
cmpq $32, %rcx
jb 1f # CallException — vectors 0-31 are exceptions
call ASM_PFX(TimerHandler)
jmp 2f # ExceptionDone
#CallException:
1:
call ASM_PFX(ExceptionHandler)
#ExceptionDone:
2:
addq $(4*8+8), %rsp
cli
## UINT64 ExceptionData#
addq $8, %rsp
## FX_SAVE_STATE_X64 FxSaveState#
movq %rsp, %rsi
fxrstor (%rsi) # FIXED: was (%esi) — 32-bit address override truncated the stack pointer
addq $512, %rsp
## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7#
pop %rax
movq %rax, %dr0
pop %rax
movq %rax, %dr1
pop %rax
movq %rax, %dr2
pop %rax
movq %rax, %dr3
## skip restore of dr6. We cleared dr6 during the context save.
addq $8, %rsp
pop %rax
movq %rax, %dr7
## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8#
pop %rax
movq %rax, %cr0
addq $8, %rsp # not for Cr1
pop %rax
movq %rax, %cr2
pop %rax
movq %rax, %cr3
pop %rax
movq %rax, %cr4
pop %rax
movq %rax, %cr8 # normalized from "mov" for consistency with the other CR moves
## UINT64 RFlags — written back into the CPU frame so iretq restores it
pop 5*8(%rbp)
## UINT64 Ldtr, Tr#
## UINT64 Gdtr[2], Idtr[2]#
## Best not let anyone mess with these particular registers...
addq $48, %rsp
## UINT64 Rip — written back into the CPU frame so iretq uses it
pop 3*8(%rbp)
## UINT64 Gs, Fs, Es, Ds, Cs, Ss#
pop %rax
# mov gs, rax # not for gs
pop %rax
# mov fs, rax # not for fs
# (X64 will not use fs and gs, so we do not restore it)
pop %rax
movl %eax, %es #movq %rax, %es #Slice
pop %rax
movl %eax, %ds
pop 4*8(%rbp) # for cs
pop 7*8(%rbp) # for ss
## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax#
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15#
pop %rdi
pop %rsi
addq $8, %rsp # not for rbp
pop 6*8(%rbp) # for rsp
pop %rbx
pop %rdx
pop %rcx
pop %rax
pop %r8
pop %r9
pop %r10
pop %r11
pop %r12
pop %r13
pop %r14
pop %r15
movq %rbp, %rsp
pop %rbp
addq $16, %rsp # drop vector number + error code pushed by the stub
iretq
##############################################################################
# data
##############################################################################
.data
# Pseudo-descriptor loaded with lgdt: 16-bit limit followed by 64-bit base.
# The base is left 0 here; per the comment below it is presumably patched at
# runtime by the init code — TODO confirm against the setup path.
#gdtr: .short GDT_END - GDT_BASE - 1 # GDT limit
gdtr: .short GDT_LEN
.quad 0 #GDT_BASE # (GDT base gets set above)
##############################################################################
# global descriptor table (GDT)
#
# Each entry is a classic 8-byte segment descriptor:
#   limit 15:0 | base 15:0 | base 23:16 | access byte | flags+limit 19:16 | base 31:24
##############################################################################
.p2align 4 # make GDT 16-byte align
GDT_BASE:
# null descriptor
NULL_SEL = .-GDT_BASE # Selector [0x0]
.short 0 # limit 15:0
.short 0 # base 15:0
.byte 0 # base 23:16
.byte 0 # type
.byte 0 # limit 19:16, flags
.byte 0 # base 31:24
# linear data segment descriptor
LINEAR_SEL = .-GDT_BASE # Selector [0x8]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x092 # present, ring 0, data, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# linear code segment descriptor
LINEAR_CODE_SEL = .-GDT_BASE # Selector [0x10]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x09A # present, ring 0, code, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# system data segment descriptor
SYS_DATA_SEL = .-GDT_BASE # Selector [0x18]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x092 # present, ring 0, data, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# system code segment descriptor
SYS_CODE_SEL = .-GDT_BASE # Selector [0x20]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x09A # present, ring 0, code, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# spare segment descriptor
SPARE3_SEL = .-GDT_BASE # Selector [0x28]
.short 0
.short 0
.byte 0
.byte 0
.byte 0
.byte 0
# system data segment descriptor
SYS_DATA64_SEL = .-GDT_BASE # Selector [0x30]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x092 # present, ring 0, data, expand-up, writable
.byte 0x0CF # page-granular, 32-bit
.byte 0
# system code segment descriptor
SYS_CODE64_SEL = .-GDT_BASE # Selector [0x38]
.short 0x0FFFF # limit 0xFFFFF
.short 0 # base 0
.byte 0
.byte 0x09A # present, ring 0, code, expand-up, writable
.byte 0x0AF # page-granular, 64-bit (L bit set instead of D)
.byte 0
# spare segment descriptor
SPARE4_SEL = .-GDT_BASE # Selector [0x40]
.short 0
.short 0
.byte 0
.byte 0
.byte 0
.byte 0
GDT_END:
.set GDT_LEN, . - GDT_BASE - 1
#idtr: .short IDT_END - IDT_BASE - 1 # IDT limit
# Pseudo-descriptor loaded with lidt: 16-bit limit + 64-bit base (patched at
# runtime per the comment below — TODO confirm against the setup code).
idtr: .short IDT_LEN
.quad 0 #IDT_BASE # (IDT base gets set above)
##############################################################################
# interrupt descriptor table (IDT)
#
# Note: The hardware IRQ's specified in this table are the normal PC/AT IRQ
# mappings. This implementation only uses the system timer and all other
# IRQs will remain masked. The descriptors for vectors 33+ are provided
# for convenience.
#
# Each entry is a 16-byte x64 interrupt gate. All handler-offset fields are
# left 0 here; presumably they are fixed up at runtime to point at the
# per-vector stubs — TODO confirm with the IDT initialization code.
##############################################################################
.p2align 3 # make IDT 8-byte align
IDT_BASE:
# divide by zero (INT 0)
DIV_ZERO_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# debug exception (INT 1)
DEBUG_EXCEPT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# NMI (INT 2)
NMI_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# soft breakpoint (INT 3)
BREAKPOINT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# overflow (INT 4)
OVERFLOW_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# bounds check (INT 5)
BOUNDS_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# invalid opcode (INT 6)
INVALID_OPCODE_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# device not available (INT 7)
DEV_NOT_AVAIL_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# double fault (INT 8)
DOUBLE_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# Coprocessor segment overrun - reserved (INT 9)
RSVD_INTR_SEL1 = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# invalid TSS (INT 0ah)
INVALID_TSS_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# segment not present (INT 0bh)
SEG_NOT_PRESENT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# stack fault (INT 0ch)
STACK_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# general protection (INT 0dh)
GP_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# page fault (INT 0eh)
PAGE_FAULT_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# Intel reserved - do not use (INT 0fh)
RSVD_INTR_SEL2 = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# floating point error (INT 0x10)
FLT_POINT_ERR_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# alignment check (INT 0x11)
ALIGNMENT_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# machine check (INT 0x12)
MACHINE_CHECK_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# SIMD floating-point exception (INT 0x13)
SIMD_EXCEPTION_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# The following segment repeats (32 - 20) times:
# macro .rept isn't used here because Apple GAS compiler doesn't support it.
# No. 1
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 2
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 3
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 4
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 5
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 6
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 7
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 8
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 9
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 10
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 11
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# No. 12
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# 72 unspecified descriptors (vectors 0x20..0x67, 16 bytes each)
.fill 72 * 16, 1, 0
# IRQ 0 (System timer) - (INT 0x68)
IRQ0_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # 0 for reserved
# IRQ 1 (8042 Keyboard controller) - (INT 0x69)
IRQ1_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# Reserved - IRQ 2 redirect (IRQ 2) - DO NOT USE!!! - (INT 6ah)
IRQ2_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 3 (COM 2) - (INT 6bh)
IRQ3_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 4 (COM 1) - (INT 6ch)
IRQ4_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 5 (LPT 2) - (INT 6dh)
IRQ5_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 6 (Floppy controller) - (INT 6eh)
IRQ6_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 7 (LPT 1) - (INT 6fh)
IRQ7_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 8 (RTC Alarm) - (INT 0x70)
IRQ8_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 9 - (INT 0x71)
IRQ9_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 10 - (INT 0x72)
IRQ10_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 11 - (INT 0x73)
IRQ11_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 12 (PS/2 mouse) - (INT 0x74)
IRQ12_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 13 (Floating point error) - (INT 0x75)
IRQ13_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 14 (Secondary IDE) - (INT 0x76)
IRQ14_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# IRQ 15 (Primary IDE) - (INT 0x77)
IRQ15_SEL = .-IDT_BASE
.short 0 # offset 15:0
.short SYS_CODE64_SEL # selector 15:0
.byte 0 # 0 for interrupt gate
.byte 0x0e | 0x80 # (10001110)type = 386 interrupt gate, present
.short 0 # offset 31:16
.long 0 # offset 63:32
.long 0 # for reserved
# one final empty 16-byte gate
.fill 16, 1, 0
IDT_END:
.set IDT_LEN, .-IDT_BASE - 1
#ASM_FUNCTION_REMOVE_IF_UNREFERENCED
#------------------------------------------------------------------------------
# (file boundary — extraction artifact replaced)
# CloverEFI/UefiCpuPkg/Library/CpuExceptionHandlerLib/Ia32/ExceptionHandlerAsm.S
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2012 - 2013, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* ExceptionHandlerAsm.S
#*
#* Abstract:
#*
#* IA32 CPU Exception Handler
#
#------------------------------------------------------------------------------
#.MMX
#.XMM
ASM_GLOBAL ASM_PFX(CommonExceptionHandler)
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
ASM_GLOBAL ASM_PFX(HookAfterStubHeaderEnd)
#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions
#EXTRN ASM_PFX(mDoFarReturnFlag):DWORD # Do far return flag
.text
#
# exception handler stub table
#
# One fixed-length stub per CPU exception vector (0-31).  Each stub
# hand-encodes:
#     push  $VectorNum              # 6A nn
#     pushl %eax
#     movl  $CommonInterruptEntry, %eax   # B8 imm32 (the .byte/.long pair)
#     jmp   *%eax
# The bytes are emitted explicitly so every stub has an identical size, which
# AsmGetTemplateAddressMap reports as (Exception1Handle - Exception0Handle).
#
Exception0Handle:
.byte 0x6a # push #VectorNum
.byte 0
pushl %eax
.byte 0xB8 # movl $ASM_PFX(CommonInterruptEntry), %eax
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception1Handle:
.byte 0x6a # push #VectorNum
.byte 1
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception2Handle:
.byte 0x6a # push #VectorNum
.byte 2
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception3Handle:
.byte 0x6a # push #VectorNum
.byte 3
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception4Handle:
.byte 0x6a # push #VectorNum
.byte 4
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception5Handle:
.byte 0x6a # push #VectorNum
.byte 5
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception6Handle:
.byte 0x6a # push #VectorNum
.byte 6
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception7Handle:
.byte 0x6a # push #VectorNum
.byte 7
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception8Handle:
.byte 0x6a # push #VectorNum
.byte 8
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception9Handle:
.byte 0x6a # push #VectorNum
.byte 9
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception10Handle:
.byte 0x6a # push #VectorNum
.byte 10
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception11Handle:
.byte 0x6a # push #VectorNum
.byte 11
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception12Handle:
.byte 0x6a # push #VectorNum
.byte 12
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception13Handle:
.byte 0x6a # push #VectorNum
.byte 13
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception14Handle:
.byte 0x6a # push #VectorNum
.byte 14
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception15Handle:
.byte 0x6a # push #VectorNum
.byte 15
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception16Handle:
.byte 0x6a # push #VectorNum
.byte 16
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception17Handle:
.byte 0x6a # push #VectorNum
.byte 17
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception18Handle:
.byte 0x6a # push #VectorNum
.byte 18
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception19Handle:
.byte 0x6a # push #VectorNum
.byte 19
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception20Handle:
.byte 0x6a # push #VectorNum
.byte 20
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception21Handle:
.byte 0x6a # push #VectorNum
.byte 21
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception22Handle:
.byte 0x6a # push #VectorNum
.byte 22
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception23Handle:
.byte 0x6a # push #VectorNum
.byte 23
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception24Handle:
.byte 0x6a # push #VectorNum
.byte 24
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception25Handle:
.byte 0x6a # push #VectorNum
.byte 25
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception26Handle:
.byte 0x6a # push #VectorNum
.byte 26
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception27Handle:
.byte 0x6a # push #VectorNum
.byte 27
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception28Handle:
.byte 0x6a # push #VectorNum
.byte 28
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception29Handle:
.byte 0x6a # push #VectorNum
.byte 29
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception30Handle:
.byte 0x6a # push #VectorNum
.byte 30
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
Exception31Handle:
.byte 0x6a # push #VectorNum
.byte 31
pushl %eax
.byte 0xB8
.long ASM_PFX(CommonInterruptEntry)
jmp *%eax
# Template stub used when a "hook after" handler is installed for a vector.
# It is copied at runtime; the vector byte at the VectorNum label is patched
# by AsmVectorNumFixup below.
HookAfterStubBegin:
.byte 0x6a # push
VectorNum:
.byte 0 # 0 will be fixed
pushl %eax
.byte 0xB8 # movl ASM_PFX(HookAfterStubHeaderEnd), %eax
.long ASM_PFX(HookAfterStubHeaderEnd)
jmp *%eax
ASM_GLOBAL ASM_PFX(HookAfterStubHeaderEnd)
ASM_PFX(HookAfterStubHeaderEnd):
popl %eax # undo the stub's "pushl %eax"
subl $8, %esp # reserve room for filling exception data later
pushl 8(%esp) # re-push vector number above the reserved slots
xchgl (%esp), %ecx # get vector number
bt %ecx, ASM_PFX(mErrorCodeFlag) # CF = this vector pushes an error code
jnc NoErrorData
pushl (%esp) # addition push if exception data needed
NoErrorData:
xchg (%esp), %ecx # restore ecx
pushl %eax
#---------------------------------------;
# CommonInterruptEntry ;
#---------------------------------------;
# The follow algorithm is used for the common interrupt routine.
# Entered from the per-vector stubs with the vector number on top of the
# stack (above the optional CPU-pushed error code).  Builds an
# EFI_SYSTEM_CONTEXT_IA32 on a 16-byte-aligned stack, calls
# CommonExceptionHandler(Vector, &SystemContext), restores the context
# and returns via iret or (optionally) a far return.
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
ASM_PFX(CommonInterruptEntry):
cli
popl %eax # discard the stub's pushed EAX copy
#
# All interrupt handlers are invoked through interrupt gates, so
# IF flag automatically cleared at the entry point
#
#
# Get vector number from top of stack
#
xchgl (%esp), %ecx
andl $0x0FF, %ecx # Vector number should be less than 256
cmpl $32, %ecx # Intel reserved vector for exceptions?
jae NoErrorCode
bt %ecx, ASM_PFX(mErrorCodeFlag) # CF set if this exception pushes an error code
jc HasErrorCode
NoErrorCode:
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + ECX +
# +---------------------+ <-- ESP
#
# Registers:
# ECX - Vector Number
#
#
# Put Vector Number on stack
#
pushl %ecx
#
# Put 0 (dummy) error code on stack, and restore ECX
#
xorl %ecx, %ecx # ECX = 0
xchgl 4(%esp), %ecx
jmp ErrorCodeAndVectorOnStack
HasErrorCode:
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + ECX +
# +---------------------+ <-- ESP
#
# Registers:
# ECX - Vector Number
#
#
# Put Vector Number on stack and restore ECX
#
xchgl (%esp), %ecx
ErrorCodeAndVectorOnStack:
pushl %ebp
movl %esp, %ebp
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + Vector Number +
# +---------------------+
# + EBP +
# +---------------------+ <-- EBP
#
#
# Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
# is 16-byte aligned
#
andl $0x0fffffff0, %esp # mask == 0xFFFFFFF0: drop low 4 bits of ESP
subl $12, %esp
subl $8, %esp
pushl $0 # check EXCEPTION_HANDLER_CONTEXT.OldIdtHandler
pushl $0 # check EXCEPTION_HANDLER_CONTEXT.ExceptionDataFlag
#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
pushl %eax
pushl %ecx
pushl %edx
pushl %ebx
leal 24(%ebp), %ecx # ESP value before the CPU pushed the frame
pushl %ecx # ESP
pushl (%ebp) # EBP
pushl %esi
pushl %edi
#; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
movl %ss, %eax
pushl %eax
movzwl 16(%ebp), %eax # CS from the interrupt frame (zero-extended)
pushl %eax
movl %ds, %eax
pushl %eax
movl %es, %eax
pushl %eax
movl %fs, %eax
pushl %eax
movl %gs, %eax
pushl %eax
#; UINT32 Eip;
movl 12(%ebp), %eax
pushl %eax
#; UINT32 Gdtr[2], Idtr[2];
subl $8, %esp
sidt (%esp)
movl 2(%esp), %eax # split the 6-byte pseudo-descriptor into two UINT32s
xchgl (%esp), %eax
andl $0x0FFFF, %eax # keep only the 16-bit limit
movl %eax, 4(%esp)
subl $8, %esp
sgdt (%esp)
movl 2(%esp), %eax
xchgl (%esp), %eax
andl $0x0FFFF, %eax
movl %eax, 4(%esp)
#; UINT32 Ldtr, Tr;
xorl %eax, %eax
str %ax
pushl %eax
sldt %ax
pushl %eax
#; UINT32 EFlags;
movl 20(%ebp), %eax
pushl %eax
#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
movl %cr4, %eax
orl $0x208, %eax # set OSFXSR (bit 9) and DE (bit 3) for fxsave/fxrstor below
movl %eax, %cr4
pushl %eax
movl %cr3, %eax
pushl %eax
movl %cr2, %eax
pushl %eax
xorl %eax, %eax # Cr1 does not exist; store 0 placeholder
pushl %eax
movl %cr0, %eax
pushl %eax
#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
movl %dr7, %eax
pushl %eax
movl %dr6, %eax
pushl %eax
movl %dr3, %eax
pushl %eax
movl %dr2, %eax
pushl %eax
movl %dr1, %eax
pushl %eax
movl %dr0, %eax
pushl %eax
#; FX_SAVE_STATE_IA32 FxSaveState;
subl $512, %esp
movl %esp, %edi
.byte 0x0f, 0x0ae, 0x07 #fxsave [edi]  (hand-encoded for old assemblers)
#; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
cld
#; UINT32 ExceptionData;
pushl 8(%ebp)
#; Prepare parameter and call
movl %esp, %edx # EDX = &SystemContext
pushl %edx
movl 4(%ebp), %edx # EDX = vector number
pushl %edx
#
# Call External Exception Handler
#
call ASM_PFX(CommonExceptionHandler)
addl $8, %esp
cli
#; UINT32 ExceptionData;
addl $4, %esp
#; FX_SAVE_STATE_IA32 FxSaveState;
movl %esp, %esi
.byte 0x0f, 0x0ae, 0x0e # fxrstor [esi]  (hand-encoded)
addl $512, %esp
#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
#; Skip restoration of DRx registers to support in-circuit emualators
#; or debuggers set breakpoint in interrupt/exception context
addl $24, %esp
#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
popl %eax
movl %eax, %cr0
addl $4, %esp # not for Cr1
popl %eax
movl %eax, %cr2
popl %eax
movl %eax, %cr3
popl %eax
movl %eax, %cr4
#; UINT32 EFlags;
popl 20(%ebp) # write back into the iret frame
#; UINT32 Ldtr, Tr;
#; UINT32 Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
addl $24, %esp
#; UINT32 Eip;
popl 12(%ebp)
#; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
#; NOTE - modified segment registers could hang the debugger... We
#; could attempt to insulate ourselves against this possibility,
#; but that poses risks as well.
#;
popl %gs
popl %fs
popl %es
popl %ds
popl 16(%ebp) # CS back into the iret frame
popl %ss
#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
popl %edi
popl %esi
addl $4, %esp # not for ebp
addl $4, %esp # not for esp
popl %ebx
popl %edx
popl %ecx
popl %eax
popl -8(%ebp) # stash OldIdtHandler just below the restored frame
popl -4(%ebp) # stash ExceptionDataFlag likewise
movl %ebp, %esp
popl %ebp
addl $8, %esp # discard vector number and error code
# NOTE(review): the values below ESP were written just above and are read
# back before anything else can clobber them (interrupts are disabled).
cmpl $0, -16(%esp) # check EXCEPTION_HANDLER_CONTEXT.OldIdtHandler
jz DoReturn
cmpl $1, -20(%esp) # check EXCEPTION_HANDLER_CONTEXT.ExceptionDataFlag
jz ErrorCode
jmp *-16(%esp) # chain to the previous IDT handler (no error code)
ErrorCode:
subl $4, %esp # re-expose the error code for the old handler
jmp *-12(%esp)
DoReturn:
cmpl $0, ASM_PFX(mDoFarReturnFlag)
jz DoIret
pushl 8(%esp) # save EFLAGS
addl $16, %esp
pushl -8(%esp) # save CS in new location
pushl -8(%esp) # save EIP in new location
pushl -8(%esp) # save EFLAGS in new location
popfl # restore EFLAGS
retf # far return
DoIret:
iretl
#---------------------------------------;
# AsmGetTemplateAddressMap ;
#---------------------------------------;
# VOID
# EFIAPI
# AsmGetTemplateAddressMap (
#   OUT EXCEPTION_HANDLER_TEMPLATE_MAP  *AddressMap
#   );
#
# Reports the layout of the interrupt-stub templates so C code can build
# the interrupt table.  The structure pointed to by the single stack
# argument receives:
#   offset 0 - address of the first per-vector stub (Exception0Handle)
#   offset 4 - size of one stub (Exception1Handle - Exception0Handle)
#   offset 8 - address of the hook-after stub template (HookAfterStubBegin)
#
# Preserves all registers; only EBX (callee-saved under cdecl) is touched
# and it is saved/restored explicitly.
#-----------------------------------------------------------------------------;
ASM_GLOBAL ASM_PFX(AsmGetTemplateAddressMap)
ASM_PFX(AsmGetTemplateAddressMap):
pushl %ebp
movl %esp, %ebp
pushl %ebx # EBX is callee-saved; it is the only register we clobber
movl 8(%ebp), %ebx # EBX = AddressMap
movl $Exception0Handle, (%ebx) # start of the stub table
movl $(Exception1Handle - Exception0Handle), 4(%ebx) # per-vector stub size
movl $HookAfterStubBegin, 8(%ebx) # hook-after template start
popl %ebx
popl %ebp
ret
#-------------------------------------------------------------------------------------
# AsmVectorNumFixup (*VectorBase, VectorNum, HookStub);
#
# Patches the vector-number immediate inside a copied hook-after stub: writes
# the low byte of VectorNum at the same offset within the copy at VectorBase
# that the VectorNum label occupies within the HookAfterStubBegin template.
# The third argument is not used by this routine.
#-------------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(AsmVectorNumFixup)
ASM_PFX(AsmVectorNumFixup):
movl 4(%esp), %edx # EDX = VectorBase (start of the copied stub)
movl 8(%esp), %eax # AL = VectorNum
movb %al, (VectorNum - HookAfterStubBegin)(%edx) # patch the push immediate
ret
#------------------------------------------------------------------------------
# (file boundary — extraction artifact replaced)
# CloverEFI/UefiCpuPkg/Library/CpuExceptionHandlerLib/X64/ExceptionHandlerAsm.S
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------ ;
# Copyright (c) 2012 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# ExceptionHandlerAsm.S
#
# Abstract:
#
# x64 CPU Exception Handler
#
# Notes:
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(CommonExceptionHandler)
#ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
#ASM_GLOBAL ASM_PFX(HookAfterStubHeaderEnd)
#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions
#EXTRN ASM_PFX(mDoFarReturnFlag):QWORD # Do far return flag
.text
.align 3
#
# exception handler stub table
#
#
# Exception stub template table (vectors 0..31).
# Each stub is byte-identical in layout; only the pushed vector byte differs:
#     6A nn                 push  $nn          (vector number)
#     50                    pushq %rax
#     48 B8 <imm64>         movabs $imm64, %rax  (imm64 patched at runtime to
#                           CommonInterruptEntry — hand-encoded because the
#                           target is not known at assembly time)
#     FF E0                 jmp   *%rax
# The runtime measures one stub as (Exception1Handle - Exception0Handle) and
# patches the .quad at a fixed offset, so the byte layout of every stub must
# stay exactly as written — do not replace the .byte encodings with mnemonics.
#
Exception0Handle:
    .byte 0x6a        # push #VectorNum
    .byte 0
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception1Handle:
    .byte 0x6a        # push #VectorNum
    .byte 1
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception2Handle:
    .byte 0x6a        # push #VectorNum
    .byte 2
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception3Handle:
    .byte 0x6a        # push #VectorNum
    .byte 3
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception4Handle:
    .byte 0x6a        # push #VectorNum
    .byte 4
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception5Handle:
    .byte 0x6a        # push #VectorNum
    .byte 5
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception6Handle:
    .byte 0x6a        # push #VectorNum
    .byte 6
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception7Handle:
    .byte 0x6a        # push #VectorNum
    .byte 7
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception8Handle:
    .byte 0x6a        # push #VectorNum
    .byte 8
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception9Handle:
    .byte 0x6a        # push #VectorNum
    .byte 9
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception10Handle:
    .byte 0x6a        # push #VectorNum
    .byte 10
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception11Handle:
    .byte 0x6a        # push #VectorNum
    .byte 11
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception12Handle:
    .byte 0x6a        # push #VectorNum
    .byte 12
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception13Handle:
    .byte 0x6a        # push #VectorNum
    .byte 13
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception14Handle:
    .byte 0x6a        # push #VectorNum
    .byte 14
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception15Handle:
    .byte 0x6a        # push #VectorNum
    .byte 15
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception16Handle:
    .byte 0x6a        # push #VectorNum
    .byte 16
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception17Handle:
    .byte 0x6a        # push #VectorNum
    .byte 17
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception18Handle:
    .byte 0x6a        # push #VectorNum
    .byte 18
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception19Handle:
    .byte 0x6a        # push #VectorNum
    .byte 19
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception20Handle:
    .byte 0x6a        # push #VectorNum
    .byte 20
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception21Handle:
    .byte 0x6a        # push #VectorNum
    .byte 21
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception22Handle:
    .byte 0x6a        # push #VectorNum
    .byte 22
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception23Handle:
    .byte 0x6a        # push #VectorNum
    .byte 23
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception24Handle:
    .byte 0x6a        # push #VectorNum
    .byte 24
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception25Handle:
    .byte 0x6a        # push #VectorNum
    .byte 25
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception26Handle:
    .byte 0x6a        # push #VectorNum
    .byte 26
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception27Handle:
    .byte 0x6a        # push #VectorNum
    .byte 27
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception28Handle:
    .byte 0x6a        # push #VectorNum
    .byte 28
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception29Handle:
    .byte 0x6a        # push #VectorNum
    .byte 29
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception30Handle:
    .byte 0x6a        # push #VectorNum
    .byte 30
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
Exception31Handle:
    .byte 0x6a        # push #VectorNum
    .byte 31
    pushq %rax
    .byte 0x48, 0xB8
    .quad 0 #ASM_PFX(CommonInterruptEntry)
    jmp *%rax
#
# "Hook after" stub template. A copy of this template is made per hooked
# vector; AsmVectorNumFixup then patches, at the same offsets the labels have
# here, the vector byte (PatchVectorNum) and the 64-bit jump target
# (PatchFuncAddress). Byte layout must therefore stay exactly as written.
#
HookAfterStubHeaderBegin:
    .byte 0x6a # push          (opcode only; operand byte follows at PatchVectorNum)
#VectorNum:
PatchVectorNum:
    .byte 0 # 0 will be fixed  (patched to the real vector number at runtime)
    pushq %rax
    .byte 0x48, 0xB8 # movq ASM_PFX(HookAfterStubHeaderEnd), %rax
#    .quad ASM_PFX(HookAfterStubHeaderEnd)
PatchFuncAddress:
    .quad 0                    # patched to HookAfterStubHeaderEnd or CommonInterruptEntry
    jmp *%rax
ASM_GLOBAL ASM_PFX(HookAfterStubHeaderEnd)
#
# Entered from a patched "hook after" stub with (vector byte, old %rax) already
# pushed. Aligns the stack for the exception context, reserves scratch space,
# and normalizes the frame so vectors with and without a CPU-pushed error code
# look the same before falling through to CommonInterruptEntry.
#
ASM_PFX(HookAfterStubHeaderEnd):
    movq %rsp, %rax                     # %rax = original stack pointer (frame base)
    andl $0x0fffffff0, %esp # make sure 16-byte aligned for exception context
                                        # NOTE(review): writing %esp zeroes the upper
                                        # 32 bits of %rsp — only safe if the stack is
                                        # below 4 GiB. TODO confirm that assumption.
    subq $0x18, %rsp # reserve room for filling exception data later
    pushq %rcx
    movq 8(%rax), %rcx                  # %rcx = vector number from the original frame
#    pushq %rax
#    movabsl ASM_PFX(mErrorCodeFlag), %eax
#    bt %ecx, %eax
#    popq %rax
    bt %ecx, ASM_PFX(mErrorCodeFlag)(%rip)  # does this vector carry an error code?
    jnc NoErrorData
    pushq (%rsp) # push additional rcx to make stack alignment
NoErrorData:
    xchgq (%rsp), %rcx # restore rcx, save Exception Number in stack
    pushq (%rax) # push rax into stack to keep code consistence
#---------------------------------------;
# CommonInterruptEntry ;
#---------------------------------------;
# The follow algorithm is used for the common interrupt routine.
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
#
# Common X64 interrupt/exception dispatcher. On entry the vector stub has
# pushed the vector byte (reachable via the stub's "return address") and %rax.
# Builds a full EFI_SYSTEM_CONTEXT_X64 on the stack, calls
# CommonExceptionHandler(VectorNum, Context), then restores state and returns
# via iretq, a chained old IDT handler, or an optional far return.
# The exact push order below IS the EFI_SYSTEM_CONTEXT_X64 layout — do not
# reorder anything in this block.
#
ASM_PFX(CommonInterruptEntry):
    cli
    popq %rax                           # discard the stub's pushed %rax slot into %rax
#
# All interrupt handlers are invoked through interrupt gates, so
# IF flag automatically cleared at the entry point
#
#
# Calculate vector number
#
    xchgq (%rsp), %rcx # get the return address of call, actually, it is the address of vector number.
    andq $0x0FF, %rcx                   # keep only the 8-bit vector number
    cmp $32, %ecx # Intel reserved vector for exceptions?
    jae NoErrorCode
    pushq %rax
#    movabsl ASM_PFX(mErrorCodeFlag), %eax
    movl ASM_PFX(mErrorCodeFlag)(%rip), %eax
    bt %ecx, %eax                       # CF = 1 if this exception pushes an error code
    popq %rax
    jc CommonInterruptEntry_al_0000
NoErrorCode:
#
# Push a dummy error code on the stack
# to maintain coherent stack map
#
    pushq (%rsp)
    movq $0, 8(%rsp)
CommonInterruptEntry_al_0000:
    pushq %rbp
    movq %rsp, %rbp
    pushq $0 # check EXCEPTION_HANDLER_CONTEXT.OldIdtHandler
    pushq $0 # check EXCEPTION_HANDLER_CONTEXT.ExceptionDataFlag
#
# Stack:
# +---------------------+ <-- 16-byte aligned ensured by processor
# + Old SS +
# +---------------------+
# + Old RSP +
# +---------------------+
# + RFlags +
# +---------------------+
# + CS +
# +---------------------+
# + RIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + RCX / Vector Number +
# +---------------------+
# + RBP +
# +---------------------+ <-- RBP, 16-byte aligned
#
#
# Since here the stack pointer is 16-byte aligned, so
# EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
# is 16-byte aligned
#
#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
    pushq %r15
    pushq %r14
    pushq %r13
    pushq %r12
    pushq %r11
    pushq %r10
    pushq %r9
    pushq %r8
    pushq %rax
    pushq 8(%rbp) # RCX
    pushq %rdx
    pushq %rbx
    pushq 48(%rbp) # RSP
    pushq (%rbp) # RBP
    pushq %rsi
    pushq %rdi
#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero
    movzwq 56(%rbp), %rax
    pushq %rax # for ss
    movzwq 32(%rbp), %rax
    pushq %rax # for cs
    movl %ds, %eax
    pushq %rax
    movl %es, %eax
    pushq %rax
    movl %fs, %eax
    pushq %rax
    movl %gs, %eax
    pushq %rax
    movq %rcx, 8(%rbp) # save vector number
#; UINT64 Rip;
    pushq 24(%rbp)
#; UINT64 Gdtr[2], Idtr[2];
    xorq %rax, %rax
    pushq %rax
    pushq %rax
    sidt (%rsp)
    xchgq 2(%rsp), %rax                 # split the 10-byte IDTR image into two quads
    xchgq (%rsp), %rax
    xchgq 8(%rsp), %rax
    xorq %rax, %rax
    pushq %rax
    pushq %rax
    sgdt (%rsp)
    xchgq 2(%rsp), %rax                 # same split for the GDTR image
    xchgq (%rsp), %rax
    xchgq 8(%rsp), %rax
#; UINT64 Ldtr, Tr;
    xorq %rax, %rax
    str %ax
    pushq %rax
    sldt %ax
    pushq %rax
#; UINT64 RFlags;
    pushq 40(%rbp)
#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    movq %cr8, %rax
    pushq %rax
    movq %cr4, %rax
    orq $0x208, %rax                    # set OSFXSR | OSXMMEXCPT before fxsave below
    movq %rax, %cr4
    pushq %rax
    mov %cr3, %rax
    pushq %rax
    mov %cr2, %rax
    pushq %rax
    xorq %rax, %rax                     # CR1 does not exist; store 0 placeholder
    pushq %rax
    mov %cr0, %rax
    pushq %rax
#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movq %dr7, %rax
    pushq %rax
    movq %dr6, %rax
    pushq %rax
    movq %dr3, %rax
    pushq %rax
    movq %dr2, %rax
    pushq %rax
    movq %dr1, %rax
    pushq %rax
    movq %dr0, %rax
    pushq %rax
#; FX_SAVE_STATE_X64 FxSaveState;
    subq $512, %rsp
    movq %rsp, %rdi
    .byte 0x0f, 0x0ae, 0x07 #fxsave [rdi]
#; UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
    cld
#; UINT32 ExceptionData;
    pushq 16(%rbp)
#; Prepare parameter and call
    mov 8(%rbp), %rcx                   # arg1 (MS x64 ABI): vector number
    mov %rsp, %rdx                      # arg2: pointer to the context just built
#
# Per X64 calling convention, allocate maximum parameter stack space
# and make sure RSP is 16-byte aligned
#
    subq $40, %rsp                      # 32-byte shadow space + 8 to keep alignment
    call ASM_PFX(CommonExceptionHandler)
    addq $40, %rsp
    cli
#; UINT64 ExceptionData;
    addq $8, %rsp
#; FX_SAVE_STATE_X64 FxSaveState;
    movq %rsp, %rsi
    .byte 0x0f, 0x0ae, 0x0E # fxrstor [rsi]
    addq $512, %rsp
#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
#; Skip restoration of DRx registers to support in-circuit emualators
#; or debuggers set breakpoint in interrupt/exception context
    addq $48, %rsp
#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
    popq %rax
    movq %rax, %cr0
    addq $8, %rsp # not for Cr1
    popq %rax
    movq %rax, %cr2
    popq %rax
    movq %rax, %cr3
    popq %rax
    movq %rax, %cr4
    popq %rax
    movq %rax, %cr8
#; UINT64 RFlags;
    popq 40(%rbp)
#; UINT64 Ldtr, Tr;
#; UINT64 Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
    addq $48, %rsp
#; UINT64 Rip;
    popq 24(%rbp)
#; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
    popq %rax
#    mov %rax, %gs ; not for gs
    popq %rax
#    mov %rax, %fs ; not for fs
# (X64 will not use fs and gs, so we do not restore it)
    popq %rax
    movl %eax, %es
    popq %rax
    movl %eax, %ds
    popq 32(%rbp) # for cs
    popq 56(%rbp) # for ss
#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
    popq %rdi
    popq %rsi
    addq $8, %rsp # not for rbp
    popq 48(%rbp) # for rsp
    popq %rbx
    popq %rdx
    popq %rcx
    popq %rax
    popq %r8
    popq %r9
    popq %r10
    popq %r11
    popq %r12
    popq %r13
    popq %r14
    popq %r15
    movq %rbp, %rsp
    popq %rbp
    addq $16, %rsp                      # drop vector number and error code
    cmpq $0, -32(%rsp) # check EXCEPTION_HANDLER_CONTEXT.OldIdtHandler
    jz DoReturn # check EXCEPTION_HANDLER_CONTEXT.ExceptionDataFlag
    cmpb $1, -40(%rsp)
    jz ErrorCode
    jmp *-32(%rsp)                      # chain to the saved old IDT handler
ErrorCode:
    subq $8, %rsp                       # re-expose the error code for the old handler
    jmp *-24(%rsp)
DoReturn:
    pushq %rax
#    movabsq ASM_PFX(mDoFarReturnFlag), %rax
    movq ASM_PFX(mDoFarReturnFlag)(%rip), %rax
    cmpq $0, %rax # Check if need to do far return instead of IRET
    popq %rax
    jz DoIret
    pushq %rax
    movq %rsp, %rax # save old RSP to rax
    movq 0x20(%rsp), %rsp               # switch to the interrupted RSP
    pushq 0x10(%rax) # save CS in new location
    pushq 0x8(%rax) # save EIP in new location
    pushq 0x18(%rax) # save EFLAGS in new location
    movq (%rax), %rax # restore rax
    popfq # restore EFLAGS
#    .byte 0x48 # prefix to composite "retq" with next "retf"
#    lretq #retf # far return
    .byte 0x48 # prefix to composite "retq" with next "retf"
#ifdef __APPLE__
    .byte 0xCB
#else
    retf # far return
#endif
DoIret:
    iretq
#-------------------------------------------------------------------------------------
# AsmGetTemplateAddressMap (&AddressMap);
#-------------------------------------------------------------------------------------
# comments here for definition of address map
ASM_GLOBAL ASM_PFX(AsmGetTemplateAddressMap)
#
# X64, MS ABI: VOID AsmGetTemplateAddressMap (EXCEPTION_HANDLER_TEMPLATE_MAP *AddressMap)
# %rcx = AddressMap. Fills:
#   +0x00: address of the first exception stub template (Exception0Handle)
#   +0x08: size of one stub (Exception1Handle - Exception0Handle)
#   +0x10: address of the "hook after" stub template (HookAfterStubHeaderBegin)
# Uses RIP-relative lea so the code stays position-independent.
#
ASM_PFX(AsmGetTemplateAddressMap):
#    movabsq $Exception0Handle, %rax
#    movq %rax, (%rcx)
#    movq $(Exception1Handle - Exception0Handle), 0x08(%rcx)
#    movabsq $HookAfterStubHeaderBegin, %rax
#    movq %rax, 0x10(%rcx)
#    ret
    leaq Exception0Handle(%rip), %rax
    movq %rax, (%rcx)
    movq $(Exception1Handle - Exception0Handle), 0x08(%rcx)
    leaq HookAfterStubHeaderBegin(%rip), %rax
    movq %rax, 0x10(%rcx)
    ret
#-------------------------------------------------------------------------------------
# VOID
# EFIAPI
# AsmVectorNumFixup (
# IN VOID *VectorBase, // RCX
# IN UINT8 VectorNum, // RDX
# IN BOOLEAN HookStub // R8
# );
#-------------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(AsmVectorNumFixup)
#
# X64, MS ABI: VOID AsmVectorNumFixup (VOID *VectorBase /*rcx*/, UINT8 VectorNum /*dl*/,
#                                      BOOLEAN HookStub /*r8b*/)
# Patches a stub copied from HookAfterStubHeaderBegin: writes the vector number
# and the 64-bit jump target at the same offsets the Patch* labels have inside
# the template (HookAfterStubHeaderEnd when HookStub != 0, else CommonInterruptEntry).
#
ASM_PFX(AsmVectorNumFixup):
#    movq %rdx, %rax
#    movb %al, (VectorNum - HookAfterStubHeaderBegin)(%rcx)
#    ret
    pushq %rbp
    movq %rsp, %rbp
# Patch vector #
    movb %dl, (PatchVectorNum - HookAfterStubHeaderBegin)(%rcx)
# Patch Function address
    leaq ASM_PFX(HookAfterStubHeaderEnd)(%rip), %rax
    leaq ASM_PFX(CommonInterruptEntry)(%rip), %r10
    testb %r8b, %r8b                    # HookStub?
    cmovneq %rax, %r10                  # %r10 = HookStub ? HookAfterStubHeaderEnd : CommonInterruptEntry
    movq %r10, (PatchFuncAddress - HookAfterStubHeaderBegin)(%rcx)
    popq %rbp
    ret
#END
#=== extraction artifact: boundary of file CloverEFI/UefiCpuPkg/Library/BaseUefiCpuLib/Ia32/InitializeFpu.S (repo al3xtjames/Clover) ===
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2009 - 2010, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#*
#------------------------------------------------------------------------------
#
# Float control word initial value:
# all exceptions masked, double-precision, round-to-nearest
#
# x87 control word: all exceptions masked, double precision, round-to-nearest.
# NOTE(review): no .data/.rodata directive is visible before these operands,
# so they appear to land in the current (code) section — confirm intended.
ASM_PFX(mFpuControlWord): .word 0x027F
#
# Multimedia-extensions control word:
# all exceptions masked, round-to-nearest, flush to zero for masked underflow
#
ASM_PFX(mMmxControlWord): .long 0x01F80
#
# Initializes floating point units for requirement of UEFI specification.
#
# This function initializes floating-point control word to 0x027F (all exceptions
# masked,double-precision, round-to-nearest) and multimedia-extensions control word
# (if supported) to 0x1F80 (all exceptions masked, round-to-nearest, flush to zero
# for masked underflow).
#
ASM_GLOBAL ASM_PFX(InitializeFloatingPointUnits)
ASM_PFX(InitializeFloatingPointUnits):
    pushl %ebx                          # cpuid clobbers %ebx, which is callee-saved
#
# Initialize floating point units
#
    finit
    fldcw ASM_PFX(mFpuControlWord)
#
# Use CpuId instructuion (CPUID.01H:EDX.SSE[bit 25] = 1) to test
# whether the processor supports SSE instruction.
#
    movl $1, %eax
    cpuid
    btl $25, %edx                       # CF = SSE feature bit
    jnc Done
#
# Set OSFXSR bit 9 in CR4
#
    movl %cr4, %eax
    or $0x200, %eax
    movl %eax, %cr4
#
# The processor should support SSE instruction and we can use
# ldmxcsr instruction
#
    ldmxcsr ASM_PFX(mMmxControlWord)
Done:
    popl %ebx
    ret
#END
#=== extraction artifact: boundary of file CloverEFI/UefiCpuPkg/Library/BaseUefiCpuLib/X64/InitializeFpu.S (repo al3xtjames/Clover) ===
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2009 - 2010, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#*
#------------------------------------------------------------------------------
#
# Initializes floating point units for requirement of UEFI specification.
#
# This function initializes floating-point control word to 0x027F (all exceptions
# masked,double-precision, round-to-nearest) and multimedia-extensions control word
# (if supported) to 0x1F80 (all exceptions masked, round-to-nearest, flush to zero
# for masked underflow).
#
ASM_GLOBAL ASM_PFX(InitializeFloatingPointUnits)
#
# X64 variant: same contract as the IA32 version but loads the control words
# from the stack instead of static data (keeps the code position-independent
# with no data section). SSE support is assumed on x64, so no CPUID check.
#
ASM_PFX(InitializeFloatingPointUnits):
#
# Initialize floating point units
#
    finit
#
# Float control word initial value:
# all exceptions masked, double-precision, round-to-nearest
#
    pushq $0x027F                       # place the 16-bit FCW value in memory via the stack
    lea (%rsp), %rax
    fldcw (%rax)
    popq %rax
#
# Set OSFXSR bit 9 in CR4
#
    movq %cr4, %rax
    or $0x200, %rax
    movq %rax, %cr4
#
# Multimedia-extensions control word:
# all exceptions masked, round-to-nearest, flush to zero for masked underflow
#
    pushq $0x01F80                      # MXCSR value, loaded the same way
    lea (%rsp), %rax
    ldmxcsr (%rax)
    popq %rax
    ret
#=== extraction artifact: boundary of file CloverEFI/UefiCpuPkg/CpuDxe/Ia32/CpuAsm.S (repo al3xtjames/Clover) ===
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2006 - 2012, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* CpuAsm.S
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#.MMX
#.XMM
#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions
#
# point to the external interrupt vector table
#
# Storage for the external interrupt-vector-table pointer (4 bytes, IA32).
# NOTE(review): this writable slot sits in the code section — a W^X concern;
# confirm the platform maps this image writable.
ExternalVectorTablePtr:
    .byte 0, 0, 0, 0
ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr)
# IA32, cdecl: VOID InitializeExternalVectorTablePtr (EFI_CPU_INTERRUPT_HANDLER *VectorTable)
# Records the table base; CommonInterruptEntry later indexes it by vector number.
ASM_PFX(InitializeExternalVectorTablePtr):
    movl 4(%esp), %eax
    movl %eax, ExternalVectorTablePtr
    ret
#------------------------------------------------------------------------------
# VOID
# SetCodeSelector (
# UINT16 Selector
# );
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetCodeSelector)
#
# IA32, cdecl: VOID SetCodeSelector (UINT16 Selector)
# Reloads %cs by building a 6-byte far pointer (offset:selector) on the stack
# and executing a hand-encoded indirect far jump to the next label.
# The .byte sequence is intentional: GAS has no clean mnemonic for "ljmp *(%esp)"
# with this exact encoding — do not replace it.
#
ASM_PFX(SetCodeSelector):
    movl 4(%esp), %ecx                  # %cx = new code selector
    subl $0x10, %esp
    leal setCodeSelectorLongJump, %eax  # far-pointer offset = label below
    movl %eax, (%esp)
    movw %cx, 4(%esp)                   # far-pointer selector
    .byte 0xFF, 0x2C, 0x24 # jmp *(%esp) note:(FWORD jmp)
setCodeSelectorLongJump:
    addl $0x10, %esp
    ret
#------------------------------------------------------------------------------
# VOID
# SetDataSelectors (
# UINT16 Selector
# );
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetDataSelectors)
#
# IA32, cdecl: VOID SetDataSelectors (UINT16 Selector)
# Loads the same selector into every data segment register. %ss is loaded
# first; a mov to %ss inhibits interrupts for the following instruction.
#
ASM_PFX(SetDataSelectors):
    movl 4(%esp), %ecx
    movw %cx, %ss
    movw %cx, %ds
    movw %cx, %es
    movw %cx, %fs
    movw %cx, %gs
    ret
#---------------------------------------;
# CommonInterruptEntry ;
#---------------------------------------;
# The follow algorithm is used for the common interrupt routine.
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
#
# Common IA32 interrupt/exception dispatcher (CpuDxe). Reached via `call` from
# an AsmIdtVectorXX entry; the word immediately after that call site holds the
# vector number, so the call's return address doubles as a pointer to it.
# Builds EFI_SYSTEM_CONTEXT_IA32 on the stack, dispatches to the handler
# registered in ExternalVectorTablePtr[vector] (if any), restores state, iretl.
# The exact push order below IS the context-structure layout — do not reorder.
#
ASM_PFX(CommonInterruptEntry):
    cli
#
# All interrupt handlers are invoked through interrupt gates, so
# IF flag automatically cleared at the entry point
#
#
# Calculate vector number
#
# Get the return address of call, actually, it is the
# address of vector number.
#
    xchgl (%esp), %ecx                  # %ecx = &vector_word; old %ecx parked on stack
    movw (%ecx), %cx                    # %cx = vector number stored after the call
    andl $0x0FFFF, %ecx
    cmpl $32, %ecx # Intel reserved vector for exceptions?
    jae NoErrorCode
    bt %ecx, ASM_PFX(mErrorCodeFlag)    # CF = 1 if CPU pushed an error code
    jc HasErrorCode
NoErrorCode:
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + ECX +
# +---------------------+ <-- ESP
#
# Registers:
# ECX - Vector Number
#
#
# Put Vector Number on stack
#
    pushl %ecx
#
# Put 0 (dummy) error code on stack, and restore ECX
#
    xorl %ecx, %ecx # ECX = 0
    xchgl 4(%esp), %ecx
    jmp ErrorCodeAndVectorOnStack
HasErrorCode:
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + ECX +
# +---------------------+ <-- ESP
#
# Registers:
# ECX - Vector Number
#
#
# Put Vector Number on stack and restore ECX
#
    xchgl (%esp), %ecx
ErrorCodeAndVectorOnStack:
    pushl %ebp
    movl %esp, %ebp
#
# Stack:
# +---------------------+
# + EFlags +
# +---------------------+
# + CS +
# +---------------------+
# + EIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + Vector Number +
# +---------------------+
# + EBP +
# +---------------------+ <-- EBP
#
#
# Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
# is 16-byte aligned
#
    andl $0x0fffffff0, %esp
    subl $12, %esp
#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    pushl %eax
    pushl %ecx
    pushl %edx
    pushl %ebx
    leal 24(%ebp), %ecx                 # interrupted ESP = frame base + pushed words
    pushl %ecx # ESP
    pushl (%ebp) # EBP
    pushl %esi
    pushl %edi
#; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
    movl %ss, %eax
    pushl %eax
    movzwl 16(%ebp), %eax               # interrupted CS from the CPU frame
    pushl %eax
    movl %ds, %eax
    pushl %eax
    movl %es, %eax
    pushl %eax
    movl %fs, %eax
    pushl %eax
    movl %gs, %eax
    pushl %eax
#; UINT32 Eip;
    movl 12(%ebp), %eax
    pushl %eax
#; UINT32 Gdtr[2], Idtr[2];
    subl $8, %esp
    sidt (%esp)
    movl 2(%esp), %eax                  # split the 6-byte IDTR image into limit/base words
    xchgl (%esp), %eax
    andl $0x0FFFF, %eax
    movl %eax, 4(%esp)
    subl $8, %esp
    sgdt (%esp)
    movl 2(%esp), %eax                  # same split for GDTR
    xchgl (%esp), %eax
    andl $0x0FFFF, %eax
    movl %eax, 4(%esp)
#; UINT32 Ldtr, Tr;
    xorl %eax, %eax
    str %ax
    pushl %eax
    sldt %ax
    pushl %eax
#; UINT32 EFlags;
    movl 20(%ebp), %eax
    pushl %eax
#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    movl %cr4, %eax
    orl $0x208, %eax                    # set OSFXSR | OSXMMEXCPT before fxsave below
    movl %eax, %cr4
    pushl %eax
    movl %cr3, %eax
    pushl %eax
    movl %cr2, %eax
    pushl %eax
    xorl %eax, %eax                     # CR1 does not exist; 0 placeholder
    pushl %eax
    movl %cr0, %eax
    pushl %eax
#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
    movl %dr7, %eax
    pushl %eax
    movl %dr6, %eax
    pushl %eax
    movl %dr3, %eax
    pushl %eax
    movl %dr2, %eax
    pushl %eax
    movl %dr1, %eax
    pushl %eax
    movl %dr0, %eax
    pushl %eax
#; FX_SAVE_STATE_IA32 FxSaveState;
    subl $512, %esp
    movl %esp, %edi
    .byte 0x0f, 0x0ae, 0x07 #fxsave [edi]
#; UEFI calling convention for IA32 requires that Direction flag in EFLAGs is clear
    cld
#; UINT32 ExceptionData;
    pushl 8(%ebp)
#; call into exception handler
    movl ExternalVectorTablePtr, %eax # get the interrupt vectors base
    orl %eax, %eax # NULL?
    jz nullExternalExceptionHandler
    mov 4(%ebp), %ecx
    movl (%eax,%ecx,4), %eax            # handler = table[vector]
    orl %eax, %eax # NULL?
    jz nullExternalExceptionHandler
#; Prepare parameter and call
    movl %esp, %edx
    pushl %edx                          # arg2: SystemContext
    movl 4(%ebp), %edx
    pushl %edx                          # arg1: InterruptType (vector)
#
# Call External Exception Handler
#
    call *%eax
    addl $8, %esp
nullExternalExceptionHandler:
    cli
#; UINT32 ExceptionData;
    addl $4, %esp
#; FX_SAVE_STATE_IA32 FxSaveState;
    movl %esp, %esi
    .byte 0x0f, 0x0ae, 0x0e # fxrstor [esi]
    addl $512, %esp
#; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
#; Skip restoration of DRx registers to support in-circuit emualators
#; or debuggers set breakpoint in interrupt/exception context
    addl $24, %esp
#; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
    popl %eax
    movl %eax, %cr0
    addl $4, %esp # not for Cr1
    popl %eax
    movl %eax, %cr2
    popl %eax
    movl %eax, %cr3
    popl %eax
    movl %eax, %cr4
#; UINT32 EFlags;
    popl 20(%ebp)
#; UINT32 Ldtr, Tr;
#; UINT32 Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
    addl $24, %esp
#; UINT32 Eip;
    popl 12(%ebp)
#; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
#; NOTE - modified segment registers could hang the debugger... We
#; could attempt to insulate ourselves against this possibility,
#; but that poses risks as well.
#;
    popl %gs
    popl %fs
    popl %es
    popl %ds
    popl 16(%ebp)
    popl %ss
#; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
    popl %edi
    popl %esi
    addl $4, %esp # not for ebp
    addl $4, %esp # not for esp
    popl %ebx
    popl %edx
    popl %ecx
    popl %eax
    movl %ebp, %esp
    popl %ebp
    addl $8, %esp                       # drop vector number and error code
    iretl
#END
#=== extraction artifact: boundary of file CloverEFI/UefiCpuPkg/CpuDxe/Ia32/IvtAsm.S (repo al3xtjames/Clover) ===
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# IvtAsm.S
#
# Abstract:
#
# Interrupt Vector Table
#
#------------------------------------------------------------------------------
#
# Interrupt Vector Table
#
ASM_GLOBAL ASM_PFX(AsmIdtVector00)
.p2align 3
ASM_PFX(AsmIdtVector00):
call ASM_PFX(CommonInterruptEntry)
.short 0x00
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x01
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x02
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x03
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x04
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x05
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x06
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x07
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x08
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x09
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x0f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x10
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x11
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x12
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x13
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x14
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x15
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x16
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x17
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x18
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x19
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x1f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x00
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x21
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x22
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x23
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x24
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x25
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x26
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x27
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x28
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x29
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x2f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x30
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x31
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x32
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x33
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x34
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x35
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x36
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x37
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x38
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x39
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x3f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x40
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x41
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x42
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x43
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x44
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x45
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x46
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x47
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x48
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x49
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x4f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x50
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x51
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x52
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x53
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x54
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x55
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x56
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x57
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x58
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x59
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x5f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x60
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x61
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x62
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x63
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x64
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x65
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x66
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x67
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x68
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x69
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x6f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x70
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x71
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x72
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x73
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x74
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x75
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x76
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x77
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x78
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x79
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x7f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x80
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x81
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x82
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x83
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x84
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x85
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x86
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x87
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x88
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x89
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x8f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x90
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x91
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x92
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x93
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x94
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x95
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x96
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x97
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x98
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x99
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9a
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9b
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9c
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9d
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9e
nop
call ASM_PFX(CommonInterruptEntry)
.short 0x9f
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xa9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xaa
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xab
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xac
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xad
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xae
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xaf
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xb9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xba
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xbb
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xbc
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xbd
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xbe
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xbf
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xc9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xca
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xcb
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xcc
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xcd
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xce
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xcf
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xd9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xda
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xdb
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xdc
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xdd
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xde
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xdf
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xe9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xea
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xeb
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xec
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xed
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xee
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xef
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf0
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf1
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf2
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf3
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf4
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf5
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf6
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf7
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf8
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xf9
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xfa
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xfb
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xfc
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xfd
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xfe
nop
call ASM_PFX(CommonInterruptEntry)
.short 0xff
nop
ASM_GLOBAL ASM_PFX(AsmCommonIdtEnd)
ASM_PFX(AsmCommonIdtEnd):
.byte 0
|
al3xtjames/Clover
| 8,428
|
CloverEFI/UefiCpuPkg/CpuDxe/X64/CpuAsm.S
|
# TITLE CpuAsm.S:
#------------------------------------------------------------------------------
#*
#* Copyright (c) 2008 - 2011, Intel Corporation. All rights reserved.<BR>
#* This program and the accompanying materials
#* are licensed and made available under the terms and conditions of the BSD License
#* which accompanies this distribution. The full text of the license may be found at
#* http://opensource.org/licenses/bsd-license.php
#*
#* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
#* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#*
#* CpuAsm.S
#*
#* Abstract:
#*
#------------------------------------------------------------------------------
#text SEGMENT
#EXTRN ASM_PFX(mErrorCodeFlag):DWORD # Error code flags for exceptions
#
# point to the external interrupt vector table
#
# 8-byte storage slot holding the address of the external interrupt
# vector table (filled in by InitializeExternalVectorTablePtr below,
# read by CommonInterruptEntry when dispatching an interrupt).
ExternalVectorTablePtr:
.byte 0, 0, 0, 0, 0, 0, 0, 0
ASM_GLOBAL ASM_PFX(InitializeExternalVectorTablePtr)
#------------------------------------------------------------------------------
# VOID InitializeExternalVectorTablePtr (VOID *VectorTable);
# MS x64 ABI: the table address arrives in %rcx.  Store it into the
# ExternalVectorTablePtr slot through a RIP-relative address so the code
# stays position independent.
#------------------------------------------------------------------------------
ASM_PFX(InitializeExternalVectorTablePtr):
leaq ExternalVectorTablePtr(%rip), %rax # %rax = &ExternalVectorTablePtr
movq %rcx, (%rax) # save the vector table address
ret
#------------------------------------------------------------------------------
# VOID
# SetCodeSelector (
# UINT16 Selector
# );
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetCodeSelector)
# Reload %cs with the selector passed in %cx (MS x64 ABI, low word of arg 1).
# %cs cannot be written with a mov, so a far pointer {offset, selector}
# is built on the stack and an indirect far jump through it reloads %cs
# and resumes at L_setCodeSelectorLongJump.
# NOTE(review): the hand-encoded jump carries a REX.W prefix (0x48), which
# makes it "jmp m16:64" — selector read from 8(%rsp) — yet the selector is
# stored at 4(%rsp) as for the m16:32 form; upstream EDK2 encodes this
# without 0x48.  Verify against the Intel SDM / a disassembly.
ASM_PFX(SetCodeSelector):
subq $0x14, %rsp # scratch space for the far pointer
leaq L_setCodeSelectorLongJump(%rip), %rax
movq %rax, (%rsp) # far pointer offset = continuation address
movw %cx, 4(%rsp) # far pointer selector = new CS
.byte 0x48, 0xFF, 0x2C, 0x24 # jmp (%rsp) note:fword jmp
L_setCodeSelectorLongJump:
addq $0x14, %rsp # discard scratch space
ret
#------------------------------------------------------------------------------
# VOID
# SetDataSelectors (
# UINT16 Selector
# );
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SetDataSelectors)
#------------------------------------------------------------------------------
# VOID SetDataSelectors (UINT16 Selector);
# MS x64 ABI: Selector arrives in %cx.  Load it into every data segment
# register.  %ss is written first; a mov to %ss also inhibits interrupts
# for the instruction that follows it.
#------------------------------------------------------------------------------
ASM_PFX(SetDataSelectors):
movw %cx, %ss
movw %cx, %gs
movw %cx, %fs
movw %cx, %es
movw %cx, %ds
ret
#---------------------------------------;
# CommonInterruptEntry ;
#---------------------------------------;
# The follow algorithm is used for the common interrupt routine.
ASM_GLOBAL ASM_PFX(CommonInterruptEntry)
# Common landing pad for every per-vector IDT stub.  Each stub executes
# "call CommonInterruptEntry" immediately followed by a .short holding its
# vector number, so the return address pushed by the call points at that
# vector number.  This routine builds an EFI_SYSTEM_CONTEXT_X64 record on
# the stack, dispatches to the externally registered handler (if any),
# restores the context and returns with iretq.
ASM_PFX(CommonInterruptEntry):
cli
#
# All interrupt handlers are invoked through interrupt gates, so
# IF flag automatically cleared at the entry point
#
#
# Calculate vector number
#
xchgq (%rsp), %rcx # get the return address of call, actually, it is the address of vector number.
movzwl (%rcx), %ecx # %ecx = vector number (caller's %rcx is parked on the stack)
cmp $32, %ecx # Intel reserved vector for exceptions?
jae NoErrorCode
pushq %rax
leaq ASM_PFX(mErrorCodeFlag)(%rip), %rax
bt %ecx, (%rax) # CF = 1 if the CPU pushed a real error code for this vector
popq %rax
jc CommonInterruptEntry_al_0000 # error code already on the stack; skip the dummy
NoErrorCode:
#
# Push a dummy error code on the stack
# to maintain coherent stack map
#
pushq (%rsp) # duplicate the saved %rcx one slot down...
movq $0, 8(%rsp) # ...and turn its old slot into a zero error code
CommonInterruptEntry_al_0000:
pushq %rbp
movq %rsp, %rbp
#
# Stack:
# +---------------------+ <-- 16-byte aligned ensured by processor
# + Old SS +
# +---------------------+
# + Old RSP +
# +---------------------+
# + RFlags +
# +---------------------+
# + CS +
# +---------------------+
# + RIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + RCX / Vector Number +
# +---------------------+
# + RBP +
# +---------------------+ <-- RBP, 16-byte aligned
#
#
# Since here the stack pointer is 16-byte aligned, so
# EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
# is 16-byte aligned
#
#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rax
pushq 8(%rbp) # RCX (original value saved by the xchg above)
pushq %rdx
pushq %rbx
pushq 48(%rbp) # RSP (interrupted stack pointer from the iret frame)
pushq (%rbp) # RBP (interrupted frame pointer)
pushq %rsi
pushq %rdi
#; UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero
movzwq 56(%rbp), %rax
pushq %rax # for ss
movzwq 32(%rbp), %rax
pushq %rax # for cs
movl %ds, %eax
pushq %rax
movl %es, %eax
pushq %rax
movl %fs, %eax
pushq %rax
movl %gs, %eax
pushq %rax
movq %rcx, 8(%rbp) # save vector number into the frame for later dispatch
#; UINT64 Rip;
pushq 24(%rbp)
#; UINT64 Gdtr[2], Idtr[2];
# sidt writes a 10-byte image (limit:16, base:64) into the 16 zeroed
# bytes pushed below; the xchg shuffle repacks it into two natural
# 8-byte slots without reading past the buffer.
xorq %rax, %rax
pushq %rax
pushq %rax
sidt (%rsp)
xchgq 2(%rsp), %rax # %rax = IDT base, zero its in-buffer copy
xchgq (%rsp), %rax # slot 0 = base, %rax = limit
xchgq 8(%rsp), %rax # slot 1 = limit, %rax = 0 again
xorq %rax, %rax
pushq %rax
pushq %rax
sgdt (%rsp) # same repacking for the GDTR image
xchgq 2(%rsp), %rax
xchgq (%rsp), %rax
xchgq 8(%rsp), %rax
#; UINT64 Ldtr, Tr;
xorq %rax, %rax
str %ax # task register selector
pushq %rax
sldt %ax # LDT selector
pushq %rax
#; UINT64 RFlags;
pushq 40(%rbp)
#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
movq %cr8, %rax
pushq %rax
movq %cr4, %rax
orq $0x208, %rax # set OSFXSR (bit 9) and DE (bit 3) so fxsave/fxrstor work
movq %rax, %cr4 # note: the modified value is what gets saved (and restored)
pushq %rax
mov %cr3, %rax
pushq %rax
mov %cr2, %rax
pushq %rax
xorq %rax, %rax # CR1 does not exist; save a zero placeholder
pushq %rax
mov %cr0, %rax
pushq %rax
#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
movq %dr7, %rax
pushq %rax
movq %dr6, %rax
pushq %rax
movq %dr3, %rax
pushq %rax
movq %dr2, %rax
pushq %rax
movq %dr1, %rax
pushq %rax
movq %dr0, %rax
pushq %rax
#; FX_SAVE_STATE_X64 FxSaveState;
subq $512, %rsp
movq %rsp, %rdi
.byte 0x0f, 0x0ae, 0x07 #fxsave [rdi]
#; UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
cld
#; UINT32 ExceptionData;
pushq 16(%rbp)
#; call into exception handler
movq 8(%rbp), %rcx # 1st arg (MS x64 ABI): vector number
leaq ExternalVectorTablePtr(%rip), %rax
# movl (%eax), %eax
movq (%rax), %rax # %rax = external vector table base
movq (%rax,%rcx,8), %rax # %rax = registered handler for this vector
orq %rax, %rax # NULL?
je nonNullValue# despite the label name, this SKIPS the call when NULL
#; Prepare parameter and call
# mov rcx, [rbp + 8]
mov %rsp, %rdx # 2nd arg: pointer to the context record just built
#
# Per X64 calling convention, allocate maximum parameter stack space
# and make sure RSP is 16-byte aligned
#
subq $40, %rsp # 32-byte shadow space + 8 for alignment
call *%rax
addq $40, %rsp
nonNullValue:
cli
#; UINT64 ExceptionData;
addq $8, %rsp
#; FX_SAVE_STATE_X64 FxSaveState;
movq %rsp, %rsi
.byte 0x0f, 0x0ae, 0x0E # fxrstor [rsi]
addq $512, %rsp
#; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
#; Skip restoration of DRx registers to support in-circuit emualators
#; or debuggers set breakpoint in interrupt/exception context
addq $48, %rsp
#; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
popq %rax
movq %rax, %cr0
addq $8, %rsp # not for Cr1
popq %rax
movq %rax, %cr2
popq %rax
movq %rax, %cr3
popq %rax
movq %rax, %cr4
popq %rax
movq %rax, %cr8
#; UINT64 RFlags;
popq 40(%rbp) # write back into the iret frame so iretq restores it
#; UINT64 Ldtr, Tr;
#; UINT64 Gdtr[2], Idtr[2];
#; Best not let anyone mess with these particular registers...
addq $48, %rsp
#; UINT64 Rip;
popq 24(%rbp) # write back into the iret frame
#; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
popq %rax
# mov %rax, %gs ; not for gs
popq %rax
# mov %rax, %fs ; not for fs
# (X64 will not use fs and gs, so we do not restore it)
popq %rax
movl %eax, %es
popq %rax
movl %eax, %ds
popq 32(%rbp) # for cs
popq 56(%rbp) # for ss
#; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
#; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
popq %rdi
popq %rsi
addq $8, %rsp # not for rbp
popq 48(%rbp) # for rsp
popq %rbx
popq %rdx
popq %rcx
popq %rax
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
movq %rbp, %rsp
popq %rbp
addq $16, %rsp # drop the vector number and error code slots
iretq
#text ENDS
#END
|
al3xtjames/Clover
| 1,822
|
LegacyBios/IA32/InterruptTable.S
|
## @file
# Interrupt Redirection Template
#
# Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions
# of the BSD License which accompanies this distribution. The
# full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
ASM_GLOBAL ASM_PFX(InterruptRedirectionTemplate)
#----------------------------------------------------------------------------
# Procedure: InterruptRedirectionTemplate: Redirects interrupts 0x68-0x6F
#
# Input: None
#
# Output: None
#
# Prototype: VOID
# InterruptRedirectionTemplate (
# VOID
# );
#
# Saves: None
#
# Modified: None
#
# Description: Contains the code that is copied into low memory (below 640K).
# This code reflects interrupts 0x68-0x6f to interrupts 0x08-0x0f.
# This template must be copied into low memory, and the IDT entries
# 0x68-0x6F must be point to the low memory copy of this code. Each
# entry is 4 bytes long, so IDT entries 0x68-0x6F can be easily
# computed.
#
#----------------------------------------------------------------------------
# Each entry below is exactly 4 bytes: "int" (2 bytes, CD nn) + a raw
# iret byte (1) + "nop" (1 byte of padding).  The template is copied
# below 640K and the IDT entries for vectors 0x68-0x6F point at
# base + 4 * (vector - 0x68), so the 4-byte stride must not change.
ASM_PFX(InterruptRedirectionTemplate):
int $0x8 # vector 0x68 -> INT 08h
.byte 0xcf # iret
nop # pad entry to 4 bytes
int $0x9 # vector 0x69 -> INT 09h
.byte 0xcf # iret
nop
int $0xa # vector 0x6A -> INT 0Ah
.byte 0xcf # iret
nop
int $0xb # vector 0x6B -> INT 0Bh
.byte 0xcf # iret
nop
int $0xc # vector 0x6C -> INT 0Ch
.byte 0xcf # iret
nop
int $0xd # vector 0x6D -> INT 0Dh
.byte 0xcf # iret
nop
int $0xe # vector 0x6E -> INT 0Eh
.byte 0xcf # iret
nop
int $0xf # vector 0x6F -> INT 0Fh
.byte 0xcf # iret
nop
|
al3xtjames/Clover
| 2,008
|
LegacyBios/X64/InterruptTable.S
|
## @file
# Interrupt Redirection Template
#
# Copyright (c) 2006, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions
# of the BSD License which accompanies this distribution. The
# full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
#text SEGMENT
#----------------------------------------------------------------------------
# Procedure: InterruptRedirectionTemplate: Redirects interrupts 0x68-0x6F
#
# Input: None
#
# Output: None
#
# Prototype: VOID
# InterruptRedirectionTemplate (
# VOID
# );
#
# Saves: None
#
# Modified: None
#
# Description: Contains the code that is copied into low memory (below 640K).
# This code reflects interrupts 0x68-0x6f to interrupts 0x08-0x0f.
# This template must be copied into low memory, and the IDT entries
# 0x68-0x6F must be point to the low memory copy of this code. Each
# entry is 4 bytes long, so IDT entries 0x68-0x6F can be easily
# computed.
#
#----------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(InterruptRedirectionTemplate)
# Each entry below is exactly 4 bytes: "int" (2 bytes) + raw IRET byte
# (1) + "nop" (1 byte of padding).  The template is copied below 640K
# and the IDT entries for vectors 0x68-0x6F point at
# base + 4 * (vector - 0x68), so the 4-byte stride must not change.
ASM_PFX(InterruptRedirectionTemplate):
int $0x08 # vector 0x68 -> INT 08h
.byte 0x0cf # IRET
nop # pad entry to 4 bytes
int $0x09 # vector 0x69 -> INT 09h
.byte 0x0cf # IRET
nop
int $0x0a # vector 0x6A -> INT 0Ah
.byte 0x0cf # IRET
nop
int $0x0b # vector 0x6B -> INT 0Bh
.byte 0x0cf # IRET
nop
int $0x0c # vector 0x6C -> INT 0Ch
.byte 0x0cf # IRET
nop
int $0x0d # vector 0x6D -> INT 0Dh
.byte 0x0cf # IRET
nop
int $0x0e # vector 0x6E -> INT 0Eh
.byte 0x0cf # IRET
nop
int $0x0f # vector 0x6F -> INT 0Fh
.byte 0x0cf # IRET
nop
#END
|
al3xtjames/Clover
| 3,007
|
FileSystems/GrubFS/grub/grub-core/efiemu/runtime/efiemu.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2006,2007,2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
/*
* x86_64 uses registry to pass parameters. Unfortunately, gcc and efi use
* different call conversion, so we need to do some conversion.
*
* gcc:
* %rdi, %rsi, %rdx, %rcx, %r8, %r9, 8(%rsp), 16(%rsp), ...
*
* efi:
* %rcx, %rdx, %r8, %r9, 32(%rsp), 40(%rsp), 48(%rsp), ...
*
*/
.file "efiemu.S"
.text
FUNCTION (efiemu_get_time)
/* EFI (MS x64) -> SysV thunk: arguments arrive in %rcx/%rdx and are
   forwarded in %rdi/%rsi.  %rdi/%rsi are callee-saved under the EFI
   ABI, so preserve them around the call. */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 */
call efiemu_get_time_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_set_time)
/* EFI (MS x64) -> SysV thunk, single argument (%rcx -> %rdi).
   %rdi/%rsi are callee-saved under the EFI ABI, so preserve them. */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
call efiemu_set_time_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_get_wakeup_time)
/* EFI (MS x64) -> SysV thunk, three arguments
   (%rcx/%rdx/%r8 -> %rdi/%rsi/%rdx). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 */
movq %r8, %rdx /* arg 3 */
call efiemu_get_wakeup_time_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_set_wakeup_time)
/* EFI (MS x64) -> SysV thunk, two arguments
   (%rcx/%rdx -> %rdi/%rsi). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 */
call efiemu_set_wakeup_time_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_get_variable)
/* EFI (MS x64) -> SysV thunk, five arguments.  The copies below must
   stay in this order: %rdx is read before it is overwritten. */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 (before %rdx is clobbered) */
movq %r8, %rdx /* arg 3 */
movq %r9, %rcx /* arg 4 */
movq 56(%rsp), %r8 /* arg 5: at 40(%rsp) on entry, +16 for the two pushes */
call efiemu_get_variable_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_get_next_variable_name)
/* EFI (MS x64) -> SysV thunk, three arguments
   (%rcx/%rdx/%r8 -> %rdi/%rsi/%rdx). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 (before %rdx is clobbered) */
movq %r8, %rdx /* arg 3 */
call efiemu_get_next_variable_name_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_set_variable)
/* EFI (MS x64) -> SysV thunk, five arguments.  The copies below must
   stay in this order: %rdx is read before it is overwritten. */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 (before %rdx is clobbered) */
movq %r8, %rdx /* arg 3 */
movq %r9, %rcx /* arg 4 */
movq 56(%rsp), %r8 /* arg 5: at 40(%rsp) on entry, +16 for the two pushes */
call efiemu_set_variable_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_get_next_high_monotonic_count)
/* EFI (MS x64) -> SysV thunk, single argument (%rcx -> %rdi). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
call efiemu_get_next_high_monotonic_count_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_reset_system)
/* EFI (MS x64) -> SysV thunk, four arguments
   (%rcx/%rdx/%r8/%r9 -> %rdi/%rsi/%rdx/%rcx). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 (before %rdx is clobbered) */
movq %r8, %rdx /* arg 3 */
movq %r9, %rcx /* arg 4 */
call efiemu_reset_system_real
pop %rdi
pop %rsi
ret
/* The following functions are always called in physical mode */
.section ".text-physical", "ax"
FUNCTION (efiemu_set_virtual_address_map)
/* EFI (MS x64) -> SysV thunk, four arguments; runs in physical mode
   (placed in the ".text-physical" section above). */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 (before %rdx is clobbered) */
movq %r8, %rdx /* arg 3 */
movq %r9, %rcx /* arg 4 */
call efiemu_set_virtual_address_map_real
pop %rdi
pop %rsi
ret
FUNCTION (efiemu_convert_pointer)
/* EFI (MS x64) -> SysV thunk, two arguments (%rcx/%rdx -> %rdi/%rsi);
   runs in physical mode. */
push %rsi
push %rdi
movq %rcx, %rdi /* arg 1 */
movq %rdx, %rsi /* arg 2 */
call efiemu_convert_pointer_real
pop %rdi
pop %rsi
ret
|
al3xtjames/Clover
| 3,902
|
FileSystems/GrubFS/grub/grub-core/lib/i386/relocator64.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009,2010 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#define CODE32_SEGMENT 0x18
#define CODE_SEGMENT 0x08
/* The data segment of the protected mode. */
#define DATA_SEGMENT 0x10
#include "relocator_common.S"
.p2align 4 /* force 16-byte alignment */
/* Relocation trampoline that switches to long mode (when built 32-bit)
   and jumps to a 64-bit payload.  The blob between
   grub_relocator64_start and grub_relocator64_end is copied elsewhere
   and executed there; each grub_relocator64_* "variable" below is a
   runtime-patched immediate embedded in the instruction stream (the
   opcode bytes such as 0x48/0xb8 are spelled out so the immediate gets
   its own label).  The layout is therefore byte-exact: do not reorder
   or re-encode these instructions. */
VARIABLE(grub_relocator64_start)
PREAMBLE
#ifndef __x86_64__
DISABLE_PAGING
/* Turn on PAE. */
movl %cr4, %eax
orl $(GRUB_MEMORY_CPU_CR4_PAE_ON | GRUB_MEMORY_CPU_CR4_PSE_ON), %eax
movl %eax, %cr4
/* mov imm32, %eax -- immediate patched with the new CR3 value. */
.byte 0xb8
VARIABLE(grub_relocator64_cr3)
.long 0
movl %eax, %cr3
/* Turn on amd64. */
movl $GRUB_MEMORY_CPU_AMD64_MSR, %ecx
rdmsr
orl $GRUB_MEMORY_CPU_AMD64_MSR_ON, %eax
wrmsr
/* Enable paging. */
movl %cr0, %eax
orl $GRUB_MEMORY_CPU_CR0_PAGING_ON, %eax
movl %eax, %cr0
RELOAD_GDT
#else
/* mov imm64, %rax -- immediate patched with the new CR3 value. */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator64_cr3)
.quad 0
movq %rax, %cr3
#endif
.code64
/* mov imm64, %rax -- payload stack pointer. */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator64_rsp)
.quad 0
movq %rax, %rsp
/* mov imm64, %rax -- payload %rsi value. */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator64_rsi)
.quad 0
movq %rax, %rsi
/* mov imm64, %rax -- payload %rax value. */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator64_rax)
.quad 0
/* mov imm64, %rbx */
.byte 0x48
.byte 0xbb
VARIABLE(grub_relocator64_rbx)
.quad 0
/* mov imm64, %rcx */
.byte 0x48
.byte 0xb9
VARIABLE(grub_relocator64_rcx)
.quad 0
/* mov imm64, %rdx */
.byte 0x48
.byte 0xba
VARIABLE(grub_relocator64_rdx)
.quad 0
/* Cleared direction flag is of no problem with any current
   payload and makes this implementation easier. */
cld
#ifdef __APPLE__
/* ff 25: jmp *disp32(%rip).
   NOTE(review): a 32-bit displacement needs only 4 bytes but .quad
   emits 8; verify the indirect target actually lands on jump_addr. */
.byte 0xff, 0x25
.quad 0
#else
jmp *LOCAL(jump_addr) (%rip)
#endif
LOCAL(jump_addr):
VARIABLE(grub_relocator64_rip)
.quad 0
#ifndef __x86_64__
.p2align 4
/* Minimal GDT used while bringing up long mode (32-bit build only). */
LOCAL(gdt):
/* NULL. */
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/* 64-bit segment. */
.word 0xffff /* Limit xffff. */
.word 0x0000 /* Base xxxx0000. */
.byte 0x00 /* Base xx00xxxx. */
.byte (0x8 /* Type 8. */ | (1 << 4) /* Code. */ \
| (0 << 5) /* Ring 0. */ | (1 << 7) /* Present. */)
.byte (0xf /* Limit fxxxx. */ | (0 << 4) /* AVL flag. */ \
| (1 << 5) /* 64-bit. */ | (0 << 6) \
| (1 << 7) /* 4K granular. */)
.byte 0x00 /* Base 00xxxxxx. */
/* Data segment*/
.word 0xffff /* Limit xffff. */
.word 0x0000 /* Base xxxx0000. */
.byte 0x00 /* Base xx00xxxx. */
.byte (0x0 /* Type 0. */ | (0 << 4) /* Data. */ \
| (0 << 5) /* Ring 0. */ | (1 << 7) /* Present. */)
.byte (0xf /* Limit fxxxx. */ | (0 << 4) /* AVL flag. */ \
| (0 << 5) /* Data. */ | (0 << 6) \
| (1 << 7) /* 4K granular. */)
.byte 0x00 /* Base 00xxxxxx. */
/* Compatibility segment. */
.word 0xffff /* Limit xffff. */
.word 0x0000 /* Base xxxx0000. */
.byte 0x00 /* Base xx00xxxx. */
.byte (0x8 /* Type 8. */ | (1 << 4) /* Code. */ \
| (0 << 5) /* Ring 0. */ | (1 << 7) /* Present. */)
.byte (0xf /* Limit fxxxx. */ | (0 << 4) /* AVL flag. */ \
| (0 << 5) /* 32-bit. */ | (1 << 6) /* 32-bit. */ \
| (1 << 7) /* 4K granular. */)
.byte 0x00 /* Base 00xxxxxx. */
LOCAL(gdt_end):
#endif
VARIABLE(grub_relocator64_end)
|
al3xtjames/Clover
| 2,802
|
FileSystems/GrubFS/grub/grub-core/lib/i386/relocator_common.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009,2010 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/i386/memory.h>
#ifdef __x86_64__
#define RAX %rax
#define RSI %rsi
#else
#define RAX %eax
#define RSI %esi
#endif
# DISABLE_PAGING: turn paging off by clearing the PG bit in %cr0.
# Clobbers %eax.  NOTE(review): presumably requires the executing code
# to be identity-mapped at this point — confirm at the call sites.
.macro DISABLE_PAGING
#ifdef GRUB_MACHINE_IEEE1275
#endif
movl %cr0, %eax
andl $(~GRUB_MEMORY_CPU_CR0_PAGING_ON), %eax
movl %eax, %cr0
.endm
# PREAMBLE: first thing a relocator blob executes.  On entry RAX holds
# the blob's runtime load address ("base").  Save it in RSI — all later
# offsets are computed relative to RSI — then continue at cont0 via a
# computed jump, which works no matter where the blob was copied.
.macro PREAMBLE
LOCAL(base):
/* %rax contains now our new 'base'. */
mov RAX, RSI
#if defined (__APPLE__) && defined (__x86_64__)
leaq LOCAL(cont0) (%rip), RAX
#elif defined (__APPLE__)
LOCAL(cont0_offset) = LOCAL(cont0) - LOCAL(base)
add $LOCAL(cont0_offset), RAX
#else
add $(LOCAL(cont0) - LOCAL(base)), RAX
#endif
jmp *RAX
LOCAL(cont0):
.endm
# RELOAD_GDT: build a GDT descriptor and a far-jump vector in place
# (RSI = runtime base, set by PREAMBLE), load the new GDT and reload
# %cs with CODE_SEGMENT via an indirect far jump.  The gdtdesc /
# gdt_addr / jump_vector slots live inside the copied blob and are
# filled at run time by the code itself.  Clobbers RAX.
.macro RELOAD_GDT
#ifdef __APPLE__
LOCAL(cont1_offset) = LOCAL(cont1) - LOCAL(base)
LOCAL(jump_vector_offset) = LOCAL(jump_vector) - LOCAL(base)
LOCAL(gdt_offset) = LOCAL(gdt) - LOCAL(base)
LOCAL(gdt_addr_offset) = LOCAL(gdt_addr) - LOCAL(base)
LOCAL(gdtdesc_offset) = LOCAL(gdtdesc) - LOCAL(base)
lea LOCAL(cont1_offset) (RSI, 1), RAX
movl %eax, LOCAL(jump_vector_offset) (RSI, 1)
lea LOCAL(gdt_offset) (RSI, 1), RAX
mov RAX, (LOCAL(gdt_addr_offset)) (RSI, 1)
/* Switch to compatibility mode. */
lgdt (LOCAL(gdtdesc_offset)) (RSI, 1)
/* Update %cs. */
ljmp *(LOCAL(jump_vector_offset)) (RSI, 1)
.p2align 4
LOCAL(gdtdesc):
LOCAL(gdtsize) = LOCAL(gdt_end) - LOCAL(gdt)
.word LOCAL(gdtsize)
#else
lea (LOCAL(cont1) - LOCAL(base)) (RSI, 1), RAX
movl %eax, (LOCAL(jump_vector) - LOCAL(base)) (RSI, 1)
lea (LOCAL(gdt) - LOCAL(base)) (RSI, 1), RAX
mov RAX, (LOCAL(gdt_addr) - LOCAL(base)) (RSI, 1)
/* Switch to compatibility mode. */
lgdt (LOCAL(gdtdesc) - LOCAL(base)) (RSI, 1)
/* Update %cs. */
ljmp *(LOCAL(jump_vector) - LOCAL(base)) (RSI, 1)
.p2align 4
LOCAL(gdtdesc):
.word LOCAL(gdt_end) - LOCAL(gdt)
#endif
LOCAL(gdt_addr):
#ifdef __x86_64__
/* Filled by the code. */
.quad 0
#else
/* Filled by the code. */
.long 0
#endif
.p2align 4
LOCAL(jump_vector):
/* Jump location. Is filled by the code */
.long 0
.long CODE_SEGMENT
LOCAL(cont1):
.endm
|
al3xtjames/Clover
| 1,404
|
FileSystems/GrubFS/grub/grub-core/lib/i386/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2003,2007 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.text
/*
* int grub_setjmp (grub_jmp_buf env)
*/
FUNCTION(grub_setjmp)
/* int grub_setjmp (grub_jmp_buf env)
   The env pointer arrives in %eax (GRUB's internal i386 register-arg
   convention — NOTE(review): confirm against the build's regparm flags).
   Saves the callee-saved registers plus %esp/EIP into env and returns 0.
   The return address is popped into %ecx first, so the saved %esp is
   the caller's stack pointer after the call returns. */
movl %ebx, 0(%eax) /* save %ebx */
movl %esi, 4(%eax) /* save %esi */
movl %edi, 8(%eax) /* save %edi */
movl %ebp, 12(%eax) /* save frame pointer */
popl %ecx /* %ecx = return address (removed from stack) */
movl %esp, 16(%eax) /* save the caller-visible %esp */
movl %ecx, 20(%eax) /* save the resume EIP */
xorl %eax, %eax /* direct invocation returns 0 */
jmp *%ecx /* return via the saved address */
/*
* int grub_longjmp (grub_jmp_buf env, int val)
*/
FUNCTION(grub_longjmp)
/* int grub_longjmp (grub_jmp_buf env, int val)
   env in %eax, val in %edx (same internal convention as grub_setjmp).
   Restores the context saved by grub_setjmp and resumes at the saved
   EIP; the setjmp call appears to return val, forced to 1 when val
   is 0 so setjmp never observes a second return of 0. */
movl 0(%eax), %ebx
movl 4(%eax), %esi
movl 8(%eax), %edi
movl 12(%eax), %ebp
movl 16(%eax), %esp /* switch back to the saved stack */
movl 20(%eax), %ecx /* resume address */
movl %edx, %eax /* return value */
testl %eax, %eax
jnz 1f
incl %eax /* map 0 -> 1 */
1: jmp *%ecx
|
al3xtjames/Clover
| 1,752
|
FileSystems/GrubFS/grub/grub-core/lib/i386/relocator_asm.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/i386/memory.h>
.p2align 2
# Relocation trampoline: copies a chunk of memory downward (source and
# destination overlap with dest > src handled by copying backwards).
# The .long 0 slots after the hand-emitted 0xb8/0xb9 opcode bytes are
# immediates patched at run time; the blob is copied to a scratch
# location before execution, so it must stay position independent and
# byte-exact.
VARIABLE(grub_relocator_backward_start)
/* mov imm32, %eax -- patched: destination address. */
.byte 0xb8
VARIABLE(grub_relocator_backward_dest)
.long 0
movl %eax, %edi
/* mov imm32, %eax -- patched: source address. */
.byte 0xb8
VARIABLE(grub_relocator_backward_src)
.long 0
movl %eax, %esi
/* mov imm32, %ecx -- patched: chunk size in bytes. */
.byte 0xb9
VARIABLE(grub_relocator_backward_chunk_size)
.long 0
add %ecx, %esi # point at one past the last byte...
add %ecx, %edi
/* Backward movsb is implicitly off-by-one; compensate for that. */
sub $1, %esi
sub $1, %edi
/* Backward copy. */
std
rep
movsb
VARIABLE(grub_relocator_backward_end)
# Forward-copying counterpart of the backward trampoline above; same
# patched-immediate scheme and position-independence constraints.
VARIABLE(grub_relocator_forward_start)
/* mov imm32, %eax -- patched: destination address. */
.byte 0xb8
VARIABLE(grub_relocator_forward_dest)
.long 0
movl %eax, %edi
/* mov imm32, %eax -- patched: source address.
   (Opcode 0xb8 loads %eax, not %rax as the old comment said.) */
.byte 0xb8
VARIABLE(grub_relocator_forward_src)
.long 0
movl %eax, %esi
/* mov imm32, %ecx -- patched: chunk size in bytes. */
.byte 0xb9
VARIABLE(grub_relocator_forward_chunk_size)
.long 0
/* Forward copy. */
cld
rep
movsb
VARIABLE(grub_relocator_forward_end)
|
al3xtjames/Clover
| 2,757
|
FileSystems/GrubFS/grub/grub-core/lib/i386/relocator32.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009,2010 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
/* The code segment of the protected mode. */
#define CODE_SEGMENT 0x10
/* The data segment of the protected mode. */
#define DATA_SEGMENT 0x18
#include "relocator_common.S"
.p2align 4 /* force 16-byte alignment */
/* Trampoline that drops to plain 32-bit protected mode (paging off,
   and long mode off when built 64-bit) and far-jumps to the payload.
   Every grub_relocator32_* label below marks a runtime-patched
   immediate embedded in the instruction stream; the blob is copied
   elsewhere before execution, so the byte layout must not change. */
VARIABLE(grub_relocator32_start)
PREAMBLE
RELOAD_GDT
.code32
/* Update other registers. */
movl $DATA_SEGMENT, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
movl %eax, %gs
movl %eax, %ss
DISABLE_PAGING
#ifdef __x86_64__
/* Disable amd64. */
movl $GRUB_MEMORY_CPU_AMD64_MSR, %ecx
rdmsr
andl $(~GRUB_MEMORY_CPU_AMD64_MSR_ON), %eax
wrmsr
#endif
/* Turn off PAE. */
movl %cr4, %eax
andl $(~GRUB_MEMORY_CPU_CR4_PAE_ON), %eax
movl %eax, %cr4
jmp LOCAL(cont2)
LOCAL(cont2):
.code32
/* mov imm32, %eax -- patched: payload %esp. */
.byte 0xb8
VARIABLE(grub_relocator32_esp)
.long 0
movl %eax, %esp
/* mov imm32, %eax -- patched: payload %ebp. */
.byte 0xb8
VARIABLE(grub_relocator32_ebp)
.long 0
movl %eax, %ebp
/* mov imm32, %eax -- patched: payload %esi. */
.byte 0xb8
VARIABLE(grub_relocator32_esi)
.long 0
movl %eax, %esi
/* mov imm32, %eax -- patched: payload %edi. */
.byte 0xb8
VARIABLE(grub_relocator32_edi)
.long 0
movl %eax, %edi
/* mov imm32, %eax -- patched: payload %eax. */
.byte 0xb8
VARIABLE(grub_relocator32_eax)
.long 0
/* mov imm32, %ebx */
.byte 0xbb
VARIABLE(grub_relocator32_ebx)
.long 0
/* mov imm32, %ecx */
.byte 0xb9
VARIABLE(grub_relocator32_ecx)
.long 0
/* mov imm32, %edx */
.byte 0xba
VARIABLE(grub_relocator32_edx)
.long 0
/* Cleared direction flag is of no problem with any current
   payload and makes this implementation easier. */
cld
/* ljmp $CODE_SEGMENT, $imm32 -- patched: payload entry point. */
.byte 0xea
VARIABLE(grub_relocator32_eip)
.long 0
.word CODE_SEGMENT
/* GDT. Copied from loader/i386/linux.c. */
.p2align 4
LOCAL(gdt):
/* NULL. */
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/* Reserved. */
.byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
/* Code segment. */
.byte 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x9A, 0xCF, 0x00
/* Data segment. */
.byte 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x92, 0xCF, 0x00
LOCAL(gdt_end):
VARIABLE(grub_relocator32_end)
|
al3xtjames/Clover
| 7,161
|
FileSystems/GrubFS/grub/grub-core/lib/i386/relocator16.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009,2010 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
/* The code segment of the protected mode. */
#define CODE_SEGMENT 0x08
/* The data segment of the protected mode. */
#define DATA_SEGMENT 0x10
#define PSEUDO_REAL_CSEG 0x18
#define PSEUDO_REAL_DSEG 0x20
#include <grub/i386/relocator_private.h>
#include "relocator_common.S"
.p2align 4 /* force 16-byte alignment */
VARIABLE(grub_relocator16_start)
PREAMBLE
/* %esi holds the address this blob was copied to (apparently set up
   by PREAMBLE in relocator_common.S -- it is also used as the base
   register below).  Patch that address into the base field of the
   16-bit code descriptor in the GDT at the end of this blob
   (cs_base_bytes12 = base bits 0..15, cs_base_byte3 = bits 16..23)
   so the pseudo-real-mode CS maps the relocator itself. */
#ifdef __APPLE__
/* Apple's assembler cannot take the symbol difference inside the
   memory operand, so precompute the offsets as absolute symbols. */
LOCAL(cs_base_bytes12_offset) = LOCAL (cs_base_bytes12) - LOCAL (base)
LOCAL(cs_base_byte3_offset) = LOCAL (cs_base_byte3) - LOCAL (base)
movl %esi, %eax
movw %ax, (LOCAL(cs_base_bytes12_offset)) (RSI, 1)
shrl $16, %eax
movb %al, (LOCAL (cs_base_byte3_offset)) (RSI, 1)
#else
movl %esi, %eax
movw %ax, (LOCAL (cs_base_bytes12) - LOCAL (base)) (RSI, 1)
shrl $16, %eax
movb %al, (LOCAL (cs_base_byte3) - LOCAL (base)) (RSI, 1)
#endif
RELOAD_GDT
.code32
/* Now running on the relocator's own GDT: load the 32-bit flat data
   selector into every data segment register. */
movl $DATA_SEGMENT, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
movl %eax, %gs
movl %eax, %ss
DISABLE_PAGING
#ifdef __x86_64__
/* Disable amd64: clear the long-mode enable bit in the CPU MSR
   (names per grub/i386/memory.h macros). */
movl $GRUB_MEMORY_CPU_AMD64_MSR, %ecx
rdmsr
andl $(~GRUB_MEMORY_CPU_AMD64_MSR_ON), %eax
wrmsr
#endif
/* Turn off PAE. */
movl %cr4, %eax
andl $(~GRUB_MEMORY_CPU_CR4_PAE_ON), %eax
movl %eax, %cr4
/* Load the 16-bit pseudo-real-mode data descriptor into all data
   segment registers so their hidden caches carry real-mode-compatible
   attributes across the switch. */
movl $PSEUDO_REAL_DSEG, %eax
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
movl %eax, %gs
movl %eax, %ss
/* Real-mode segment of this blob = physical address >> 4; it is
   patched into the segment word of the hand-assembled ljmp at
   LOCAL(segment) below. */
movl %esi, %eax
shrl $4, %eax
#ifdef __APPLE__
LOCAL(segment_offset) = LOCAL (segment) - LOCAL (base)
LOCAL(idt_offset) = LOCAL(relocator16_idt) - LOCAL (base)
LOCAL(cont2_offset) = LOCAL (cont2) - LOCAL(base)
movw %ax, LOCAL(segment_offset) (%esi, 1)
lidt LOCAL(idt_offset) (%esi, 1)
/* jump to a 16 bit segment */
ljmp $PSEUDO_REAL_CSEG, $(LOCAL(cont2_offset))
#else
movw %ax, (LOCAL (segment) - LOCAL (base)) (%esi, 1)
lidt (EXT_C(grub_relocator16_idt) - LOCAL (base)) (%esi, 1)
/* jump to a 16 bit segment */
ljmp $PSEUDO_REAL_CSEG, $(LOCAL (cont2) - LOCAL(base))
#endif
LOCAL(cont2):
.code16
/* clear the PE bit of CR0 to leave protected mode */
movl %cr0, %eax
andl $(~GRUB_MEMORY_CPU_CR0_PE_ON), %eax
movl %eax, %cr0
/* flush prefetch queue, reload %cs */
/* Hand-assembled far jump: opcode byte, 16-bit offset, then the
   segment word at LOCAL(segment) that was patched at runtime with
   the blob's real-mode segment (%esi >> 4). */
/* ljmp */
.byte 0xea
#ifdef __APPLE__
LOCAL(cont3_offset) = LOCAL(cont3) - LOCAL(base)
.word LOCAL(cont3_offset)
#else
.word LOCAL(cont3)-LOCAL(base)
#endif
LOCAL(segment):
.word 0
LOCAL(cont3):
/* movw imm16, %ax.  The immediate word is the runtime-patched flag
   grub_relocator16_keep_a20_enabled: nonzero = leave A20 alone. */
.byte 0xb8
VARIABLE(grub_relocator16_keep_a20_enabled)
.word 0
test %ax, %ax
jnz LOCAL(gate_a20_done)
/* A20 must be turned off.  Set up a small stack right after this
   blob for the BIOS call and the check helper below. */
movw %cs, %ax
movw %ax, %ss
#ifdef __APPLE__
LOCAL(relocator16_end_offset) = LOCAL(relocator16_end) - LOCAL(base)
leaw LOCAL(relocator16_end_offset), %sp
#else
leaw LOCAL(relocator16_end) - LOCAL(base), %sp
#endif
addw $GRUB_RELOCATOR16_STACK_SIZE, %sp
/* second, try a BIOS call (int 15h, ax=2400h: disable A20) */
movw $0x2400, %ax
int $0x15
call LOCAL(gate_a20_check_state)
testb %al, %al
jz LOCAL(gate_a20_done)
/*
* In macbook, the keyboard test would hang the machine, so we move
* this forward.
*/
/* fourth, try the system control port A (0x92): clear the A20 bit
   and the fast-reset bit (low two bits) */
inb $0x92
andb $(~0x03), %al
outb $0x92
/* When turning off Gate A20, do not check the state strictly,
because a failure is not fatal usually, and Gate A20 is always
on some modern machines. */
jmp LOCAL(gate_a20_done)
LOCAL(gate_a20_check_state):
/* Probe the A20 line by comparing a byte at a low address
   (ds=0000h:8000h) with its 1 MiB alias (es=FFFFh:8010h, linear
   0x108000): a modified value written through the low address follows
   at the alias only when A20 is off (address wraparound).  Result in
   %al: 1 = A20 on, 0 = A20 off.  Retries up to 100 times.
   NOTE(review): on success (%al == 0) this jumps straight to
   gate_a20_done, abandoning the return address pushed by the caller;
   harmless here because the stack is reloaded before the payload
   runs -- confirm if reusing this code. */
/* iterate the checking for a while */
movw $100, %cx
1:
xorw %ax, %ax
movw %ax, %ds
decw %ax
movw %ax, %es
xorw %ax, %ax
movw $0x8000, %ax
/* compare the byte at ADDR with that at 0x100000 + ADDR */
movw %ax, %si
addw $0x10, %ax
movw %ax, %di
/* save the original byte in DL */
movb %ds:(%si), %dl
movb %es:(%di), %al
/* try to set one less value at ADDR */
movb %al, %dh
decb %dh
movb %dh, %ds:(%si)
/* serialize */
outb %al, $0x80
outb %al, $0x80
/* obtain the value at 0x100000 + ADDR in DH */
movb %es:(%di), %dh
/* this result is 1 if A20 is on or 0 if it is off */
subb %dh, %al
xorb $1, %al
/* restore the original */
movb %dl, %ds:(%si)
testb %al, %al
jz LOCAL(gate_a20_done)
loop 1b
2:
ret
LOCAL(gate_a20_done):
/* we are in real mode now
* set up the real mode segment registers : DS, SS, ES
*/
/* Each register below is loaded via a hand-assembled `mov imm, reg`
   whose immediate is patched at runtime through the exported
   grub_relocator16_* variables. */
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_ds)
.word 0
movw %ax, %ds
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_es)
.word 0
movw %ax, %es
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_fs)
.word 0
movw %ax, %fs
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_gs)
.word 0
movw %ax, %gs
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_ss)
.word 0
movw %ax, %ss
/* movw imm16, %ax. */
.byte 0xb8
VARIABLE(grub_relocator16_sp)
.word 0
movzwl %ax, %esp
/* movl imm32, %eax (0x66 operand-size prefix in 16-bit mode). */
.byte 0x66, 0xb8
VARIABLE(grub_relocator16_esi)
.long 0
movl %eax, %esi
/* movl imm32, %edx. */
.byte 0x66, 0xba
VARIABLE(grub_relocator16_edx)
.long 0
/* movl imm32, %ebx. */
.byte 0x66, 0xbb
VARIABLE(grub_relocator16_ebx)
.long 0
/* movl imm32, %ebp. */
.byte 0x66, 0xbd
VARIABLE(grub_relocator16_ebp)
.long 0
/* Cleared direction flag is of no problem with any current
payload and makes this implementation easier. */
cld
/* Hand-assembled far jump to the payload: offset and segment words
   are patched via grub_relocator16_ip / grub_relocator16_cs. */
/* ljmp */
.byte 0xea
VARIABLE(grub_relocator16_ip)
.word 0
VARIABLE(grub_relocator16_cs)
.word 0
.code32
/* GDT. Copied from loader/i386/linux.c. */
.p2align 4
LOCAL(gdt):
.word 0, 0
.byte 0, 0, 0, 0
/* -- code segment --
* base = 0x00000000, limit = 0xFFFFF (4 KiB Granularity), present
* type = 32bit code execute/read, DPL = 0
*/
.word 0xFFFF, 0
.byte 0, 0x9A, 0xCF, 0
/* -- data segment --
* base = 0x00000000, limit 0xFFFFF (4 KiB Granularity), present
* type = 32 bit data read/write, DPL = 0
*/
.word 0xFFFF, 0
.byte 0, 0x92, 0xCF, 0
/* -- 16 bit real mode CS --
* base = patched at startup with this blob's address (see the
* cs_base_bytes12/cs_base_byte3 stores near grub_relocator16_start),
* limit 0x0FFFF (1 B Granularity), present
* type = 16 bit code execute/read only/conforming, DPL = 0
*/
.word 0xFFFF
LOCAL(cs_base_bytes12):
.word 0
LOCAL(cs_base_byte3):
.byte 0
.byte 0x9E, 0, 0
/* -- 16 bit real mode DS --
* base = 0x00000000, limit 0x0FFFF (1 B Granularity), present
* type = 16 bit data read/write, DPL = 0
*/
.word 0xFFFF, 0
.byte 0, 0x92, 0, 0
LOCAL(gdt_end):
#ifdef __APPLE__
LOCAL(relocator16_idt):
#endif
/* IDT descriptor loaded with lidt before the real-mode switch;
   exported so the C side can fill in the real-mode IVT base/limit. */
VARIABLE(grub_relocator16_idt)
.word 0
.long 0
LOCAL(relocator16_end):
VARIABLE(grub_relocator16_end)
.byte 0
|
al3xtjames/Clover
| 3,856
|
FileSystems/GrubFS/grub/grub-core/lib/ia64/setjmp.S
|
/* Copyright (C) 1999, 2000, 2001, 2002, 2008 Free Software Foundation, Inc.
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.
The layout of the jmp_buf is as follows. This is subject to change
and user-code should never depend on the particular layout of
jmp_buf!
offset: description:
------- ------------
0x000 stack pointer (r12) ; unchangeable (see _JMPBUF_UNWINDS)
0x008 r1 (gp)
0x010 caller's unat
0x018 fpsr
0x020 r4
0x028 r5
0x030 r6
0x038 r7
0x040 rp (b0)
0x048 b1
0x050 b2
0x058 b3
0x060 b4
0x068 b5
0x070 ar.pfs
0x078 ar.lc
0x080 pr
0x088 ar.bsp ; unchangeable (see __longjmp.S)
0x090 ar.unat
0x098 &__jmp_buf ; address of the jmpbuf (needed to locate NaT bits in unat)
0x0a0 f2
0x0b0 f3
0x0c0 f4
0x0d0 f5
0x0e0 f16
0x0f0 f17
0x100 f18
0x110 f19
0x120 f20
0x130 f21
0x130 f22
0x140 f23
0x150 f24
0x160 f25
0x170 f26
0x180 f27
0x190 f28
0x1a0 f29
0x1b0 f30
0x1c0 f31 */
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv2+"
/* The following two entry points are the traditional entry points: */
.text
/* int grub_setjmp (grub_jmp_buf env)
   Traditional entry point: forces the savemask argument (in1) to 1
   and branches to __sigsetjmp with the jmp_buf pointer still in in0. */
.proc EXT_C(grub_setjmp)
FUNCTION(grub_setjmp)
alloc r8=ar.pfs,2,0,0,0
mov in1=1
br.cond.sptk.many __sigsetjmp
.endp EXT_C(grub_setjmp)
/* __sigsetjmp(__jmp_buf buf, int savemask) */
/* __sigsetjmp (__jmp_buf buf, int savemask)
   Saves the callee-preserved state into the jmp_buf laid out as in
   the file header comment.  r2/r3 walk the integer area in 16-byte
   strides; r8/r9 walk the FP area from offsets 0xa0/0xb0.
   Returns 0 (in r8). */
/* __sigsetjmp(__jmp_buf buf, int savemask) */
.proc __sigsetjmp
__sigsetjmp:
//.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
alloc loc1=ar.pfs,2,2,2,0
mov r16=ar.unat // read caller's unat before the spills below modify it
;;
mov r17=ar.fpsr
mov r2=in0
add r3=8,in0
;;
st8.spill.nta [r2]=sp,16 // r12 (sp)
st8.spill.nta [r3]=gp,16 // r1 (gp)
;;
st8.nta [r2]=r16,16 // save caller's unat
st8.nta [r3]=r17,16 // save fpsr
add r8=0xa0,in0
;;
st8.spill.nta [r2]=r4,16 // r4
st8.spill.nta [r3]=r5,16 // r5
add r9=0xb0,in0
;;
stf.spill.nta [r8]=f2,32
stf.spill.nta [r9]=f3,32
mov loc0=rp
.body
;;
stf.spill.nta [r8]=f4,32
stf.spill.nta [r9]=f5,32
mov r17=b1
;;
stf.spill.nta [r8]=f16,32
stf.spill.nta [r9]=f17,32
mov r18=b2
;;
stf.spill.nta [r8]=f18,32
stf.spill.nta [r9]=f19,32
mov r19=b3
;;
stf.spill.nta [r8]=f20,32
stf.spill.nta [r9]=f21,32
mov r20=b4
;;
stf.spill.nta [r8]=f22,32
stf.spill.nta [r9]=f23,32
mov r21=b5
;;
stf.spill.nta [r8]=f24,32
stf.spill.nta [r9]=f25,32
mov r22=ar.lc
;;
stf.spill.nta [r8]=f26,32
stf.spill.nta [r9]=f27,32
mov r24=pr
;;
stf.spill.nta [r8]=f28,32
stf.spill.nta [r9]=f29,32
;;
stf.spill.nta [r8]=f30
stf.spill.nta [r9]=f31
st8.spill.nta [r2]=r6,16 // r6
st8.spill.nta [r3]=r7,16 // r7
;;
mov r23=ar.bsp
mov r25=ar.unat // ar.unat now holds the NaT bits of the regs just spilled
mov out0=in0 // NOTE(review): out0/out1 appear unused (no call follows) -- confirm
st8.nta [r2]=loc0,16 // b0
st8.nta [r3]=r17,16 // b1
mov out1=in1
;;
st8.nta [r2]=r18,16 // b2
st8.nta [r3]=r19,16 // b3
;;
st8.nta [r2]=r20,16 // b4
st8.nta [r3]=r21,16 // b5
;;
st8.nta [r2]=loc1,16 // ar.pfs
st8.nta [r3]=r22,16 // ar.lc
;;
st8.nta [r2]=r24,16 // pr
st8.nta [r3]=r23,16 // ar.bsp
;;
st8.nta [r2]=r25 // ar.unat
st8.nta [r3]=in0 // &__jmp_buf (lets longjmp locate the NaT bits in unat)
mov r8=0 // return value 0
mov rp=loc0
mov ar.pfs=loc1
br.ret.sptk.many rp
.endp __sigsetjmp
|
al3xtjames/Clover
| 4,420
|
FileSystems/GrubFS/grub/grub-core/lib/ia64/longjmp.S
|
/* Copyright (C) 1999, 2000, 2001, 2002, 2008 Free Software Foundation, Inc.
Contributed by David Mosberger-Tang <davidm@hpl.hp.com>.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA.
Note that __sigsetjmp() did NOT flush the register stack. Instead,
we do it here since __longjmp() is usually much less frequently
invoked than __sigsetjmp(). The only difficulty is that __sigsetjmp()
didn't (and wouldn't be able to) save ar.rnat either. This is a problem
because if we're not careful, we could end up loading random NaT bits.
There are two cases:
(i) ar.bsp < ia64_rse_rnat_addr(jmpbuf.ar_bsp)
ar.rnat contains the desired bits---preserve ar.rnat
across loadrs and write to ar.bspstore
(ii) ar.bsp >= ia64_rse_rnat_addr(jmpbuf.ar_bsp)
The desired ar.rnat is stored in
ia64_rse_rnat_addr(jmpbuf.ar_bsp). Load those
bits into ar.rnat after setting ar.bspstore. */
# define pPos p6 /* is rotate count positive? */
# define pNeg p7 /* is rotate count negative? */
/* grub_longjmp (grub_jmp_buf buf, int val)
   Restores the state saved by __sigsetjmp and returns val (or 1 if
   val == 0) at the grub_setjmp call site.  The saved ar.unat word is
   first rotated by the distance (in 8-byte words, mod 64) between the
   jmp_buf's original address and its current one, because the bit
   position of a spilled register's NaT bit in ar.unat depends on the
   address it was spilled at.  See the file header for the two
   ar.rnat recovery cases handled via p8. */
/* __longjmp(__jmp_buf buf, int val) */
.text
.proc EXT_C(grub_longjmp)
FUNCTION(grub_longjmp)
alloc r8=ar.pfs,2,1,0,0
mov r27=ar.rsc
add r2=0x98,in0 // r2 <- &jmpbuf.orig_jmp_buf_addr
;;
ld8 r8=[r2],-16 // r8 <- orig_jmp_buf_addr
mov r10=ar.bsp
and r11=~0x3,r27 // clear ar.rsc.mode
;;
flushrs // flush dirty regs to backing store (must be first in insn grp)
ld8 r23=[r2],8 // r23 <- jmpbuf.ar_bsp
sub r8=r8,in0 // r8 <- &orig_jmpbuf - &jmpbuf
;;
ld8 r25=[r2] // r25 <- jmpbuf.ar_unat
extr.u r8=r8,3,6 // r8 <- (&orig_jmpbuf - &jmpbuf)/8 & 0x3f
;;
cmp.lt pNeg,pPos=r8,r0
mov r2=in0
;;
(pPos) mov r16=r8
(pNeg) add r16=64,r8
(pPos) sub r17=64,r8
(pNeg) sub r17=r0,r8
;;
mov ar.rsc=r11 // put RSE in enforced lazy mode
shr.u r8=r25,r16
add r3=8,in0 // r3 <- &jmpbuf.r1
shl r9=r25,r17
;;
or r25=r8,r9 // r25 <- saved unat rotated to match the current address
;;
mov r26=ar.rnat
mov ar.unat=r25 // setup ar.unat (NaT bits for r1, r4-r7, and r12)
;;
ld8.fill.nta sp=[r2],16 // r12 (sp)
ld8.fill.nta gp=[r3],16 // r1 (gp)
dep r11=-1,r23,3,6 // r11 <- ia64_rse_rnat_addr(jmpbuf.ar_bsp)
;;
ld8.nta r16=[r2],16 // caller's unat
ld8.nta r17=[r3],16 // fpsr
;;
ld8.fill.nta r4=[r2],16 // r4
ld8.fill.nta r5=[r3],16 // r5 (gp)
cmp.geu p8,p0=r10,r11 // p8 <- (ar.bsp >= jmpbuf.ar_bsp)
;;
ld8.fill.nta r6=[r2],16 // r6
ld8.fill.nta r7=[r3],16 // r7
;;
mov ar.unat=r16 // restore caller's unat
mov ar.fpsr=r17 // restore fpsr
;;
ld8.nta r16=[r2],16 // b0
ld8.nta r17=[r3],16 // b1
;;
(p8) ld8 r26=[r11] // r26 <- *ia64_rse_rnat_addr(jmpbuf.ar_bsp)
mov ar.bspstore=r23 // restore ar.bspstore
;;
ld8.nta r18=[r2],16 // b2
ld8.nta r19=[r3],16 // b3
;;
ld8.nta r20=[r2],16 // b4
ld8.nta r21=[r3],16 // b5
;;
ld8.nta r11=[r2],16 // ar.pfs
ld8.nta r22=[r3],56 // ar.lc
;;
ld8.nta r24=[r2],32 // pr
mov b0=r16
;;
ldf.fill.nta f2=[r2],32
ldf.fill.nta f3=[r3],32
mov b1=r17
;;
ldf.fill.nta f4=[r2],32
ldf.fill.nta f5=[r3],32
mov b2=r18
;;
ldf.fill.nta f16=[r2],32
ldf.fill.nta f17=[r3],32
mov b3=r19
;;
ldf.fill.nta f18=[r2],32
ldf.fill.nta f19=[r3],32
mov b4=r20
;;
ldf.fill.nta f20=[r2],32
ldf.fill.nta f21=[r3],32
mov b5=r21
;;
ldf.fill.nta f22=[r2],32
ldf.fill.nta f23=[r3],32
mov ar.lc=r22
;;
ldf.fill.nta f24=[r2],32
ldf.fill.nta f25=[r3],32
cmp.eq p8,p9=0,in1 // p8 <- (val == 0), p9 <- (val != 0)
;;
ldf.fill.nta f26=[r2],32
ldf.fill.nta f27=[r3],32
mov ar.pfs=r11
;;
ldf.fill.nta f28=[r2],32
ldf.fill.nta f29=[r3],32
;;
ldf.fill.nta f30=[r2]
ldf.fill.nta f31=[r3]
(p8) mov r8=1 // val == 0 => return 1
mov ar.rnat=r26 // restore ar.rnat
;;
mov ar.rsc=r27 // restore ar.rsc
(p9) mov r8=in1 // otherwise return val
invala // virt. -> phys. regnum mapping may change
mov pr=r24,-1
br.ret.dptk.few rp
.endp EXT_C(grub_longjmp)
|
al3xtjames/Clover
| 1,303
|
FileSystems/GrubFS/grub/grub-core/lib/sparc64/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2005,2007,2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.text
/*
* int grub_setjmp (grub_jmp_buf env)
* Saves the return address (%o7), stack pointer and frame pointer;
* the register windows themselves are preserved lazily and spilled
* by grub_longjmp's flushw.  Returns 0.
*/
FUNCTION(grub_setjmp)
stx %o7, [%o0 + 0x00]
stx %sp, [%o0 + 0x08]
stx %fp, [%o0 + 0x10]
retl
clr %o0 /* delay slot: return value 0 */
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Returns val (or 1 if val == 0) at the grub_setjmp call site.
*/
FUNCTION(grub_longjmp)
ldx [%o0 + 0x10], %g1 /* %g1 = saved frame pointer */
movrz %o1, 1, %o1 /* val == 0 => return 1 */
save %sp, -64, %sp
flushw /* spill all active register windows to their stack frames */
restore
ldx [%o0 + 0x00], %o7 /* saved return address */
ldx [%o0 + 0x08], %fp /* saved %sp; becomes %sp after the final restore */
sub %fp, 192, %sp
stx %g1, [%sp + 2047 + (14 * 8)] /* patch the %i6 (fp) slot in the biased
                                    register-save area so the window fill
                                    restores the saved frame pointer */
retl
restore %o1, 0, %o0 /* delay slot: pop window, return val */
|
al3xtjames/Clover
| 1,189
|
FileSystems/GrubFS/grub/grub-core/lib/arm/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2013 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.syntax unified
#if !defined (__thumb2__)
.arm
#else
.thumb
#endif
.text
/*
* int grub_setjmp (grub_jmp_buf env)
* Saves r4-r11, sp (staged through r12) and lr into env; returns 0.
*/
FUNCTION(grub_setjmp)
mov r12, sp /* sp cannot appear in the stm register list; stage it in r12 */
stm r0, { r4-r12, lr }
mov r0, #0
bx lr
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Restores the registers saved by grub_setjmp, then returns val
* (or 1 if val == 0) at the grub_setjmp call site.
*/
FUNCTION(grub_longjmp)
ldm r0, { r4-r12, lr }
mov sp, r12 /* r12 holds the saved sp */
movs r0, r1 /* r0 = val, setting flags */
it eq /* needed for Thumb-2; harmless marker when assembling for ARM */
moveq r0, #1 /* val == 0 => return 1 */
bx lr
|
al3xtjames/Clover
| 1,342
|
FileSystems/GrubFS/grub/grub-core/lib/arm64/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2013 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
.file "setjmp.S"
.text
/*
* int grub_setjmp (grub_jmp_buf env)
* Stores the callee-saved registers x19-x28 and the x29/x30
* (fp/lr) pair into env, post-incrementing x0 by 16 per pair,
* then sp (staged through x1).  Returns 0.
*/
FUNCTION(grub_setjmp)
stp x19, x20, [x0], #16
stp x21, x22, [x0], #16
stp x23, x24, [x0], #16
stp x25, x26, [x0], #16
stp x27, x28, [x0], #16
stp x29, x30, [x0], #16
mov x1, sp /* sp cannot be stored directly by stp */
str x1, [x0]
mov x0, #0
ret
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Restores the registers saved by grub_setjmp and returns at the
* saved x30.
* NOTE(review): when val == 0 this returns the advanced env pointer
* (env + 96) rather than the conventional 1 -- nonzero either way,
* so setjmp-site truth tests still work, but confirm no caller
* depends on an exact 1.
*/
FUNCTION(grub_longjmp)
ldp x19, x20, [x0], #16
ldp x21, x22, [x0], #16
ldp x23, x24, [x0], #16
ldp x25, x26, [x0], #16
ldp x27, x28, [x0], #16
ldp x29, x30, [x0], #16
ldr x2, [x0]
mov sp, x2
cmp x1, #0
csel x0, x1, x0, ne /* x0 = val if val != 0, else the (nonzero) env cursor */
ret
|
al3xtjames/Clover
| 1,642
|
FileSystems/GrubFS/grub/grub-core/lib/x86_64/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2003,2007 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.text
/*
* jmp_buf:
* rbx rsp rbp r12 r13 r14 r15 rip
* 0 8 16 24 32 40 48 56
*/
/*
* int grub_setjmp (grub_jmp_buf env)
* SysV AMD64: env in %rdi.  The return address is popped into %rsi
* first so the saved %rsp is the caller's stack pointer *after* the
* call would return; grub_longjmp restores that %rsp and jmp's (does
* not ret) to the saved return address.
*/
FUNCTION(grub_setjmp)
pop %rsi /* Return address, and adjust the stack */
xorq %rax, %rax
movq %rbx, 0(%rdi) /* RBX */
movq %rsp, 8(%rdi) /* RSP (post-pop value) */
push %rsi
movq %rbp, 16(%rdi) /* RBP */
movq %r12, 24(%rdi) /* R12 */
movq %r13, 32(%rdi) /* R13 */
movq %r14, 40(%rdi) /* R14 */
movq %r15, 48(%rdi) /* R15 */
movq %rsi, 56(%rdi) /* RIP (return address held in %rsi) */
ret
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Restores the callee-saved registers and stack saved by
* grub_setjmp, then jumps to the saved return address with
* %eax = val (or 1 if val == 0).
*/
FUNCTION(grub_longjmp)
movl %esi, %eax
orl %eax, %eax /* test val */
jnz 1f
incl %eax /* val == 0 => return 1 */
1:
movq (%rdi), %rbx
movq 8(%rdi), %rsp
movq 16(%rdi), %rbp
movq 24(%rdi), %r12
movq 32(%rdi), %r13
movq 40(%rdi), %r14
movq 48(%rdi), %r15
jmp *56(%rdi) /* resume at the saved return address */
|
al3xtjames/Clover
| 1,840
|
FileSystems/GrubFS/grub/grub-core/lib/x86_64/relocator_asm.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/i386/memory.h>
.p2align 2
/* Copies a chunk downward in memory (dest > src overlap-safe
   direction).  The three 8-byte immediates below are runtime-patched
   through the exported grub_relocator_backward_* variables. */
VARIABLE(grub_relocator_backward_start)
/* mov imm64, %rax (REX.W + B8; the .long pair is the 8-byte immediate) */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator_backward_dest)
.long 0, 0
movq %rax, %rdi
/* mov imm64, %rax */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator_backward_src)
.long 0, 0
movq %rax, %rsi
/* mov imm64, %rcx */
.byte 0x48
.byte 0xb9
VARIABLE(grub_relocator_backward_chunk_size)
.long 0, 0
add %rcx, %rsi
add %rcx, %rdi
/* Backward movsb is implicitly off-by-one. compensate that. */
sub $1, %rsi
sub $1, %rdi
/* Backward copy. */
std
rep
movsb
VARIABLE(grub_relocator_backward_end)
/* Copies a chunk upward in memory.  As above, the 8-byte immediates
   are runtime-patched through the exported variables. */
VARIABLE(grub_relocator_forward_start)
/* mov imm64, %rax */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator_forward_dest)
.long 0, 0
movq %rax, %rdi
/* mov imm64, %rax */
.byte 0x48
.byte 0xb8
VARIABLE(grub_relocator_forward_src)
.long 0, 0
movq %rax, %rsi
/* mov imm64, %rcx */
.byte 0x48
.byte 0xb9
VARIABLE(grub_relocator_forward_chunk_size)
.long 0, 0
/* Forward copy. */
cld
rep
movsb
VARIABLE(grub_relocator_forward_end)
|
al3xtjames/Clover
| 1,521
|
FileSystems/GrubFS/grub/grub-core/lib/mips/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2003,2007,2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.text
/*
* int grub_setjmp (grub_jmp_buf env)
* Saves $s0-$s8, $gp, $sp and $ra into env; returns 0 ($v0) with
* $v1 cleared.  No .set noreorder here, so the assembler fills the
* jr delay slot itself.
*/
FUNCTION(grub_setjmp)
sw $s0, 0($a0)
sw $s1, 4($a0)
sw $s2, 8($a0)
sw $s3, 12($a0)
sw $s4, 16($a0)
sw $s5, 20($a0)
sw $s6, 24($a0)
sw $s7, 28($a0)
sw $s8, 32($a0)
sw $gp, 36($a0)
sw $sp, 40($a0)
sw $ra, 44($a0)
move $v0, $zero
move $v1, $zero
jr $ra
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Restores the registers saved by grub_setjmp and returns val (or 1
* if val == 0) at the grub_setjmp call site.
*/
FUNCTION(grub_longjmp)
lw $s0, 0($a0)
lw $s1, 4($a0)
lw $s2, 8($a0)
lw $s3, 12($a0)
lw $s4, 16($a0)
lw $s5, 20($a0)
lw $s6, 24($a0)
lw $s7, 28($a0)
lw $s8, 32($a0)
lw $gp, 36($a0)
lw $sp, 40($a0)
lw $ra, 44($a0)
move $v0, $a1
bne $v0, $zero, 1f /* reorder mode: assembler manages the delay slot */
addiu $v0, $v0, 1 /* only reached when val == 0: return 1 */
1:
move $v1, $zero
jr $ra
|
al3xtjames/Clover
| 1,522
|
FileSystems/GrubFS/grub/grub-core/lib/mips/relocator_asm.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
.p2align 4 /* force 16-byte alignment */
.set noreorder
.set nomacro
/* Forward byte copy.  Expects $8 = source, $9 = destination,
   $10 = byte count on entry.  Dest/size are copied to $a0/$a1,
   apparently as arguments for the cache flush code included below --
   TODO confirm its convention. */
VARIABLE (grub_relocator_forward_start)
move $a0, $9
move $a1, $10
copycont1:
lb $11,0($8)
sb $11,0($9)
addiu $8, $8, 1
addiu $10, $10, -1
bne $10, $0, copycont1
addiu $9, $9, 1 /* branch delay slot (noreorder) */
#include "../../kern/mips/cache_flush.S"
VARIABLE (grub_relocator_forward_end)
/* Backward byte copy (for overlapping moves with dest above src).
   Same register convention as the forward variant: $8 = source,
   $9 = destination, $10 = byte count. */
VARIABLE (grub_relocator_backward_start)
move $a0, $9
move $a1, $10
addu $9, $9, $10
addu $8, $8, $10
/* Backward movsl is implicitly off-by-one. compensate that. */
addiu $9, $9, -1
addiu $8, $8, -1
copycont2:
lb $11,0($8)
sb $11,0($9)
addiu $8, $8, -1
addiu $10, $10, -1
bne $10, $0, copycont2
addiu $9, $9, -1 /* branch delay slot (noreorder) */
#include "../../kern/mips/cache_flush.S"
VARIABLE (grub_relocator_backward_end)
|
al3xtjames/Clover
| 1,688
|
FileSystems/GrubFS/grub/grub-core/lib/powerpc/setjmp.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2004,2007 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
#include <grub/dl.h>
.file "setjmp.S"
GRUB_MOD_LICENSE "GPLv3+"
.text
/*
* int grub_setjmp (grub_jmp_buf env)
* jmp_buf layout (byte offsets from r3): 0 = r1 (sp),
* 4..72 = r14-r31, 76 = lr, 80 = cr.  Returns 0.
*/
FUNCTION(grub_setjmp)
stw 1, 0(3)
stw 14, 4(3)
stw 15, 8(3)
stw 16, 12(3)
stw 17, 16(3)
stw 18, 20(3)
stw 19, 24(3)
stw 20, 28(3)
stw 21, 32(3)
stw 22, 36(3)
stw 23, 40(3)
stw 24, 44(3)
stw 25, 48(3)
stw 26, 52(3)
stw 27, 56(3)
stw 28, 60(3)
stw 29, 64(3)
stw 30, 68(3)
stw 31, 72(3)
mflr 4
stw 4, 76(3)
mfcr 4
stw 4, 80(3)
li 3, 0
blr
/*
* int grub_longjmp (grub_jmp_buf env, int val)
* Restores the registers saved by grub_setjmp and returns val
* (or 1 if val == 0) at the grub_setjmp call site.
*/
FUNCTION(grub_longjmp)
lwz 1, 0(3)
lwz 14, 4(3)
lwz 15, 8(3)
lwz 16, 12(3)
lwz 17, 16(3)
lwz 18, 20(3)
lwz 19, 24(3)
lwz 20, 28(3)
lwz 21, 32(3)
lwz 22, 36(3)
lwz 23, 40(3)
lwz 24, 44(3)
lwz 25, 48(3)
lwz 26, 52(3)
lwz 27, 56(3)
lwz 28, 60(3)
lwz 29, 64(3)
lwz 30, 68(3)
lwz 31, 72(3)
lwz 5, 76(3)
mtlr 5
lwz 5, 80(3)
mtcr 5
mr. 3, 4 /* r3 = val, recording CR0 */
bne 1f
li 3, 1 /* val == 0 => return 1 */
1: blr
|
al3xtjames/Clover
| 1,456
|
FileSystems/GrubFS/grub/grub-core/lib/powerpc/relocator_asm.S
|
/*
* GRUB -- GRand Unified Bootloader
* Copyright (C) 2009,2010 Free Software Foundation, Inc.
*
* GRUB is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* GRUB is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GRUB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <grub/symbol.h>
.p2align 4 /* force 16-byte alignment */
/* Forward byte copy.  Expects r8 = source, r9 = destination,
   r10 = byte count on entry; dest/size are copied to r3/r4,
   apparently for the cache flush code included below -- TODO
   confirm its convention. */
VARIABLE (grub_relocator_forward_start)
mr 3, 9
mr 4, 10
copycont1:
lbz 11,0(8)
stb 11,0(9)
addi 8, 8, 0x1
addi 9, 9, 0x1
addi 10, 10, -1
cmpwi 10, 0
bne copycont1
#include "../../kern/powerpc/cache_flush.S"
VARIABLE (grub_relocator_forward_end)
/* Backward byte copy (for overlapping moves with dest above src).
   Same register convention as the forward variant: r8 = source,
   r9 = destination, r10 = byte count. */
VARIABLE (grub_relocator_backward_start)
mr 3, 9
mr 4, 10
add 9, 9, 10
add 8, 8, 10
/* Backward movsl is implicitly off-by-one. compensate that. */
addi 9, 9, -1
addi 8, 8, -1
copycont2:
lbz 11,0(8)
stb 11,0(9)
addi 8, 8, -1
addi 9, 9, -1
addi 10, 10, -1
cmpwi 10, 0
bne copycont2
#include "../../kern/powerpc/cache_flush.S"
VARIABLE (grub_relocator_backward_end)
|
al3xtjames/Clover
| 2,545
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-mul1.S
|
/* i80586 mul_1 -- Multiply a limb vector with a limb and store
* the result in a second limb vector.
*
* Copyright (C) 1992, 1994, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
/* res_ptr[i] = s1_ptr[i] * s2_limb for i in [0, size); returns (eax)
   the high limb carried out of the last multiply.  Both pointers are
   advanced to their array ends and `size` is negated, so the index
   counts upward toward zero.  The inter-limb carry lives in ebx:
   the `adc ebx,$0` at the loop top folds in the CF produced by the
   previous iteration's add (CF is clear on the first pass thanks to
   the xor; `inc` does not modify CF). */
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_mul_1)
C_SYMBOL_NAME(_gcry_mpih_mul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))
INSN2(mov,l ,R(size),MEM_DISP(esp,28))
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))
INSN2(xor,l ,R(ebx),R(ebx)) /* carry limb = 0, CF cleared */
ALIGN (3)
Loop: INSN2(adc,l ,R(ebx),$0) /* fold carry-out of previous add */
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))
INSN1(mul,l ,R(s2_limb)) /* edx:eax = s1 limb * s2_limb */
INSN2(add,l ,R(ebx),R(eax))
INSN2(mov,l ,MEM_INDEX(res_ptr,size,4),R(ebx))
INSN1(inc,l ,R(size)) /* inc leaves CF intact for the adc above */
INSN2(mov,l ,R(ebx),R(edx)) /* next carry limb = high half */
INSN1(jnz, ,Loop)
INSN2(adc,l ,R(ebx),$0)
INSN2(mov,l ,R(eax),R(ebx)) /* return the final high limb */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 2,684
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-mul2.S
|
/* i80586 addmul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
/* res_ptr[i] += s1_ptr[i] * s2_limb for i in [0, size); returns (eax)
   the high limb carried out.  Same framing as mpih-mul1.S: pointers
   advanced to their ends, negated index counting up to zero, carry
   limb in ebx folded via the `adc ebx,$0` at the loop top.  The extra
   `adc edx,$0` absorbs the carry from adding the carry limb into the
   product's low half before the old res limb is added on top. */
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_addmul_1)
C_SYMBOL_NAME(_gcry_mpih_addmul_1:)
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))
INSN2(mov,l ,R(size),MEM_DISP(esp,28))
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))
INSN2(xor,l ,R(ebx),R(ebx)) /* carry limb = 0, CF cleared */
ALIGN (3)
Loop: INSN2(adc,l ,R(ebx),$0) /* fold carry-out of previous iteration */
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))
INSN1(mul,l ,R(s2_limb)) /* edx:eax = s1 limb * s2_limb */
INSN2(add,l ,R(eax),R(ebx)) /* add carry limb into low half */
INSN2(mov,l ,R(ebx),MEM_INDEX(res_ptr,size,4))
INSN2(adc,l ,R(edx),$0) /* propagate into the high half */
INSN2(add,l ,R(ebx),R(eax)) /* add old res limb */
INSN2(mov,l ,MEM_INDEX(res_ptr,size,4),R(ebx))
INSN1(inc,l ,R(size)) /* inc leaves CF intact for the adc above */
INSN2(mov,l ,R(ebx),R(edx)) /* next carry limb = high half */
INSN1(jnz, ,Loop)
INSN2(adc,l ,R(ebx),$0)
INSN2(mov,l ,R(eax),R(ebx)) /* return the final high limb */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 4,816
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-lshift.S
|
/* i80586 lshift
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/*
 * _gcry_mpih_lshift (i586 version):
 *   Shift the usize-limb vector at up left by cnt bits, store the result
 *   at wp, and return (in %eax) the bits shifted out of the most
 *   significant limb.
 * NOTE(review): assumes 0 < cnt < 32 — TODO confirm against callers.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_lshift)
C_SYMBOL_NAME(_gcry_mpih_lshift:)
/* Save callee-saved registers used below. */
pushl %edi
pushl %esi
pushl %ebx
pushl %ebp
/* The four pushes above move the stack args from sp+4.. to sp+20.. */
movl 20(%esp),%edi /* res_ptr */
movl 24(%esp),%esi /* s_ptr */
movl 28(%esp),%ebp /* size */
movl 32(%esp),%ecx /* cnt */
/* We can use faster code for shift-by-1 under certain conditions. */
cmp $1,%ecx
jne Lnormal
leal 4(%esi),%eax
cmpl %edi,%eax
jnc Lspecial /* jump if s_ptr + 1 >= res_ptr */
leal (%esi,%ebp,4),%eax
cmpl %eax,%edi
jnc Lspecial /* jump if res_ptr >= s_ptr + size */
Lnormal:
/* General case: walk from the most significant limb downwards, shifting
 * two limbs at a time with shldl (which merges the high bits of the
 * next-lower limb into the result). */
leal -4(%edi,%ebp,4),%edi
leal -4(%esi,%ebp,4),%esi
movl (%esi),%edx
subl $4,%esi
xorl %eax,%eax
shldl %cl,%edx,%eax /* compute carry limb */
pushl %eax /* push carry limb onto stack */
decl %ebp
pushl %ebp /* save size-1 for the cleanup loop */
shrl $3,%ebp /* count of 8-limb unrolled iterations */
jz Lend
movl (%edi),%eax /* fetch destination cache line */
ALIGN (2)
Loop: movl -28(%edi),%eax /* fetch destination cache line */
movl %edx,%ebx
movl (%esi),%eax
movl -4(%esi),%edx
shldl %cl,%eax,%ebx
shldl %cl,%edx,%eax
movl %ebx,(%edi)
movl %eax,-4(%edi)
movl -8(%esi),%ebx
movl -12(%esi),%eax
shldl %cl,%ebx,%edx
shldl %cl,%eax,%ebx
movl %edx,-8(%edi)
movl %ebx,-12(%edi)
movl -16(%esi),%edx
movl -20(%esi),%ebx
shldl %cl,%edx,%eax
shldl %cl,%ebx,%edx
movl %eax,-16(%edi)
movl %edx,-20(%edi)
movl -24(%esi),%eax
movl -28(%esi),%edx
shldl %cl,%eax,%ebx
shldl %cl,%edx,%eax
movl %ebx,-24(%edi)
movl %eax,-28(%edi)
subl $32,%esi
subl $32,%edi
decl %ebp
jnz Loop
Lend: popl %ebp
andl $7,%ebp /* limbs remaining after the unrolled loop */
jz Lend2
Loop2: movl (%esi),%eax
shldl %cl,%eax,%edx /* compute result limb */
movl %edx,(%edi)
movl %eax,%edx
subl $4,%esi
subl $4,%edi
decl %ebp
jnz Loop2
Lend2: shll %cl,%edx /* compute least significant limb */
movl %edx,(%edi) /* store it */
popl %eax /* pop carry limb */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
/* We loop from the least significant end of the arrays, which is only
permissible if the source and destination don't overlap, since the
function is documented to work for overlapping source and destination.
*/
Lspecial:
/* Shift-by-1 fast path: propagate the shifted-out bit between limbs
 * through the carry flag (addl/adcl), walking upwards. */
movl (%esi),%edx
addl $4,%esi
decl %ebp
pushl %ebp /* save size-1 for the cleanup loop */
shrl $3,%ebp
addl %edx,%edx /* shift first limb; CF = bit shifted out */
incl %ebp
decl %ebp /* test %ebp for zero; inc/dec leave CF intact */
jz LLend
movl (%edi),%eax /* fetch destination cache line */
ALIGN (2)
LLoop: movl 28(%edi),%eax /* fetch destination cache line */
movl %edx,%ebx
movl (%esi),%eax
movl 4(%esi),%edx
adcl %eax,%eax
movl %ebx,(%edi)
adcl %edx,%edx
movl %eax,4(%edi)
movl 8(%esi),%ebx
movl 12(%esi),%eax
adcl %ebx,%ebx
movl %edx,8(%edi)
adcl %eax,%eax
movl %ebx,12(%edi)
movl 16(%esi),%edx
movl 20(%esi),%ebx
adcl %edx,%edx
movl %eax,16(%edi)
adcl %ebx,%ebx
movl %edx,20(%edi)
movl 24(%esi),%eax
movl 28(%esi),%edx
adcl %eax,%eax
movl %ebx,24(%edi)
adcl %edx,%edx
movl %eax,28(%edi)
leal 32(%esi),%esi /* use leal not to clobber carry */
leal 32(%edi),%edi
decl %ebp
jnz LLoop
LLend: popl %ebp
sbbl %eax,%eax /* save carry in %eax */
andl $7,%ebp
jz LLend2
addl %eax,%eax /* restore carry from eax */
LLoop2: movl %edx,%ebx
movl (%esi),%edx
adcl %edx,%edx
movl %ebx,(%edi)
leal 4(%esi),%esi /* use leal not to clobber carry */
leal 4(%edi),%edi
decl %ebp
jnz LLoop2
jmp LL1
LLend2: addl %eax,%eax /* restore carry from eax */
LL1: movl %edx,(%edi) /* store last limb */
sbbl %eax,%eax /* eax = -CF */
negl %eax /* return shifted-out bit as 0 or 1 */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 4,825
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-rshift.S
|
/* i80586 rshift
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/*
 * _gcry_mpih_rshift (i586 version):
 *   Shift the usize-limb vector at up right by cnt bits, store the
 *   result at wp, and return (in %eax) the bits shifted out of the
 *   least significant limb (left-justified in the returned limb).
 * NOTE(review): assumes 0 < cnt < 32 — TODO confirm against callers.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_rshift)
C_SYMBOL_NAME(_gcry_mpih_rshift:)
/* Save callee-saved registers used below. */
pushl %edi
pushl %esi
pushl %ebx
pushl %ebp
/* The four pushes above move the stack args from sp+4.. to sp+20.. */
movl 20(%esp),%edi /* res_ptr */
movl 24(%esp),%esi /* s_ptr */
movl 28(%esp),%ebp /* size */
movl 32(%esp),%ecx /* cnt */
/* We can use faster code for shift-by-1 under certain conditions. */
cmp $1,%ecx
jne Rnormal
leal 4(%edi),%eax
cmpl %esi,%eax
jnc Rspecial /* jump if res_ptr + 1 >= s_ptr */
leal (%edi,%ebp,4),%eax
cmpl %eax,%esi
jnc Rspecial /* jump if s_ptr >= res_ptr + size */
Rnormal:
/* General case: walk upwards from the least significant limb, shifting
 * two limbs at a time with shrdl (which merges the low bits of the
 * next-higher limb into the result). */
movl (%esi),%edx
addl $4,%esi
xorl %eax,%eax
shrdl %cl,%edx,%eax /* compute carry limb */
pushl %eax /* push carry limb onto stack */
decl %ebp
pushl %ebp /* save size-1 for the cleanup loop */
shrl $3,%ebp /* count of 8-limb unrolled iterations */
jz Rend
movl (%edi),%eax /* fetch destination cache line */
ALIGN (2)
Roop: movl 28(%edi),%eax /* fetch destination cache line */
movl %edx,%ebx
movl (%esi),%eax
movl 4(%esi),%edx
shrdl %cl,%eax,%ebx
shrdl %cl,%edx,%eax
movl %ebx,(%edi)
movl %eax,4(%edi)
movl 8(%esi),%ebx
movl 12(%esi),%eax
shrdl %cl,%ebx,%edx
shrdl %cl,%eax,%ebx
movl %edx,8(%edi)
movl %ebx,12(%edi)
movl 16(%esi),%edx
movl 20(%esi),%ebx
shrdl %cl,%edx,%eax
shrdl %cl,%ebx,%edx
movl %eax,16(%edi)
movl %edx,20(%edi)
movl 24(%esi),%eax
movl 28(%esi),%edx
shrdl %cl,%eax,%ebx
shrdl %cl,%edx,%eax
movl %ebx,24(%edi)
movl %eax,28(%edi)
addl $32,%esi
addl $32,%edi
decl %ebp
jnz Roop
Rend: popl %ebp
andl $7,%ebp /* limbs remaining after the unrolled loop */
jz Rend2
Roop2: movl (%esi),%eax
shrdl %cl,%eax,%edx /* compute result limb */
movl %edx,(%edi)
movl %eax,%edx
addl $4,%esi
addl $4,%edi
decl %ebp
jnz Roop2
Rend2: shrl %cl,%edx /* compute most significant limb */
movl %edx,(%edi) /* store it */
popl %eax /* pop carry limb */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
/* We loop from the most significant end of the arrays, which is only
permissible if the source and destination don't overlap, since the
function is documented to work for overlapping source and destination.
*/
Rspecial:
/* Shift-by-1 fast path: propagate the shifted-out bit between limbs
 * through the carry flag (shrl/rcrl), walking downwards. */
leal -4(%edi,%ebp,4),%edi
leal -4(%esi,%ebp,4),%esi
movl (%esi),%edx
subl $4,%esi
decl %ebp
pushl %ebp /* save size-1 for the cleanup loop */
shrl $3,%ebp
shrl $1,%edx /* shift first limb; CF = bit shifted out */
incl %ebp
decl %ebp /* test %ebp for zero; inc/dec leave CF intact */
jz RLend
movl (%edi),%eax /* fetch destination cache line */
ALIGN (2)
RLoop: movl -28(%edi),%eax /* fetch destination cache line */
movl %edx,%ebx
movl (%esi),%eax
movl -4(%esi),%edx
rcrl $1,%eax
movl %ebx,(%edi)
rcrl $1,%edx
movl %eax,-4(%edi)
movl -8(%esi),%ebx
movl -12(%esi),%eax
rcrl $1,%ebx
movl %edx,-8(%edi)
rcrl $1,%eax
movl %ebx,-12(%edi)
movl -16(%esi),%edx
movl -20(%esi),%ebx
rcrl $1,%edx
movl %eax,-16(%edi)
rcrl $1,%ebx
movl %edx,-20(%edi)
movl -24(%esi),%eax
movl -28(%esi),%edx
rcrl $1,%eax
movl %ebx,-24(%edi)
rcrl $1,%edx
movl %eax,-28(%edi)
leal -32(%esi),%esi /* use leal not to clobber carry */
leal -32(%edi),%edi
decl %ebp
jnz RLoop
RLend: popl %ebp
sbbl %eax,%eax /* save carry in %eax */
andl $7,%ebp
jz RLend2
addl %eax,%eax /* restore carry from eax */
RLoop2: movl %edx,%ebx
movl (%esi),%edx
rcrl $1,%edx
movl %ebx,(%edi)
leal -4(%esi),%esi /* use leal not to clobber carry */
leal -4(%edi),%edi
decl %ebp
jnz RLoop2
jmp RL1
RLend2: addl %eax,%eax /* restore carry from eax */
RL1: movl %edx,(%edi) /* store last limb */
movl $0,%eax
rcrl $1,%eax /* rotate final CF into the msb of the return value */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 2,683
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-mul3.S
|
/* i80586 submul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
/* Register roles (via INSN/R macros expanded per asm-syntax.h). */
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_submul_1)
C_SYMBOL_NAME(_gcry_mpih_submul_1:)
/*
 * res_ptr[i] -= s1_ptr[i] * s2_limb for i in [0, size);
 * returns the most significant borrow/carry limb in eax.
 * ebx carries the high word of each product (plus borrow) between
 * iterations.
 */
INSN1(push,l ,R(edi))
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
/* The four pushes above move the stack args from sp+4.. to sp+20.. */
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))
INSN2(mov,l ,R(size),MEM_DISP(esp,28))
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))
/* Point past the vectors and negate size, so the index counts up
 * towards zero and inc sets ZF on the last limb. */
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))
INSN2(xor,l ,R(ebx),R(ebx)) /* clear carry limb */
ALIGN (3)
Loop: INSN2(adc,l ,R(ebx),$0) /* fold CF from previous sub into carry limb */
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))
INSN1(mul,l ,R(s2_limb)) /* edx:eax = s1 limb * s2_limb */
INSN2(add,l ,R(eax),R(ebx)) /* add carry limb to product low word */
INSN2(mov,l ,R(ebx),MEM_INDEX(res_ptr,size,4))
INSN2(adc,l ,R(edx),$0) /* propagate into product high word */
INSN2(sub,l ,R(ebx),R(eax)) /* subtract from destination limb */
INSN2(mov,l ,MEM_INDEX(res_ptr,size,4),R(ebx))
INSN1(inc,l ,R(size)) /* inc preserves CF for the adc at Loop */
INSN2(mov,l ,R(ebx),R(edx)) /* next carry limb = product high word */
INSN1(jnz, ,Loop)
INSN2(adc,l ,R(ebx),$0) /* fold final borrow into carry limb */
INSN2(mov,l ,R(eax),R(ebx)) /* return value */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 2,820
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-add1.S
|
/* i80586 add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 12)
* mpi_size_t size) (sp + 16)
*/
/*
 * _gcry_mpih_add_n (i586 version):
 *   res_ptr[i] = s1_ptr[i] + s2_ptr[i] for i in [0, size), size > 0;
 *   returns the final carry (0 or 1) in %eax.
 * The carry flag is kept live across the whole limb loop; every
 * instruction in between is chosen not to clobber CF (leal, movl).
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_add_n)
C_SYMBOL_NAME(_gcry_mpih_add_n:)
/* Save callee-saved registers used below. */
pushl %edi
pushl %esi
pushl %ebx
pushl %ebp
/* The four pushes above move the stack args from sp+4.. to sp+20.. */
movl 20(%esp),%edi /* res_ptr */
movl 24(%esp),%esi /* s1_ptr */
movl 28(%esp),%ebp /* s2_ptr */
movl 32(%esp),%ecx /* size */
movl (%ebp),%ebx /* preload first s2 limb */
decl %ecx
movl %ecx,%edx
shrl $3,%ecx /* ecx = count of 8-limb unrolled iterations */
andl $7,%edx /* edx = leftover limbs */
testl %ecx,%ecx /* zero carry flag */
jz Lend
pushl %edx
ALIGN (3)
Loop: movl 28(%edi),%eax /* fetch destination cache line */
leal 32(%edi),%edi
L1: movl (%esi),%eax
movl 4(%esi),%edx
adcl %ebx,%eax
movl 4(%ebp),%ebx
adcl %ebx,%edx
movl 8(%ebp),%ebx
movl %eax,-32(%edi)
movl %edx,-28(%edi)
L2: movl 8(%esi),%eax
movl 12(%esi),%edx
adcl %ebx,%eax
movl 12(%ebp),%ebx
adcl %ebx,%edx
movl 16(%ebp),%ebx
movl %eax,-24(%edi)
movl %edx,-20(%edi)
L3: movl 16(%esi),%eax
movl 20(%esi),%edx
adcl %ebx,%eax
movl 20(%ebp),%ebx
adcl %ebx,%edx
movl 24(%ebp),%ebx
movl %eax,-16(%edi)
movl %edx,-12(%edi)
L4: movl 24(%esi),%eax
movl 28(%esi),%edx
adcl %ebx,%eax
movl 28(%ebp),%ebx
adcl %ebx,%edx
movl 32(%ebp),%ebx
movl %eax,-8(%edi)
movl %edx,-4(%edi)
leal 32(%esi),%esi
leal 32(%ebp),%ebp
decl %ecx /* dec preserves CF for the next adcl */
jnz Loop
popl %edx
Lend:
decl %edx /* test %edx w/o clobbering carry */
js Lend2
incl %edx
Loop2:
/* Cleanup loop: one limb per iteration, carry still live. */
leal 4(%edi),%edi
movl (%esi),%eax
adcl %ebx,%eax
movl 4(%ebp),%ebx
movl %eax,-4(%edi)
leal 4(%esi),%esi
leal 4(%ebp),%ebp
decl %edx
jnz Loop2
Lend2:
/* Last limb, then materialize CF as the 0/1 return value. */
movl (%esi),%eax
adcl %ebx,%eax
movl %eax,(%edi)
sbbl %eax,%eax /* eax = -CF */
negl %eax /* eax = CF (0 or 1) */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 3,126
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i586/mpih-sub1.S
|
/* i80586 sub_n -- Sub two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 12)
* mpi_size_t size) (sp + 16)
*/
/*
 * _gcry_mpih_sub_n (i586 version):
 *   res_ptr[i] = s1_ptr[i] - s2_ptr[i] for i in [0, size), size > 0;
 *   returns the final borrow (0 or 1) in %eax.
 * Mirror of _gcry_mpih_add_n with sbbl in place of adcl; the carry
 * (borrow) flag is kept live across the whole limb loop.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_sub_n)
C_SYMBOL_NAME(_gcry_mpih_sub_n:)
/* Save callee-saved registers used below. */
pushl %edi
pushl %esi
pushl %ebx
pushl %ebp
/* The four pushes above move the stack args from sp+4.. to sp+20.. */
movl 20(%esp),%edi /* res_ptr */
movl 24(%esp),%esi /* s1_ptr */
movl 28(%esp),%ebp /* s2_ptr */
movl 32(%esp),%ecx /* size */
movl (%ebp),%ebx /* preload first s2 limb */
decl %ecx
movl %ecx,%edx
shrl $3,%ecx /* ecx = count of 8-limb unrolled iterations */
andl $7,%edx /* edx = leftover limbs */
testl %ecx,%ecx /* zero carry flag */
jz Lend
pushl %edx
ALIGN (3)
Loop: movl 28(%edi),%eax /* fetch destination cache line */
leal 32(%edi),%edi
L1: movl (%esi),%eax
movl 4(%esi),%edx
sbbl %ebx,%eax
movl 4(%ebp),%ebx
sbbl %ebx,%edx
movl 8(%ebp),%ebx
movl %eax,-32(%edi)
movl %edx,-28(%edi)
L2: movl 8(%esi),%eax
movl 12(%esi),%edx
sbbl %ebx,%eax
movl 12(%ebp),%ebx
sbbl %ebx,%edx
movl 16(%ebp),%ebx
movl %eax,-24(%edi)
movl %edx,-20(%edi)
L3: movl 16(%esi),%eax
movl 20(%esi),%edx
sbbl %ebx,%eax
movl 20(%ebp),%ebx
sbbl %ebx,%edx
movl 24(%ebp),%ebx
movl %eax,-16(%edi)
movl %edx,-12(%edi)
L4: movl 24(%esi),%eax
movl 28(%esi),%edx
sbbl %ebx,%eax
movl 28(%ebp),%ebx
sbbl %ebx,%edx
movl 32(%ebp),%ebx
movl %eax,-8(%edi)
movl %edx,-4(%edi)
leal 32(%esi),%esi
leal 32(%ebp),%ebp
decl %ecx /* dec preserves CF for the next sbbl */
jnz Loop
popl %edx
Lend:
decl %edx /* test %edx w/o clobbering carry */
js Lend2
incl %edx
Loop2:
/* Cleanup loop: one limb per iteration, borrow still live. */
leal 4(%edi),%edi
movl (%esi),%eax
sbbl %ebx,%eax
movl 4(%ebp),%ebx
movl %eax,-4(%edi)
leal 4(%esi),%esi
leal 4(%ebp),%ebp
decl %edx
jnz Loop2
Lend2:
/* Last limb, then materialize the borrow as the 0/1 return value. */
movl (%esi),%eax
sbbl %ebx,%eax
movl %eax,(%edi)
sbbl %eax,%eax /* eax = -CF */
negl %eax /* eax = borrow (0 or 1) */
popl %ebp
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 4,214
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mpih-lshift.S
|
/* mc68020 lshift -- Shift left a low-level natural-number integer.
*
* Copyright (C) 1996, 1998, 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/* Register roles (via R/MEM macros expanded per asm-syntax.h). */
#define res_ptr a1
#define s_ptr a0
#define s_size d6
#define cnt d4
TEXT
ALIGN
GLOBL C_SYMBOL_NAME(_gcry_mpih_lshift)
C_SYMBOL_NAME(_gcry_mpih_lshift:)
PROLOG(_gcry_mpih_lshift)
/*
 * Shift the s_size-limb vector at s_ptr left by cnt bits into res_ptr.
 * Result (the shifted-out bits) is returned in d0.
 * NOTE(review): assumes 0 < cnt < 32 — TODO confirm against callers.
 */
/* Save used registers on the stack. */
moveml R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
/* Copy the arguments to registers (moveml pushed 24 bytes + return
 * address, hence the sp+28.. displacements). */
movel MEM_DISP(sp,28),R(res_ptr)
movel MEM_DISP(sp,32),R(s_ptr)
movel MEM_DISP(sp,36),R(s_size)
movel MEM_DISP(sp,40),R(cnt)
/* Use the addx-based fast path for cnt == 1 on non-overlapping operands. */
moveql #1,R(d5)
cmpl R(d5),R(cnt)
bne L(Lnormal)
cmpl R(s_ptr),R(res_ptr)
bls L(Lspecial) /* jump if s_ptr >= res_ptr */
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
lea MEM_INDX1(s_ptr,s_size,l,4),R(a2)
#else /* not mc68020 */
movel R(s_size),R(d0)
asll #2,R(d0)
lea MEM_INDX(s_ptr,d0,l),R(a2)
#endif
cmpl R(res_ptr),R(a2)
bls L(Lspecial) /* jump if res_ptr >= s_ptr + s_size */
L(Lnormal:)
/* General case: walk from the most significant limb downwards.
 * d5 = 32 - cnt, used to extract the bits carried between limbs. */
moveql #32,R(d5)
subl R(cnt),R(d5)
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
lea MEM_INDX1(s_ptr,s_size,l,4),R(s_ptr)
lea MEM_INDX1(res_ptr,s_size,l,4),R(res_ptr)
#else /* not mc68000 */
movel R(s_size),R(d0)
asll #2,R(d0)
addl R(s_size),R(s_ptr)
addl R(s_size),R(res_ptr)
#endif
movel MEM_PREDEC(s_ptr),R(d2)
movel R(d2),R(d0)
lsrl R(d5),R(d0) /* compute carry limb */
lsll R(cnt),R(d2)
movel R(d2),R(d1)
subql #1,R(s_size)
beq L(Lend)
/* Two-limb-per-iteration dbf loop; the lsrl #1 / bcs pair handles an
 * odd limb count by entering at L1. */
lsrl #1,R(s_size)
bcs L(L1)
subql #1,R(s_size)
L(Loop:)
movel MEM_PREDEC(s_ptr),R(d2)
movel R(d2),R(d3)
lsrl R(d5),R(d3)
orl R(d3),R(d1)
movel R(d1),MEM_PREDEC(res_ptr)
lsll R(cnt),R(d2)
L(L1:)
movel MEM_PREDEC(s_ptr),R(d1)
movel R(d1),R(d3)
lsrl R(d5),R(d3)
orl R(d3),R(d2)
movel R(d2),MEM_PREDEC(res_ptr)
lsll R(cnt),R(d1)
dbf R(s_size),L(Loop) /* dbf only counts the low 16 bits ... */
subl #0x10000,R(s_size)
bcc L(Loop) /* ... so step the high word manually */
L(Lend:)
movel R(d1),MEM_PREDEC(res_ptr) /* store least significant limb */
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
/* We loop from least significant end of the arrays, which is only
permissible if the source and destination don't overlap, since the
function is documented to work for overlapping source and destination. */
L(Lspecial:)
/* Shift-by-1 fast path: propagate the shifted-out bit between limbs
 * through the extend (X) flag via addxl. */
clrl R(d0) /* initialize carry */
eorw #1,R(s_size)
lsrl #1,R(s_size)
bcc L(LL1)
subql #1,R(s_size)
L(LLoop:)
movel MEM_POSTINC(s_ptr),R(d2)
addxl R(d2),R(d2)
movel R(d2),MEM_POSTINC(res_ptr)
L(LL1:)
movel MEM_POSTINC(s_ptr),R(d2)
addxl R(d2),R(d2)
movel R(d2),MEM_POSTINC(res_ptr)
dbf R(s_size),L(LLoop)
addxl R(d0),R(d0) /* save cy in lsb */
subl #0x10000,R(s_size)
bcs L(LLend)
lsrl #1,R(d0) /* restore cy */
bra L(LLoop)
L(LLend:)
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
EPILOG(_gcry_mpih_lshift)
|
al3xtjames/Clover
| 4,201
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mpih-rshift.S
|
/* mc68020 rshift -- Shift right a low-level natural-number integer.
*
* Copyright (C) 1996, 1998, 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/* Register roles (via R/MEM macros expanded per asm-syntax.h). */
#define res_ptr a1
#define s_ptr a0
#define s_size d6
#define cnt d4
TEXT
ALIGN
GLOBL C_SYMBOL_NAME(_gcry_mpih_rshift)
C_SYMBOL_NAME(_gcry_mpih_rshift:)
PROLOG(_gcry_mpih_rshift)
/*
 * Shift the s_size-limb vector at s_ptr right by cnt bits into res_ptr.
 * Result (the shifted-out bits) is returned in d0.
 * NOTE(review): assumes 0 < cnt < 32 — TODO confirm against callers.
 */
/* Save used registers on the stack. */
moveml R(d2)-R(d6)/R(a2),MEM_PREDEC(sp)
/* Copy the arguments to registers (moveml pushed 24 bytes + return
 * address, hence the sp+28.. displacements). */
movel MEM_DISP(sp,28),R(res_ptr)
movel MEM_DISP(sp,32),R(s_ptr)
movel MEM_DISP(sp,36),R(s_size)
movel MEM_DISP(sp,40),R(cnt)
/* Use the roxr-based fast path for cnt == 1 on non-overlapping operands. */
moveql #1,R(d5)
cmpl R(d5),R(cnt)
bne L(Rnormal)
cmpl R(res_ptr),R(s_ptr)
bls L(Rspecial) /* jump if res_ptr >= s_ptr */
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
lea MEM_INDX1(res_ptr,s_size,l,4),R(a2)
#else /* not mc68020 */
movel R(s_size),R(d0)
asll #2,R(d0)
lea MEM_INDX(res_ptr,d0,l),R(a2)
#endif
cmpl R(s_ptr),R(a2)
bls L(Rspecial) /* jump if s_ptr >= res_ptr + s_size */
L(Rnormal:)
/* General case: walk upwards from the least significant limb.
 * d5 = 32 - cnt, used to extract the bits carried between limbs. */
moveql #32,R(d5)
subl R(cnt),R(d5)
movel MEM_POSTINC(s_ptr),R(d2)
movel R(d2),R(d0)
lsll R(d5),R(d0) /* compute carry limb */
lsrl R(cnt),R(d2)
movel R(d2),R(d1)
subql #1,R(s_size)
beq L(Rend)
/* Two-limb-per-iteration dbf loop; the lsrl #1 / bcs pair handles an
 * odd limb count by entering at R1. */
lsrl #1,R(s_size)
bcs L(R1)
subql #1,R(s_size)
L(Roop:)
movel MEM_POSTINC(s_ptr),R(d2)
movel R(d2),R(d3)
lsll R(d5),R(d3)
orl R(d3),R(d1)
movel R(d1),MEM_POSTINC(res_ptr)
lsrl R(cnt),R(d2)
L(R1:)
movel MEM_POSTINC(s_ptr),R(d1)
movel R(d1),R(d3)
lsll R(d5),R(d3)
orl R(d3),R(d2)
movel R(d2),MEM_POSTINC(res_ptr)
lsrl R(cnt),R(d1)
dbf R(s_size),L(Roop) /* dbf only counts the low 16 bits ... */
subl #0x10000,R(s_size)
bcc L(Roop) /* ... so step the high word manually */
L(Rend:)
movel R(d1),MEM(res_ptr) /* store most significant limb */
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
/* We loop from most significant end of the arrays, which is only
permissible if the source and destination don't overlap, since the
function is documented to work for overlapping source and destination. */
L(Rspecial:)
#if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
lea MEM_INDX1(s_ptr,s_size,l,4),R(s_ptr)
lea MEM_INDX1(res_ptr,s_size,l,4),R(res_ptr)
#else /* not mc68000 */
movel R(s_size),R(d0)
asll #2,R(d0)
addl R(s_size),R(s_ptr)
addl R(s_size),R(res_ptr)
#endif
/* Shift-by-1 fast path: propagate the shifted-out bit between limbs
 * through the extend (X) flag via roxrl. */
clrl R(d0) /* initialize carry */
eorw #1,R(s_size)
lsrl #1,R(s_size)
bcc L(LR1)
subql #1,R(s_size)
L(LRoop:)
movel MEM_PREDEC(s_ptr),R(d2)
roxrl #1,R(d2)
movel R(d2),MEM_PREDEC(res_ptr)
L(LR1:)
movel MEM_PREDEC(s_ptr),R(d2)
roxrl #1,R(d2)
movel R(d2),MEM_PREDEC(res_ptr)
dbf R(s_size),L(LRoop)
roxrl #1,R(d0) /* save cy in msb */
subl #0x10000,R(s_size)
bcs L(LRend)
addl R(d0),R(d0) /* restore cy */
bra L(LRoop)
L(LRend:)
/* Restore used registers from stack frame. */
moveml MEM_POSTINC(sp),R(d2)-R(d6)/R(a2)
rts
EPILOG(_gcry_mpih_rshift)
|
al3xtjames/Clover
| 2,643
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mpih-add1.S
|
/* mc68020 __mpn_add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994,1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 16)
* mpi_size_t size) (sp + 12)
*/
TEXT
ALIGN
GLOBL C_SYMBOL_NAME(_gcry_mpih_add_n)
C_SYMBOL_NAME(_gcry_mpih_add_n:)
PROLOG(_gcry_mpih_add_n)
/*
 * res_ptr[i] = s1_ptr[i] + s2_ptr[i] for i in [0, size), size > 0;
 * the final carry (0 or 1) is returned in d0.
 * a2 = res_ptr, a0 = s1_ptr, a1 = s2_ptr, d2 = size.
 * The carry is propagated between limbs through the extend (X) flag,
 * which dbf/movel do not disturb.
 */
/* Save used registers on the stack. */
movel R(d2),MEM_PREDEC(sp)
movel R(a2),MEM_PREDEC(sp)
/* Copy the arguments to registers. Better use movem? */
movel MEM_DISP(sp,12),R(a2)
movel MEM_DISP(sp,16),R(a0)
movel MEM_DISP(sp,20),R(a1)
movel MEM_DISP(sp,24),R(d2)
/* Two limbs per iteration; the eorw/lsrl pair derives the dbf count and
 * enters at L1 for an odd limb count. */
eorw #1,R(d2)
lsrl #1,R(d2)
bcc L(L1)
subql #1,R(d2) /* clears cy as side effect */
L(Loop:)
movel MEM_POSTINC(a0),R(d0)
movel MEM_POSTINC(a1),R(d1)
addxl R(d1),R(d0)
movel R(d0),MEM_POSTINC(a2)
L(L1:) movel MEM_POSTINC(a0),R(d0)
movel MEM_POSTINC(a1),R(d1)
addxl R(d1),R(d0)
movel R(d0),MEM_POSTINC(a2)
dbf R(d2),L(Loop) /* loop until 16 lsb of %4 == -1 */
subxl R(d0),R(d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
subl #0x10000,R(d2) /* dbf only counts 16 bits; step the high word */
bcs L(L2)
addl R(d0),R(d0) /* restore cy */
bra L(Loop)
L(L2:)
negl R(d0) /* return carry as 0 or 1 */
/* Restore used registers from stack frame. */
movel MEM_POSTINC(sp),R(a2)
movel MEM_POSTINC(sp),R(d2)
rts
EPILOG(_gcry_mpih_add_n)
|
al3xtjames/Clover
| 2,652
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/m68k/mpih-sub1.S
|
/* mc68020 __mpn_sub_n -- Subtract two limb vectors of the same length > 0 and
* store difference in a third limb vector.
*
* Copyright (C) 1992, 1994, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 16)
* mpi_size_t size) (sp + 12)
*/
TEXT
ALIGN
GLOBL C_SYMBOL_NAME(_gcry_mpih_sub_n)
C_SYMBOL_NAME(_gcry_mpih_sub_n:)
PROLOG(_gcry_mpih_sub_n)
/*
 * res_ptr[i] = s1_ptr[i] - s2_ptr[i] for i in [0, size), size > 0;
 * the final borrow (0 or 1) is returned in d0.
 * a2 = res_ptr, a0 = s1_ptr, a1 = s2_ptr, d2 = size.
 * Mirror of _gcry_mpih_add_n with subxl in place of addxl; the borrow
 * is propagated between limbs through the extend (X) flag.
 */
/* Save used registers on the stack. */
movel R(d2),MEM_PREDEC(sp)
movel R(a2),MEM_PREDEC(sp)
/* Copy the arguments to registers. Better use movem? */
movel MEM_DISP(sp,12),R(a2)
movel MEM_DISP(sp,16),R(a0)
movel MEM_DISP(sp,20),R(a1)
movel MEM_DISP(sp,24),R(d2)
/* Two limbs per iteration; the eorw/lsrl pair derives the dbf count and
 * enters at L1 for an odd limb count. */
eorw #1,R(d2)
lsrl #1,R(d2)
bcc L(L1)
subql #1,R(d2) /* clears cy as side effect */
L(Loop:)
movel MEM_POSTINC(a0),R(d0)
movel MEM_POSTINC(a1),R(d1)
subxl R(d1),R(d0)
movel R(d0),MEM_POSTINC(a2)
L(L1:) movel MEM_POSTINC(a0),R(d0)
movel MEM_POSTINC(a1),R(d1)
subxl R(d1),R(d0)
movel R(d0),MEM_POSTINC(a2)
dbf R(d2),L(Loop) /* loop until 16 lsb of %4 == -1 */
subxl R(d0),R(d0) /* d0 <= -cy; save cy as 0 or -1 in d0 */
subl #0x10000,R(d2) /* dbf only counts 16 bits; step the high word */
bcs L(L2)
addl R(d0),R(d0) /* restore cy */
bra L(Loop)
L(L2:)
negl R(d0) /* return borrow as 0 or 1 */
/* Restore used registers from stack frame. */
movel MEM_POSTINC(sp),R(a2)
movel MEM_POSTINC(sp),R(d2)
rts
EPILOG(_gcry_mpih_sub_n)
|
al3xtjames/Clover
| 2,509
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-mul1.S
|
/* i80386 mul_1 -- Multiply a limb vector with a limb and store
* the result in a second limb vector.
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
/*
 * _gcry_mpih_mul_1 -- multiply each limb of s1_ptr[0..size-1] by the
 * single limb s2_limb and store the low limbs of the products at
 * res_ptr; the final carry-out (most significant limb) is returned
 * in eax.
 *
 * Both vectors are walked with a negative index counting up towards
 * zero, so the same `inc size` both advances the index and provides
 * the loop-termination test via ZF.  ebx holds the high half of the
 * previous product, i.e. the carry limb into the next iteration.
 */
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_mul_1)
C_SYMBOL_NAME(_gcry_mpih_mul_1:)
INSN1(push,l ,R(edi))                       /* save callee-saved registers */
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))   /* arg: result vector */
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))    /* arg: source vector */
INSN2(mov,l ,R(size),MEM_DISP(esp,28))      /* arg: limb count */
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))   /* arg: multiplier limb */
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))  /* point past the vector ends... */
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))                       /* ...and index from -size up to 0 */
INSN2(xor,l ,R(ebx),R(ebx))                 /* incoming carry limb = 0 */
ALIGN (3)
Loop:
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))  /* eax = next source limb */
INSN1(mul,l ,R(s2_limb))                    /* edx:eax = eax * s2_limb */
INSN2(add,l ,R(eax),R(ebx))                 /* add carry limb from previous round */
INSN2(mov,l ,MEM_INDEX(res_ptr,size,4),R(eax)) /* store low limb of product */
INSN2(adc,l ,R(edx),$0)                     /* fold the add's carry into the high half */
INSN2(mov,l ,R(ebx),R(edx))                 /* high half becomes next carry limb */
INSN1(inc,l ,R(size))                       /* advance index; ZF set when it hits 0 */
INSN1(jnz, ,Loop)
INSN2(mov,l ,R(eax),R(ebx))                 /* return value: final carry-out limb */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 2,577
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-mul2.S
|
/* i80386 addmul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
/*
 * _gcry_mpih_addmul_1 -- multiply each limb of s1_ptr[0..size-1] by
 * s2_limb and ADD the products into the limbs already stored at
 * res_ptr.  The final carry-out limb is returned in eax.
 *
 * Same negative-index walk as _gcry_mpih_mul_1; the only difference
 * is the read-modify-write `add` into res_ptr, whose carry is folded
 * into edx by a second adc.
 */
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_addmul_1)
C_SYMBOL_NAME(_gcry_mpih_addmul_1:)
INSN1(push,l ,R(edi))                       /* save callee-saved registers */
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))   /* arg: accumulate-into vector */
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))    /* arg: source vector */
INSN2(mov,l ,R(size),MEM_DISP(esp,28))      /* arg: limb count */
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))   /* arg: multiplier limb */
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))  /* point past the vector ends... */
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))                       /* ...and index from -size up to 0 */
INSN2(xor,l ,R(ebx),R(ebx))                 /* incoming carry limb = 0 */
ALIGN (3)
Loop:
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))  /* eax = next source limb */
INSN1(mul,l ,R(s2_limb))                    /* edx:eax = eax * s2_limb */
INSN2(add,l ,R(eax),R(ebx))                 /* add carry limb from previous round */
INSN2(adc,l ,R(edx),$0)                     /* fold that carry into the high half */
INSN2(add,l ,MEM_INDEX(res_ptr,size,4),R(eax)) /* accumulate low limb into res */
INSN2(adc,l ,R(edx),$0)                     /* fold the accumulate carry too */
INSN2(mov,l ,R(ebx),R(edx))                 /* high half becomes next carry limb */
INSN1(inc,l ,R(size))                       /* advance index; ZF set when it hits 0 */
INSN1(jnz, ,Loop)
INSN2(mov,l ,R(eax),R(ebx))                 /* return value: final carry-out limb */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 2,551
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-lshift.S
|
/* i80386 lshift
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/*
 * _gcry_mpih_lshift -- shift the usize-limb number at `up` left by
 * `cnt` bits (0 < cnt < 32) into `wp`, working from the most
 * significant limb downwards.  Returns (in eax) the bits shifted out
 * of the top limb, i.e. the carry-out limb.
 *
 * The loop is unrolled two-fold, alternating the roles of eax/ebx so
 * no extra register move is needed; the parity test on `dl` picks the
 * correct entry point into the pair.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_lshift)
C_SYMBOL_NAME(_gcry_mpih_lshift:)
pushl %edi
pushl %esi
pushl %ebx
movl 16(%esp),%edi /* res_ptr */
movl 20(%esp),%esi /* s_ptr */
movl 24(%esp),%edx /* size */
movl 28(%esp),%ecx /* cnt */
subl $4,%esi /* adjust s_ptr */
movl (%esi,%edx,4),%ebx /* read most significant limb */
xorl %eax,%eax
shldl %cl,%ebx,%eax /* compute carry limb */
decl %edx
jz Lend /* single-limb operand: skip the loop entirely */
pushl %eax /* push carry limb onto stack */
testb $1,%dl
jnz L1 /* enter loop in the middle */
movl %ebx,%eax
ALIGN (3)
Loop: movl (%esi,%edx,4),%ebx /* load next lower limb */
shldl %cl,%ebx,%eax /* compute result limb */
movl %eax,(%edi,%edx,4) /* store it */
decl %edx
L1: movl (%esi,%edx,4),%eax /* second copy of the body with eax/ebx swapped */
shldl %cl,%eax,%ebx
movl %ebx,(%edi,%edx,4)
decl %edx
jnz Loop
shll %cl,%eax /* compute least significant limb */
movl %eax,(%edi) /* store it */
popl %eax /* pop carry limb */
popl %ebx
popl %esi
popl %edi
ret
Lend: shll %cl,%ebx /* compute least significant limb */
movl %ebx,(%edi) /* store it */
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 2,583
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-rshift.S
|
/* i80386 rshift
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (sp + 4)
* mpi_ptr_t up, (sp + 8)
* mpi_size_t usize, (sp + 12)
* unsigned cnt) (sp + 16)
*/
/*
 * _gcry_mpih_rshift -- shift the usize-limb number at `up` right by
 * `cnt` bits (0 < cnt < 32) into `wp`, working from the least
 * significant limb upwards.  Returns (in eax) the bits shifted out of
 * the bottom limb, i.e. the carry-out limb.
 *
 * Mirror image of _gcry_mpih_lshift: negative index counting up, a
 * two-fold unrolled loop alternating eax/ebx, entry point chosen by
 * the parity of the remaining count.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_rshift)
C_SYMBOL_NAME(_gcry_mpih_rshift:)
pushl %edi
pushl %esi
pushl %ebx
movl 16(%esp),%edi /* wp */
movl 20(%esp),%esi /* up */
movl 24(%esp),%edx /* usize */
movl 28(%esp),%ecx /* cnt */
leal -4(%edi,%edx,4),%edi /* edi -> last result limb */
leal (%esi,%edx,4),%esi /* esi -> past last source limb */
negl %edx /* index runs from -usize up to 0 */
movl (%esi,%edx,4),%ebx /* read least significant limb */
xorl %eax,%eax
shrdl %cl,%ebx,%eax /* compute carry limb */
incl %edx
jz Lend2 /* single-limb operand: skip the loop entirely */
pushl %eax /* push carry limb onto stack */
testb $1,%dl
jnz L2 /* enter loop in the middle */
movl %ebx,%eax
ALIGN (3)
Loop2: movl (%esi,%edx,4),%ebx /* load next higher limb */
shrdl %cl,%ebx,%eax /* compute result limb */
movl %eax,(%edi,%edx,4) /* store it */
incl %edx
L2: movl (%esi,%edx,4),%eax /* second copy of the body with eax/ebx swapped */
shrdl %cl,%eax,%ebx
movl %ebx,(%edi,%edx,4)
incl %edx
jnz Loop2
shrl %cl,%eax /* compute most significant limb */
movl %eax,(%edi) /* store it */
popl %eax /* pop carry limb */
popl %ebx
popl %esi
popl %edi
ret
Lend2: shrl %cl,%ebx /* compute most significant limb */
movl %ebx,(%edi) /* store it */
popl %ebx
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 2,578
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-mul3.S
|
/* i80386 submul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_size_t s1_size, (sp + 12)
* mpi_limb_t s2_limb) (sp + 16)
*/
#define res_ptr edi
#define s1_ptr esi
#define size ecx
#define s2_limb ebp
/*
 * _gcry_mpih_submul_1 -- multiply each limb of s1_ptr[0..size-1] by
 * s2_limb and SUBTRACT the products from the limbs stored at res_ptr.
 * The final borrow-out limb is returned in eax.
 *
 * Identical structure to _gcry_mpih_addmul_1 except that the
 * read-modify-write into res_ptr is a `sub`; its borrow is folded
 * into edx by the second adc.
 */
TEXT
ALIGN (3)
GLOBL C_SYMBOL_NAME(_gcry_mpih_submul_1)
C_SYMBOL_NAME(_gcry_mpih_submul_1:)
INSN1(push,l ,R(edi))                       /* save callee-saved registers */
INSN1(push,l ,R(esi))
INSN1(push,l ,R(ebx))
INSN1(push,l ,R(ebp))
INSN2(mov,l ,R(res_ptr),MEM_DISP(esp,20))   /* arg: subtract-from vector */
INSN2(mov,l ,R(s1_ptr),MEM_DISP(esp,24))    /* arg: source vector */
INSN2(mov,l ,R(size),MEM_DISP(esp,28))      /* arg: limb count */
INSN2(mov,l ,R(s2_limb),MEM_DISP(esp,32))   /* arg: multiplier limb */
INSN2(lea,l ,R(res_ptr),MEM_INDEX(res_ptr,size,4))  /* point past the vector ends... */
INSN2(lea,l ,R(s1_ptr),MEM_INDEX(s1_ptr,size,4))
INSN1(neg,l ,R(size))                       /* ...and index from -size up to 0 */
INSN2(xor,l ,R(ebx),R(ebx))                 /* incoming carry limb = 0 */
ALIGN (3)
Loop:
INSN2(mov,l ,R(eax),MEM_INDEX(s1_ptr,size,4))  /* eax = next source limb */
INSN1(mul,l ,R(s2_limb))                    /* edx:eax = eax * s2_limb */
INSN2(add,l ,R(eax),R(ebx))                 /* add carry limb from previous round */
INSN2(adc,l ,R(edx),$0)                     /* fold that carry into the high half */
INSN2(sub,l ,MEM_INDEX(res_ptr,size,4),R(eax)) /* subtract low limb from res */
INSN2(adc,l ,R(edx),$0)                     /* fold the subtraction's borrow too */
INSN2(mov,l ,R(ebx),R(edx))                 /* high half becomes next carry limb */
INSN1(inc,l ,R(size))                       /* advance index; ZF set when it hits 0 */
INSN1(jnz, ,Loop)
INSN2(mov,l ,R(eax),R(ebx))                 /* return value: final borrow-out limb */
INSN1(pop,l ,R(ebp))
INSN1(pop,l ,R(ebx))
INSN1(pop,l ,R(esi))
INSN1(pop,l ,R(edi))
ret
|
al3xtjames/Clover
| 3,246
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-add1.S
|
/* i80386 add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 12)
* mpi_size_t size) (sp + 16)
*/
/*
 * _gcry_mpih_add_n -- add the size-limb vectors at s1_ptr and s2_ptr
 * and store the sum at res_ptr.  Returns the carry-out (0 or 1) in eax.
 *
 * The loop is unrolled 8x.  For a size that is not a multiple of 8 the
 * code computes a jump *into the middle* of the unrolled loop: each
 * 3-instruction group (load / adcl / store) assembles to 9 bytes, so
 * the entry address is Loop + 9*eax - 3 relative to the loop end
 * (hence the eax+eax*8 lea).  The pointers are pre-biased downwards by
 * the skipped limbs so the fixed 0..28 displacements still line up.
 *
 * Carry discipline: `andl` clears CF before entry, and inside the loop
 * only adcl touches CF (leal and decl preserve it), so the carry chain
 * survives across iterations.
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_add_n)
C_SYMBOL_NAME(_gcry_mpih_add_n:)
pushl %edi
pushl %esi
movl 12(%esp),%edi /* res_ptr */
movl 16(%esp),%esi /* s1_ptr */
movl 20(%esp),%edx /* s2_ptr */
movl 24(%esp),%ecx /* size */
movl %ecx,%eax
shrl $3,%ecx /* compute count for unrolled loop */
negl %eax
andl $7,%eax /* get index where to start loop */
jz Loop /* necessary special case for 0 */
incl %ecx /* adjust loop count */
shll $2,%eax /* adjustment for pointers... */
subl %eax,%edi /* ... since they are offset ... */
subl %eax,%esi /* ... by a constant when we ... */
subl %eax,%edx /* ... enter the loop */
shrl $2,%eax /* restore previous value */
#ifdef PIC
/* Calculate start address in loop for PIC. Due to limitations in some
assemblers, Loop-L0-3 cannot be put into the leal */
call L0
L0: leal (%eax,%eax,8),%eax
addl (%esp),%eax
addl $(Loop-L0-3),%eax
addl $4,%esp
#else
/* Calculate start address in loop for non-PIC. */
leal (Loop - 3)(%eax,%eax,8),%eax
#endif
jmp *%eax /* jump into loop */
ALIGN (3)
Loop: movl (%esi),%eax
adcl (%edx),%eax
movl %eax,(%edi)
movl 4(%esi),%eax
adcl 4(%edx),%eax
movl %eax,4(%edi)
movl 8(%esi),%eax
adcl 8(%edx),%eax
movl %eax,8(%edi)
movl 12(%esi),%eax
adcl 12(%edx),%eax
movl %eax,12(%edi)
movl 16(%esi),%eax
adcl 16(%edx),%eax
movl %eax,16(%edi)
movl 20(%esi),%eax
adcl 20(%edx),%eax
movl %eax,20(%edi)
movl 24(%esi),%eax
adcl 24(%edx),%eax
movl %eax,24(%edi)
movl 28(%esi),%eax
adcl 28(%edx),%eax
movl %eax,28(%edi)
leal 32(%edi),%edi /* leal advances pointers without clobbering CF */
leal 32(%esi),%esi
leal 32(%edx),%edx
decl %ecx
jnz Loop
sbbl %eax,%eax /* eax = -CF */
negl %eax /* eax = carry-out (0 or 1) */
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 3,247
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/i386/mpih-sub1.S
|
/* i80386 sub_n -- Sub two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (sp + 4)
* mpi_ptr_t s1_ptr, (sp + 8)
* mpi_ptr_t s2_ptr, (sp + 12)
* mpi_size_t size) (sp + 16)
*/
/*
 * _gcry_mpih_sub_n -- subtract the size-limb vector at s2_ptr from the
 * one at s1_ptr and store the difference at res_ptr.  Returns the
 * borrow-out (0 or 1) in eax.
 *
 * Structurally identical to _gcry_mpih_add_n with sbbl in place of
 * adcl: the loop is unrolled 8x, and for sizes that are not a multiple
 * of 8 a computed jump enters the middle of the loop (each 9-byte
 * load/sbbl/store group is indexed via the eax+eax*8 lea).  The
 * pointers are pre-biased so the fixed displacements line up.
 *
 * Borrow discipline: `andl` clears CF before entry; inside the loop
 * only sbbl touches CF (leal and decl preserve it).
 */
.text
ALIGN (3)
.globl C_SYMBOL_NAME(_gcry_mpih_sub_n)
C_SYMBOL_NAME(_gcry_mpih_sub_n:)
pushl %edi
pushl %esi
movl 12(%esp),%edi /* res_ptr */
movl 16(%esp),%esi /* s1_ptr */
movl 20(%esp),%edx /* s2_ptr */
movl 24(%esp),%ecx /* size */
movl %ecx,%eax
shrl $3,%ecx /* compute count for unrolled loop */
negl %eax
andl $7,%eax /* get index where to start loop */
jz Loop /* necessary special case for 0 */
incl %ecx /* adjust loop count */
shll $2,%eax /* adjustment for pointers... */
subl %eax,%edi /* ... since they are offset ... */
subl %eax,%esi /* ... by a constant when we ... */
subl %eax,%edx /* ... enter the loop */
shrl $2,%eax /* restore previous value */
#ifdef PIC
/* Calculate start address in loop for PIC. Due to limitations in some
assemblers, Loop-L0-3 cannot be put into the leal */
call L0
L0: leal (%eax,%eax,8),%eax
addl (%esp),%eax
addl $(Loop-L0-3),%eax
addl $4,%esp
#else
/* Calculate start address in loop for non-PIC. */
leal (Loop - 3)(%eax,%eax,8),%eax
#endif
jmp *%eax /* jump into loop */
ALIGN (3)
Loop: movl (%esi),%eax
sbbl (%edx),%eax
movl %eax,(%edi)
movl 4(%esi),%eax
sbbl 4(%edx),%eax
movl %eax,4(%edi)
movl 8(%esi),%eax
sbbl 8(%edx),%eax
movl %eax,8(%edi)
movl 12(%esi),%eax
sbbl 12(%edx),%eax
movl %eax,12(%edi)
movl 16(%esi),%eax
sbbl 16(%edx),%eax
movl %eax,16(%edi)
movl 20(%esi),%eax
sbbl 20(%edx),%eax
movl %eax,20(%edi)
movl 24(%esi),%eax
sbbl 24(%edx),%eax
movl %eax,24(%edi)
movl 28(%esi),%eax
sbbl 28(%edx),%eax
movl %eax,28(%edi)
leal 32(%edi),%edi /* leal advances pointers without clobbering CF */
leal 32(%esi),%esi
leal 32(%edx),%edx
decl %ecx
jnz Loop
sbbl %eax,%eax /* eax = -CF */
negl %eax /* eax = borrow-out (0 or 1) */
popl %esi
popl %edi
ret
|
al3xtjames/Clover
| 2,152
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32/mpih-lshift.S
|
/* sparc lshift
*
* Copyright (C) 1995, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
! INPUT PARAMETERS
! res_ptr %o0
! src_ptr %o1
! size %o2
! cnt %o3
#include "sysdep.h"
/*
 * _gcry_mpih_lshift (SPARC v7/v8) -- shift the size-limb number at
 * src_ptr (%o1) left by cnt (%o3) bits into res_ptr (%o0), walking
 * from the most significant limb downwards.  Returns the bits shifted
 * out of the top limb in %o0.
 *
 * Each result limb is (cur << cnt) | (next >> (32-cnt)); %o5 holds the
 * negated count for the complementary srl.  Loop0 peels size%4 limbs,
 * then Loop handles 4 limbs per pass.  Note SPARC delay slots: the
 * instruction after every branch executes regardless.  The function
 * result is parked in the stack scratch slot [%sp+80] until the retl
 * delay slot reloads it (assumed free per this ABI's frame layout --
 * TODO confirm against the SPARC calling convention).
 */
.text
.align 4
.global C_SYMBOL_NAME(_gcry_mpih_lshift)
C_SYMBOL_NAME(_gcry_mpih_lshift):
sll %o2,2,%g1 ! %g1 = size in bytes
add %o1,%g1,%o1 ! make %o1 point at end of src
ld [%o1-4],%g2 ! load first limb
sub %g0,%o3,%o5 ! negate shift count
add %o0,%g1,%o0 ! make %o0 point at end of res
add %o2,-1,%o2
andcc %o2,4-1,%g4 ! number of limbs in first loop
srl %g2,%o5,%g1 ! compute function result
be L0 ! if multiple of 4 limbs, skip first loop
st %g1,[%sp+80] ! (delay slot) park return value on the stack
sub %o2,%g4,%o2 ! adjust count for main loop
Loop0: ld [%o1-8],%g3 ! peel loop: one limb per pass
add %o0,-4,%o0
add %o1,-4,%o1
addcc %g4,-1,%g4
sll %g2,%o3,%o4
srl %g3,%o5,%g1
mov %g3,%g2
or %o4,%g1,%o4 ! combine the two partial limbs
bne Loop0
st %o4,[%o0+0] ! (delay slot) store result limb
L0: tst %o2
be Lend
nop
Loop: ld [%o1-8],%g3 ! main loop: 4 limbs per pass
add %o0,-16,%o0
addcc %o2,-4,%o2
sll %g2,%o3,%o4
srl %g3,%o5,%g1
ld [%o1-12],%g2
sll %g3,%o3,%g4
or %o4,%g1,%o4
st %o4,[%o0+12]
srl %g2,%o5,%g1
ld [%o1-16],%g3
sll %g2,%o3,%o4
or %g4,%g1,%g4
st %g4,[%o0+8]
srl %g3,%o5,%g1
ld [%o1-20],%g2
sll %g3,%o3,%g4
or %o4,%g1,%o4
st %o4,[%o0+4]
srl %g2,%o5,%g1
add %o1,-16,%o1
or %g4,%g1,%g4
bne Loop
st %g4,[%o0+0] ! (delay slot) store last limb of this pass
Lend: sll %g2,%o3,%g2 ! least significant result limb
st %g2,[%o0-4]
retl
ld [%sp+80],%o0 ! (delay slot) reload return value
|
al3xtjames/Clover
| 3,960
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32/udiv.S
|
/* SPARC v7 __udiv_qrnnd division support, used from longlong.h.
* This is for v7 CPUs without a floating-point unit.
*
* Copyright (C) 1993, 1994, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
! INPUT PARAMETERS
! rem_ptr o0
! n1 o1
! n0 o2
! d o3
#include "sysdep.h"
/*
 * __udiv_qrnnd (SPARC v7, no FPU) -- divide the two-limb number n1:n0
 * (%o1 = high limb, %o2 = low limb) by d (%o3).  The quotient is
 * returned in %o0 and the remainder is stored through rem_ptr (the
 * original %o0).
 *
 * Pure shift-and-subtract division: %g1 counts 8 passes of 4
 * conditional-subtract steps each, developing 32 quotient bits.  The
 * loop body exists twice ("Lp*" and "Ln*" chains) so the partial
 * remainder can alternate between %o1 and %o4 without register moves;
 * quotient bits accumulate *inverted* in %o2 and are corrected by the
 * final xnor.  Every branch's delay slot (`addxcc %o2,%o2,%o2`) shifts
 * the next quotient bit in via the carry flag.
 *
 * Divisors with the top bit set take the Largedivisor path: the
 * dividend is halved and division is done by ceil(d/2); Oddp/Oddn then
 * fix up quotient and remainder for odd d (the saved low bit is %o5).
 */
.text
.align 4
.global C_SYMBOL_NAME(__udiv_qrnnd)
C_SYMBOL_NAME(__udiv_qrnnd):
tst %o3
bneg Largedivisor ! d has its top bit set: use the halving path
mov 8,%g1 ! (delay slot) 8 passes x 4 steps = 32 quotient bits
b Lp1
addxcc %o2,%o2,%o2 ! (delay slot) shift quotient bit into carry
Lplop: bcc Ln1
addxcc %o2,%o2,%o2
Lp1: addx %o1,%o1,%o1 ! shift remainder left, bring in carry bit
subcc %o1,%o3,%o4 ! trial subtract; result used only if it fits
bcc Ln2
addxcc %o2,%o2,%o2
Lp2: addx %o1,%o1,%o1
subcc %o1,%o3,%o4
bcc Ln3
addxcc %o2,%o2,%o2
Lp3: addx %o1,%o1,%o1
subcc %o1,%o3,%o4
bcc Ln4
addxcc %o2,%o2,%o2
Lp4: addx %o1,%o1,%o1
addcc %g1,-1,%g1 ! pass counter
bne Lplop
subcc %o1,%o3,%o4 ! (delay slot) trial subtract for next step
bcc Ln5
addxcc %o2,%o2,%o2
Lp5: st %o1,[%o0] ! remainder ended up in %o1
retl
xnor %g0,%o2,%o0 ! (delay slot) un-invert accumulated quotient
Lnlop: bcc Lp1
addxcc %o2,%o2,%o2
Ln1: addx %o4,%o4,%o4 ! same steps, remainder now lives in %o4
subcc %o4,%o3,%o1
bcc Lp2
addxcc %o2,%o2,%o2
Ln2: addx %o4,%o4,%o4
subcc %o4,%o3,%o1
bcc Lp3
addxcc %o2,%o2,%o2
Ln3: addx %o4,%o4,%o4
subcc %o4,%o3,%o1
bcc Lp4
addxcc %o2,%o2,%o2
Ln4: addx %o4,%o4,%o4
addcc %g1,-1,%g1
bne Lnlop
subcc %o4,%o3,%o1
bcc Lp5
addxcc %o2,%o2,%o2
Ln5: st %o4,[%o0] ! remainder ended up in %o4
retl
xnor %g0,%o2,%o0 ! (delay slot) un-invert accumulated quotient
Largedivisor:
and %o2,1,%o5 ! %o5 = n0 & 1
srl %o2,1,%o2
sll %o1,31,%g2
or %g2,%o2,%o2 ! %o2 = lo(n1n0 >> 1)
srl %o1,1,%o1 ! %o1 = hi(n1n0 >> 1)
and %o3,1,%g2
srl %o3,1,%g3 ! %g3 = floor(d / 2)
add %g3,%g2,%g3 ! %g3 = ceil(d / 2)
b LLp1
addxcc %o2,%o2,%o2
LLplop: bcc LLn1
addxcc %o2,%o2,%o2
LLp1: addx %o1,%o1,%o1
subcc %o1,%g3,%o4
bcc LLn2
addxcc %o2,%o2,%o2
LLp2: addx %o1,%o1,%o1
subcc %o1,%g3,%o4
bcc LLn3
addxcc %o2,%o2,%o2
LLp3: addx %o1,%o1,%o1
subcc %o1,%g3,%o4
bcc LLn4
addxcc %o2,%o2,%o2
LLp4: addx %o1,%o1,%o1
addcc %g1,-1,%g1
bne LLplop
subcc %o1,%g3,%o4
bcc LLn5
addxcc %o2,%o2,%o2
LLp5: add %o1,%o1,%o1 ! << 1
tst %g2 ! was d odd?
bne Oddp
add %o5,%o1,%o1 ! (delay slot) re-attach saved low dividend bit
st %o1,[%o0]
retl
xnor %g0,%o2,%o0
LLnlop: bcc LLp1
addxcc %o2,%o2,%o2
LLn1: addx %o4,%o4,%o4
subcc %o4,%g3,%o1
bcc LLp2
addxcc %o2,%o2,%o2
LLn2: addx %o4,%o4,%o4
subcc %o4,%g3,%o1
bcc LLp3
addxcc %o2,%o2,%o2
LLn3: addx %o4,%o4,%o4
subcc %o4,%g3,%o1
bcc LLp4
addxcc %o2,%o2,%o2
LLn4: addx %o4,%o4,%o4
addcc %g1,-1,%g1
bne LLnlop
subcc %o4,%g3,%o1
bcc LLp5
addxcc %o2,%o2,%o2
LLn5: add %o4,%o4,%o4 ! << 1
tst %g2 ! was d odd?
bne Oddn
add %o5,%o4,%o4 ! (delay slot) re-attach saved low dividend bit
st %o4,[%o0]
retl
xnor %g0,%o2,%o0
Oddp: xnor %g0,%o2,%o2 ! fix-up for odd d, remainder in %o1
! q' in %o2. r' in %o1
addcc %o1,%o2,%o1
bcc LLp6
addx %o2,0,%o2
sub %o1,%o3,%o1
LLp6: subcc %o1,%o3,%g0 ! while remainder >= d, correct once more
bcs LLp7
subx %o2,-1,%o2
sub %o1,%o3,%o1
LLp7: st %o1,[%o0]
retl
mov %o2,%o0
Oddn: xnor %g0,%o2,%o2 ! fix-up for odd d, remainder in %o4
! q' in %o2. r' in %o4
addcc %o4,%o2,%o4
bcc LLn6
addx %o2,0,%o2
sub %o4,%o3,%o4
LLn6: subcc %o4,%o3,%g0 ! while remainder >= d, correct once more
bcs LLn7
subx %o2,-1,%o2
sub %o4,%o3,%o4
LLn7: st %o4,[%o0]
retl
mov %o2,%o0
|
al3xtjames/Clover
| 2,034
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32/mpih-rshift.S
|
/* sparc rshift
*
* Copyright (C) 1995, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
! INPUT PARAMETERS
! res_ptr %o0
! src_ptr %o1
! size %o2
! cnt %o3
#include "sysdep.h"
/*
 * _gcry_mpih_rshift (SPARC v7/v8) -- shift the size-limb number at
 * src_ptr (%o1) right by cnt (%o3) bits into res_ptr (%o0), walking
 * from the least significant limb upwards.  Returns the bits shifted
 * out of the bottom limb in %o0.
 *
 * Mirror of the SPARC lshift: each result limb is
 * (cur >> cnt) | (next << (32-cnt)), with %o5 holding the negated
 * count.  Loop0 peels size%4 limbs, Loop handles 4 per pass.  Branch
 * delay slots execute unconditionally; the return value is parked in
 * the stack scratch slot [%sp+80] until the retl delay slot reloads it
 * (assumed free per this ABI's frame layout -- TODO confirm).
 */
.text
.align 4
.global C_SYMBOL_NAME(_gcry_mpih_rshift)
C_SYMBOL_NAME(_gcry_mpih_rshift):
ld [%o1],%g2 ! load first limb
sub %g0,%o3,%o5 ! negate shift count
add %o2,-1,%o2
andcc %o2,4-1,%g4 ! number of limbs in first loop
sll %g2,%o5,%g1 ! compute function result
be L0 ! if multiple of 4 limbs, skip first loop
st %g1,[%sp+80] ! (delay slot) park return value on the stack
sub %o2,%g4,%o2 ! adjust count for main loop
Loop0: ld [%o1+4],%g3 ! peel loop: one limb per pass
add %o0,4,%o0
add %o1,4,%o1
addcc %g4,-1,%g4
srl %g2,%o3,%o4
sll %g3,%o5,%g1
mov %g3,%g2
or %o4,%g1,%o4 ! combine the two partial limbs
bne Loop0
st %o4,[%o0-4] ! (delay slot) store result limb
L0: tst %o2
be Lend
nop
Loop: ld [%o1+4],%g3 ! main loop: 4 limbs per pass
add %o0,16,%o0
addcc %o2,-4,%o2
srl %g2,%o3,%o4
sll %g3,%o5,%g1
ld [%o1+8],%g2
srl %g3,%o3,%g4
or %o4,%g1,%o4
st %o4,[%o0-16]
sll %g2,%o5,%g1
ld [%o1+12],%g3
srl %g2,%o3,%o4
or %g4,%g1,%g4
st %g4,[%o0-12]
sll %g3,%o5,%g1
ld [%o1+16],%g2
srl %g3,%o3,%g4
or %o4,%g1,%o4
st %o4,[%o0-8]
sll %g2,%o5,%g1
add %o1,16,%o1
or %g4,%g1,%g4
bne Loop
st %g4,[%o0-4] ! (delay slot) store last limb of this pass
Lend: srl %g2,%o3,%g2 ! most significant result limb
st %g2,[%o0-0]
retl
ld [%sp+80],%o0 ! (delay slot) reload return value
|
al3xtjames/Clover
| 5,746
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32/mpih-add1.S
|
/* SPARC _add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1995, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr,
* mpi_ptr_t s1_ptr,
* mpi_ptr_t s2_ptr,
* mpi_size_t size)
*/
! INPUT PARAMETERS
#define res_ptr %o0
#define s1_ptr %o1
#define s2_ptr %o2
#define size %o3
#include "sysdep.h"
/*
 * _gcry_mpih_add_n (SPARC v7/v8) -- add the size-limb vectors at
 * s1_ptr and s2_ptr and store the sum at res_ptr; the carry-out (0 or
 * 1) is returned in %o0.
 *
 * Three code paths are chosen by word-alignment of the pointers so
 * that 64-bit ldd/std accesses stay 8-byte aligned:
 *   V1a: s2_ptr and res_ptr share alignment -> ldd from s2, std to res.
 *   V1b: s1_ptr and res_ptr share alignment -> swap s1/s2, reuse V1a.
 *   V2:  s1_ptr and s2_ptr share alignment  -> ldd from both sources.
 * A single leading limb may be added separately to reach alignment.
 *
 * SPARC has no carry-preserving loop counter, so the carry is
 * repeatedly materialized with `addx %g0,%g0,%o4` (cy -> register) and
 * re-armed with `subcc %g0,%o4,%g0` (register -> cy), usually sitting
 * in a branch delay slot.  Loops handle 8 limbs, then 2, then a final
 * odd limb.
 */
.text
.align 4
.global C_SYMBOL_NAME(_gcry_mpih_add_n)
C_SYMBOL_NAME(_gcry_mpih_add_n):
xor s2_ptr,res_ptr,%g1
andcc %g1,4,%g0 ! do s2_ptr and res_ptr have the same word alignment?
bne L1 ! branch if alignment differs
nop
! ** V1a **
L0: andcc res_ptr,4,%g0 ! res_ptr unaligned? Side effect: cy=0
be L_v1 ! if no, branch
nop
/* Add least significant limb separately to align res_ptr and s2_ptr */
ld [s1_ptr],%g4
add s1_ptr,4,s1_ptr
ld [s2_ptr],%g2
add s2_ptr,4,s2_ptr
add size,-1,size
addcc %g4,%g2,%o4
st %o4,[res_ptr]
add res_ptr,4,res_ptr
L_v1: addx %g0,%g0,%o4 ! save cy in register
cmp size,2 ! if size < 2 ...
bl Lend2 ! ... branch to tail code
subcc %g0,%o4,%g0 ! restore cy
ld [s1_ptr+0],%g4
addcc size,-10,size
ld [s1_ptr+4],%g1
ldd [s2_ptr+0],%g2
blt Lfin1
subcc %g0,%o4,%g0 ! restore cy
/* Add blocks of 8 limbs until less than 8 limbs remain */
Loop1: addxcc %g4,%g2,%o4
ld [s1_ptr+8],%g4
addxcc %g1,%g3,%o5
ld [s1_ptr+12],%g1
ldd [s2_ptr+8],%g2
std %o4,[res_ptr+0]
addxcc %g4,%g2,%o4
ld [s1_ptr+16],%g4
addxcc %g1,%g3,%o5
ld [s1_ptr+20],%g1
ldd [s2_ptr+16],%g2
std %o4,[res_ptr+8]
addxcc %g4,%g2,%o4
ld [s1_ptr+24],%g4
addxcc %g1,%g3,%o5
ld [s1_ptr+28],%g1
ldd [s2_ptr+24],%g2
std %o4,[res_ptr+16]
addxcc %g4,%g2,%o4
ld [s1_ptr+32],%g4
addxcc %g1,%g3,%o5
ld [s1_ptr+36],%g1
ldd [s2_ptr+32],%g2
std %o4,[res_ptr+24]
addx %g0,%g0,%o4 ! save cy in register
addcc size,-8,size
add s1_ptr,32,s1_ptr
add s2_ptr,32,s2_ptr
add res_ptr,32,res_ptr
bge Loop1
subcc %g0,%o4,%g0 ! restore cy
Lfin1: addcc size,8-2,size
blt Lend1
subcc %g0,%o4,%g0 ! restore cy
/* Add blocks of 2 limbs until less than 2 limbs remain */
Loope1: addxcc %g4,%g2,%o4
ld [s1_ptr+8],%g4
addxcc %g1,%g3,%o5
ld [s1_ptr+12],%g1
ldd [s2_ptr+8],%g2
std %o4,[res_ptr+0]
addx %g0,%g0,%o4 ! save cy in register
addcc size,-2,size
add s1_ptr,8,s1_ptr
add s2_ptr,8,s2_ptr
add res_ptr,8,res_ptr
bge Loope1
subcc %g0,%o4,%g0 ! restore cy
Lend1: addxcc %g4,%g2,%o4
addxcc %g1,%g3,%o5
std %o4,[res_ptr+0]
addx %g0,%g0,%o4 ! save cy in register
andcc size,1,%g0
be Lret1
subcc %g0,%o4,%g0 ! restore cy
/* Add last limb */
ld [s1_ptr+8],%g4
ld [s2_ptr+8],%g2
addxcc %g4,%g2,%o4
st %o4,[res_ptr+8]
Lret1: retl
addx %g0,%g0,%o0 ! return carry-out from most sign. limb
L1: xor s1_ptr,res_ptr,%g1
andcc %g1,4,%g0 ! do s1_ptr and res_ptr have the same word alignment?
bne L2
nop
! ** V1b **
mov s2_ptr,%g1 ! swap s1_ptr and s2_ptr (addition commutes) ...
mov s1_ptr,s2_ptr
b L0 ! ... then reuse the V1a code path
mov %g1,s1_ptr
! ** V2 **
/* If we come here, the alignment of s1_ptr and res_ptr as well as the
alignment of s2_ptr and res_ptr differ. Since there are only two ways
things can be aligned (that we care about) we now know that the alignment
of s1_ptr and s2_ptr are the same. */
L2: cmp size,1
be Ljone
nop
andcc s1_ptr,4,%g0 ! s1_ptr unaligned? Side effect: cy=0
be L_v2 ! if no, branch
nop
/* Add least significant limb separately to align s1_ptr and s2_ptr */
ld [s1_ptr],%g4
add s1_ptr,4,s1_ptr
ld [s2_ptr],%g2
add s2_ptr,4,s2_ptr
add size,-1,size
addcc %g4,%g2,%o4
st %o4,[res_ptr]
add res_ptr,4,res_ptr
L_v2: addx %g0,%g0,%o4 ! save cy in register
addcc size,-8,size
blt Lfin2
subcc %g0,%o4,%g0 ! restore cy
/* Add blocks of 8 limbs until less than 8 limbs remain */
Loop2: ldd [s1_ptr+0],%g2
ldd [s2_ptr+0],%o4
addxcc %g2,%o4,%g2
st %g2,[res_ptr+0]
addxcc %g3,%o5,%g3
st %g3,[res_ptr+4]
ldd [s1_ptr+8],%g2
ldd [s2_ptr+8],%o4
addxcc %g2,%o4,%g2
st %g2,[res_ptr+8]
addxcc %g3,%o5,%g3
st %g3,[res_ptr+12]
ldd [s1_ptr+16],%g2
ldd [s2_ptr+16],%o4
addxcc %g2,%o4,%g2
st %g2,[res_ptr+16]
addxcc %g3,%o5,%g3
st %g3,[res_ptr+20]
ldd [s1_ptr+24],%g2
ldd [s2_ptr+24],%o4
addxcc %g2,%o4,%g2
st %g2,[res_ptr+24]
addxcc %g3,%o5,%g3
st %g3,[res_ptr+28]
addx %g0,%g0,%o4 ! save cy in register
addcc size,-8,size
add s1_ptr,32,s1_ptr
add s2_ptr,32,s2_ptr
add res_ptr,32,res_ptr
bge Loop2
subcc %g0,%o4,%g0 ! restore cy
Lfin2: addcc size,8-2,size
blt Lend2
subcc %g0,%o4,%g0 ! restore cy
/* Add blocks of 2 limbs until less than 2 limbs remain */
Loope2: ldd [s1_ptr+0],%g2
ldd [s2_ptr+0],%o4
addxcc %g2,%o4,%g2
st %g2,[res_ptr+0]
addxcc %g3,%o5,%g3
st %g3,[res_ptr+4]
addx %g0,%g0,%o4 ! save cy in register
addcc size,-2,size
add s1_ptr,8,s1_ptr
add s2_ptr,8,s2_ptr
add res_ptr,8,res_ptr
bge Loope2
subcc %g0,%o4,%g0 ! restore cy
Lend2: andcc size,1,%g0
be Lret2
subcc %g0,%o4,%g0 ! restore cy
/* Add last limb */
Ljone: ld [s1_ptr],%g4
ld [s2_ptr],%g2
addxcc %g4,%g2,%o4
st %o4,[res_ptr]
Lret2: retl
addx %g0,%g0,%o0 ! return carry-out from most sign. limb
|
al3xtjames/Clover
| 2,960
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-mul1.S
|
/* IBM POWER mul_1 -- Multiply a limb vector with a limb and store
* the result in a second limb vector.
*
* Copyright (C) 1992, 1994, 1999, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s1_ptr r4
# size r5
# s2_limb r6
# The RS/6000 has no unsigned 32x32->64 bit multiplication instruction. To
# obtain that operation, we have to use the 32x32->64 signed multiplication
# instruction, and add the appropriate compensation to the high limb of the
# result. We add the multiplicand if the multiplier has its most significant
# bit set, and we add the multiplier if the multiplicand has its most
# significant bit set. We need to preserve the carry flag between each
# iteration, so we have to compute the compensation carefully (the natural,
# srai+and doesn't work). Since the POWER architecture has a branch unit
# we can branch in zero cycles, so that's how we perform the additions.
*/
.toc
.csect ._gcry_mpih_mul_1[PR]
.align 2
.globl _gcry_mpih_mul_1
.globl ._gcry_mpih_mul_1
.csect _gcry_mpih_mul_1[DS]
_gcry_mpih_mul_1:
.long ._gcry_mpih_mul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_mul_1[PR]
._gcry_mpih_mul_1:
# Register roles: r0 = current s1 limb, r8 = low product limb,
# r9/r10 = alternating high (carry) limbs so the 2x-unrolled loop needs no
# register moves, r7 = sign compensation for a negative s1 limb.
# r3 is pre-decremented by 4 because the loop stores with stu +4 offsets.
	cal 3,-4(3)
	l 0,0(4)
	cmpi 0,6,0
	mtctr 5
	mul 9,0,6
	srai 7,0,31
	and 7,7,6
	mfmq 8
	ai 0,0,0 # reset carry
	cax 9,9,7
# Two separate loop bodies: Lpos when s2_limb >= 0, Lneg when s2_limb < 0
# (the latter must also add s1's limb into every high product limb).
	blt Lneg
Lpos: bdz Lend
Lploop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	mfmq 0
	ae 8,0,9
	bge Lp0
	cax 10,10,6 # adjust high limb for negative limb from s1
Lp0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	mfmq 0
	ae 8,0,10
	bge Lp1
	cax 9,9,6 # adjust high limb for negative limb from s1
Lp1: bdn Lploop
	b Lend
Lneg: cax 9,9,0
	bdz Lend
Lnloop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	cax 10,10,0 # adjust high limb for negative s2_limb
	mfmq 0
	ae 8,0,9
	bge Ln0
	cax 10,10,6 # adjust high limb for negative limb from s1
Ln0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	cax 9,9,0 # adjust high limb for negative s2_limb
	mfmq 0
	ae 8,0,10
	bge Ln1
	cax 9,9,6 # adjust high limb for negative limb from s1
Ln1: bdn Lnloop
	b Lend
# Lend0: loop exited with the carry limb in r10; move it to r9 for Lend.
Lend0: cal 9,0(10)
# Store the final low limb and return cy_limb + carry flag in r3.
Lend: st 8,4(3)
	aze 3,9
	br
|
al3xtjames/Clover
| 3,169
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-mul2.S
|
/* IBM POWER addmul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1992, 1994, 1999, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s1_ptr r4
# size r5
# s2_limb r6
# The RS/6000 has no unsigned 32x32->64 bit multiplication instruction. To
# obtain that operation, we have to use the 32x32->64 signed multiplication
# instruction, and add the appropriate compensation to the high limb of the
# result. We add the multiplicand if the multiplier has its most significant
# bit set, and we add the multiplier if the multiplicand has its most
# significant bit set. We need to preserve the carry flag between each
# iteration, so we have to compute the compensation carefully (the natural,
# srai+and doesn't work). Since the POWER architecture has a branch unit
# we can branch in zero cycles, so that's how we perform the additions.
*/
.toc
.csect ._gcry_mpih_addmul_1[PR]
.align 2
.globl _gcry_mpih_addmul_1
.globl ._gcry_mpih_addmul_1
.csect _gcry_mpih_addmul_1[DS]
_gcry_mpih_addmul_1:
.long ._gcry_mpih_addmul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_addmul_1[PR]
._gcry_mpih_addmul_1:
# Same structure as mul_1, but each product is also added to the existing
# res limb (loaded into r7).  Register roles: r0 = s1 limb, r8 = low limb,
# r9/r10 = alternating high (carry) limbs, r7 = res limb / sign mask.
	cal 3,-4(3)
	l 0,0(4)
	cmpi 0,6,0
	mtctr 5
	mul 9,0,6
	srai 7,0,31
	and 7,7,6
	mfmq 8
	cax 9,9,7
	l 7,4(3)
	a 8,8,7 # add res_limb
# Dispatch on the sign of s2_limb, as in mul_1.
	blt Lneg
Lpos: bdz Lend
Lploop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	mfmq 0
	ae 8,0,9 # low limb + old_cy_limb + old cy
	l 7,4(3)
	aze 10,10 # propagate cy to new cy_limb
	a 8,8,7 # add res_limb
	bge Lp0
	cax 10,10,6 # adjust high limb for negative limb from s1
Lp0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	mfmq 0
	ae 8,0,10
	l 7,4(3)
	aze 9,9
	a 8,8,7
	bge Lp1
	cax 9,9,6 # adjust high limb for negative limb from s1
Lp1: bdn Lploop
	b Lend
Lneg: cax 9,9,0
	bdz Lend
Lnloop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	mfmq 7
	ae 8,7,9
	l 7,4(3)
	ae 10,10,0 # propagate cy to new cy_limb
	a 8,8,7 # add res_limb
	bge Ln0
	cax 10,10,6 # adjust high limb for negative limb from s1
Ln0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	mfmq 7
	ae 8,7,10
	l 7,4(3)
	ae 9,9,0 # propagate cy to new cy_limb
	a 8,8,7 # add res_limb
	bge Ln1
	cax 9,9,6 # adjust high limb for negative limb from s1
Ln1: bdn Lnloop
	b Lend
# Lend0: loop exited with the carry limb in r10; move it to r9.
Lend0: cal 9,0(10)
# Store last low limb; return carry limb plus final carry flag in r3.
Lend: st 8,4(3)
	aze 3,9
	br
|
al3xtjames/Clover
| 1,943
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-lshift.S
|
/* IBM POWER lshift
*
* Copyright (C) 1992, 1994, 1999, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s_ptr r4
# size r5
# cnt r6
*/
.toc
.extern _gcry_mpih_lshift[DS]
.extern ._gcry_mpih_lshift
.csect [PR]
.align 2
.globl _gcry_mpih_lshift
.globl ._gcry_mpih_lshift
.csect _gcry_mpih_lshift[DS]
_gcry_mpih_lshift:
.long ._gcry_mpih_lshift, TOC[tc0], 0
.csect [PR]
._gcry_mpih_lshift:
# Works downward from the most significant limb.  The chained sre/sreq
# instructions use the MQ register to carry the bits shifted out of one
# limb into the next, so each result limb is produced by one instruction.
# r9 = res_ptr + 4*size, r4 = s_ptr + 4*size, r8 = 32 - cnt.
	sli 0,5,2
	cax 9,3,0
	cax 4,4,0
	sfi 8,6,32
	mtctr 5 # put limb count in CTR loop register
	lu 0,-4(4) # read most significant limb
	sre 3,0,8 # compute carry out limb, and init MQ register
	bdz Lend2 # if just one limb, skip loop
	lu 0,-4(4) # read 2:nd most significant limb
	sreq 7,0,8 # compute most significant limb of result
	bdz Lend # if just two limb, skip loop
Loop: lu 0,-4(4) # load next lower limb
	stu 7,-4(9) # store previous result during read latency
	sreq 7,0,8 # compute result limb
	bdn Loop # loop back until CTR is zero
Lend: stu 7,-4(9) # store 2:nd least significant limb
Lend2: sle 7,0,6 # compute least significant limb
	st 7,-4(9) # store it
# Return value (bits shifted out of the top limb) was placed in r3 above.
	br
|
al3xtjames/Clover
| 1,962
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-rshift.S
|
/* IBM POWER rshift
*
* Copyright (C) 1992, 1994, 1999, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s_ptr r4
# size r5
# cnt r6
*/
.toc
.extern _gcry_mpih_rshift[DS]
.extern ._gcry_mpih_rshift
.csect [PR]
.align 2
.globl _gcry_mpih_rshift
.globl ._gcry_mpih_rshift
.csect _gcry_mpih_rshift[DS]
_gcry_mpih_rshift:
.long ._gcry_mpih_rshift, TOC[tc0], 0
.csect [PR]
._gcry_mpih_rshift:
# Mirror image of lshift: walks upward from the least significant limb,
# using the MQ register (sle/sleq) to pass the bits shifted out of one
# limb into the next.  r8 = 32 - cnt, r9 = res_ptr - 4 for the stu stores.
	sfi 8,6,32
	mtctr 5 # put limb count in CTR loop register
	l 0,0(4) # read least significant limb
	ai 9,3,-4 # adjust res_ptr since it's offset in the stu:s
	sle 3,0,8 # compute carry limb, and init MQ register
	bdz Lend2 # if just one limb, skip loop
	lu 0,4(4) # read 2:nd least significant limb
	sleq 7,0,8 # compute least significant limb of result
	bdz Lend # if just two limb, skip loop
Loop: lu 0,4(4) # load next higher limb
	stu 7,4(9) # store previous result during read latency
	sleq 7,0,8 # compute result limb
	bdn Loop # loop back until CTR is zero
Lend: stu 7,4(9) # store 2:nd most significant limb
Lend2: sre 7,0,6 # compute most significant limb
	st 7,4(9) # store it
# Return value (bits shifted out of the bottom limb) was placed in r3 above.
	br
|
al3xtjames/Clover
| 3,381
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-mul3.S
|
/* IBM POWER submul_1 -- Multiply a limb vector with a limb and subtract
* the result from a second limb vector.
*
* Copyright (C) 1992, 1994, 1999, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s1_ptr r4
# size r5
# s2_limb r6
# The RS/6000 has no unsigned 32x32->64 bit multiplication instruction. To
# obtain that operation, we have to use the 32x32->64 signed multiplication
# instruction, and add the appropriate compensation to the high limb of the
# result. We add the multiplicand if the multiplier has its most significant
# bit set, and we add the multiplier if the multiplicand has its most
# significant bit set. We need to preserve the carry flag between each
# iteration, so we have to compute the compensation carefully (the natural,
# srai+and doesn't work). Since the POWER architecture has a branch unit
# we can branch in zero cycles, so that's how we perform the additions.
*/
.toc
.csect ._gcry_mpih_submul_1[PR]
.align 2
.globl _gcry_mpih_submul_1
.globl ._gcry_mpih_submul_1
.csect _gcry_mpih_submul_1[DS]
_gcry_mpih_submul_1:
.long ._gcry_mpih_submul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_submul_1[PR]
._gcry_mpih_submul_1:
# Like addmul_1 but subtracting.  POWER's sf sets CA as "no borrow", so the
# result is re-added to itself (a 11,8,11) purely to flip CA back into the
# borrow sense the next iteration expects; r11 is scratch for this.
	cal 3,-4(3)
	l 0,0(4)
	cmpi 0,6,0
	mtctr 5
	mul 9,0,6
	srai 7,0,31
	and 7,7,6
	mfmq 11
	cax 9,9,7
	l 7,4(3)
	sf 8,11,7 # add res_limb
	a 11,8,11 # invert cy (r11 is junk)
# Dispatch on the sign of s2_limb, as in mul_1/addmul_1.
	blt Lneg
Lpos: bdz Lend
Lploop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	mfmq 0
	ae 11,0,9 # low limb + old_cy_limb + old cy
	l 7,4(3)
	aze 10,10 # propagate cy to new cy_limb
	sf 8,11,7 # add res_limb
	a 11,8,11 # invert cy (r11 is junk)
	bge Lp0
	cax 10,10,6 # adjust high limb for negative limb from s1
Lp0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	mfmq 0
	ae 11,0,10
	l 7,4(3)
	aze 9,9
	sf 8,11,7
	a 11,8,11 # invert cy (r11 is junk)
	bge Lp1
	cax 9,9,6 # adjust high limb for negative limb from s1
Lp1: bdn Lploop
	b Lend
Lneg: cax 9,9,0
	bdz Lend
Lnloop: lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 10,0,6
	mfmq 7
	ae 11,7,9
	l 7,4(3)
	ae 10,10,0 # propagate cy to new cy_limb
	sf 8,11,7 # add res_limb
	a 11,8,11 # invert cy (r11 is junk)
	bge Ln0
	cax 10,10,6 # adjust high limb for negative limb from s1
Ln0: bdz Lend0
	lu 0,4(4)
	stu 8,4(3)
	cmpi 0,0,0
	mul 9,0,6
	mfmq 7
	ae 11,7,10
	l 7,4(3)
	ae 9,9,0 # propagate cy to new cy_limb
	sf 8,11,7 # add res_limb
	a 11,8,11 # invert cy (r11 is junk)
	bge Ln1
	cax 9,9,6 # adjust high limb for negative limb from s1
Ln1: bdn Lnloop
	b Lend
# Lend0: loop exited with the carry limb in r10; move it to r9.
Lend0: cal 9,0(10)
# Store last low limb; return carry limb plus final carry flag in r3.
Lend: st 8,4(3)
	aze 3,9
	br
|
al3xtjames/Clover
| 2,763
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-add1.S
|
/* IBM POWER add_n -- Add two limb vectors of equal, non-zero length.
*
* Copyright (C) 1992, 1994, 1996, 1999,
* 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s1_ptr r4
# s2_ptr r5
# size r6
*/
.toc
.extern _gcry_mpih_add_n[DS]
.extern ._gcry_mpih_add_n
.csect [PR]
.align 2
.globl _gcry_mpih_add_n
.globl ._gcry_mpih_add_n
.csect _gcry_mpih_add_n[DS]
_gcry_mpih_add_n:
.long ._gcry_mpih_add_n, TOC[tc0], 0
.csect [PR]
._gcry_mpih_add_n:
# Loop is unrolled 2x and software-pipelined: loads for the next pair of
# limbs overlap the add/store of the previous pair.  The carry flag stays
# live across the whole loop (lu/stu/bdn do not disturb CA).
	andil. 10,6,1 # odd or even number of limbs?
	l 8,0(4) # load least significant s1 limb
	l 0,0(5) # load least significant s2 limb
	cal 3,-4(3) # offset res_ptr, it's updated before it's used
	sri 10,6,1 # count for unrolled loop
	a 7,0,8 # add least significant limbs, set cy
	mtctr 10 # copy count into CTR
	beq 0,Leven # branch if even # of limbs (# of limbs >= 2)
# We have an odd # of limbs. Add the first limbs separately.
	cmpi 1,10,0 # is count for unrolled loop zero?
	bne 1,L1 # branch if not
# size == 1: store the single sum and return the carry.
	st 7,4(3)
	aze 3,10 # use the fact that r10 is zero...
	br # return
# We added least significant limbs. Now reload the next limbs to enter loop.
L1: lu 8,4(4) # load s1 limb and update s1_ptr
	lu 0,4(5) # load s2 limb and update s2_ptr
	stu 7,4(3)
	ae 7,0,8 # add limbs, set cy
Leven: lu 9,4(4) # load s1 limb and update s1_ptr
	lu 10,4(5) # load s2 limb and update s2_ptr
	bdz Lend # If done, skip loop
Loop: lu 8,4(4) # load s1 limb and update s1_ptr
	lu 0,4(5) # load s2 limb and update s2_ptr
	ae 11,9,10 # add previous limbs with cy, set cy
	stu 7,4(3) #
	lu 9,4(4) # load s1 limb and update s1_ptr
	lu 10,4(5) # load s2 limb and update s2_ptr
	ae 7,0,8 # add previous limbs with cy, set cy
	stu 11,4(3) #
	bdn Loop # decrement CTR and loop back
Lend: ae 11,9,10 # add limbs with cy, set cy
	st 7,4(3) #
	st 11,8(3) #
	lil 3,0 # load cy into ...
	aze 3,3 # ... return value register
	br
|
al3xtjames/Clover
| 2,835
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/power/mpih-sub1.S
|
/* IBM POWER sub_n -- Subtract two limb vectors of equal, non-zero length.
*
* Copyright (C) 1992, 1994, 1995, 1996, 1999,
* 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*
# INPUT PARAMETERS
# res_ptr r3
# s1_ptr r4
# s2_ptr r5
# size r6
*/
.toc
.extern _gcry_mpih_sub_n[DS]
.extern ._gcry_mpih_sub_n
.csect [PR]
.align 2
.globl _gcry_mpih_sub_n
.globl ._gcry_mpih_sub_n
.csect _gcry_mpih_sub_n[DS]
_gcry_mpih_sub_n:
.long ._gcry_mpih_sub_n, TOC[tc0], 0
.csect [PR]
._gcry_mpih_sub_n:
# Mirror of add_n using sf/sfe.  POWER's CA is "no borrow" after subtract,
# so the epilogue uses sfe+sfi to convert it into a conventional borrow-out
# return value (0 or 1) in r3.
	andil. 10,6,1 # odd or even number of limbs?
	l 8,0(4) # load least significant s1 limb
	l 0,0(5) # load least significant s2 limb
	cal 3,-4(3) # offset res_ptr, it's updated before it's used
	sri 10,6,1 # count for unrolled loop
	sf 7,0,8 # subtract least significant limbs, set cy
	mtctr 10 # copy count into CTR
	beq 0,Leven # branch if even # of limbs (# of limbs >= 2)
# We have an odd # of limbs. Add the first limbs separately.
	cmpi 1,10,0 # is count for unrolled loop zero?
	bne 1,L1 # branch if not
	st 7,4(3)
	sfe 3,0,0 # load !cy into ...
	sfi 3,3,0 # ... return value register
	br # return
# We added least significant limbs. Now reload the next limbs to enter loop.
L1: lu 8,4(4) # load s1 limb and update s1_ptr
	lu 0,4(5) # load s2 limb and update s2_ptr
	stu 7,4(3)
	sfe 7,0,8 # subtract limbs, set cy
Leven: lu 9,4(4) # load s1 limb and update s1_ptr
	lu 10,4(5) # load s2 limb and update s2_ptr
	bdz Lend # If done, skip loop
Loop: lu 8,4(4) # load s1 limb and update s1_ptr
	lu 0,4(5) # load s2 limb and update s2_ptr
	sfe 11,10,9 # subtract previous limbs with cy, set cy
	stu 7,4(3) #
	lu 9,4(4) # load s1 limb and update s1_ptr
	lu 10,4(5) # load s2 limb and update s2_ptr
	sfe 7,0,8 # subtract previous limbs with cy, set cy
	stu 11,4(3) #
	bdn Loop # decrement CTR and loop back
Lend: sfe 11,10,9 # subtract limbs with cy, set cy
	st 7,4(3) #
	st 11,8(3) #
	sfe 3,0,0 # load !cy into ...
	sfi 3,3,0 # ... return value register
	br
|
al3xtjames/Clover
| 2,600
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/supersparc/udiv.S
|
/* SuperSPARC __udiv_qrnnd division support, used from longlong.h.
* This is for SuperSPARC only, to compensate for its
* semi-functional udiv instruction.
*
* Copyright (C) 1993, 1994, 1996, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
! INPUT PARAMETERS
! rem_ptr i0
! n1 i1
! n0 i2
! d i3
#include "sysdep.h"
#undef ret /* Kludge for glibc */
.text
.align 8
! Floating-point constants: 2^32 and 2^31, used to convert the 32-bit
! operands (which fitod treats as signed) into their unsigned values.
LC0: .double 0r4294967296
LC1: .double 0r2147483648
.align 4
.global C_SYMBOL_NAME(__udiv_qrnnd)
C_SYMBOL_NAME(__udiv_qrnnd):
!#PROLOGUE# 0
save %sp,-104,%sp
!#PROLOGUE# 1
! Strategy: form (n1*2^32 + n0) and d as doubles, estimate q with an FP
! divide, convert back to an integer, then fix the estimate up with one
! integer multiply/compare so quotient and remainder are exact.
! Each "bge/faddd LC0" pair adds 2^32 when a value was negative as signed.
st %i1,[%fp-8]
ld [%fp-8],%f10
sethi %hi(LC0),%o7
fitod %f10,%f4
ldd [%o7+%lo(LC0)],%f8
cmp %i1,0
bge L248
mov %i0,%i5
faddd %f4,%f8,%f4
L248:
st %i2,[%fp-8]
ld [%fp-8],%f10
fmuld %f4,%f8,%f6
cmp %i2,0
bge L249
fitod %f10,%f2
faddd %f2,%f8,%f2
L249:
st %i3,[%fp-8]
faddd %f6,%f2,%f2
ld [%fp-8],%f10
cmp %i3,0
bge L250
fitod %f10,%f4
faddd %f4,%f8,%f4
L250:
! q_est = (n1*2^32 + n0) / d as a double, in %f2.
fdivd %f2,%f4,%f2
sethi %hi(LC1),%o7
ldd [%o7+%lo(LC1)],%f4
! fdtoi is signed, so quotients >= 2^31 are biased down by 2^31 before the
! conversion and the bit is restored afterwards with xor (L251 path).
fcmped %f2,%f4
nop
fbge,a L251
fsubd %f2,%f4,%f2
fdtoi %f2,%f2
st %f2,[%fp-8]
b L252
ld [%fp-8],%i4
L251:
fdtoi %f2,%f2
st %f2,[%fp-8]
ld [%fp-8],%i4
sethi %hi(-2147483648),%g2
xor %i4,%g2,%i4
L252:
! Fix-up: r = n - q_est*d; adjust q_est by at most +/-1.
umul %i3,%i4,%g3
rd %y,%i0
subcc %i2,%g3,%o7
subxcc %i1,%i0,%g0
be L253
cmp %o7,%i3
! Estimate was one too high: decrement q, add d back to the remainder.
add %i4,-1,%i0
add %o7,%i3,%o7
st %o7,[%i5]
ret
restore
L253:
blu L246
mov %i4,%i0
! Estimate was one too low: increment q, subtract d from the remainder.
add %i4,1,%i0
sub %o7,%i3,%o7
L246:
st %o7,[%i5]
ret
restore
|
al3xtjames/Clover
| 1,872
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/hppa/mpih-lshift.S
|
/* hppa lshift
*
* Copyright (C) 1992, 1994, 1998
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (gr26)
* mpi_ptr_t up, (gr25)
* mpi_size_t usize, (gr24)
* unsigned cnt) (gr23)
*/
.code
.export _gcry_mpih_lshift
.label _gcry_mpih_lshift
.proc
.callinfo frame=64,no_calls
.entry
; Walks from the most significant limb downward (ldws,mb pre-decrements).
; SAR is loaded with 32-cnt so vshd extracts each result limb from two
; adjacent source limbs (r22/r29 alternate as "previous" limb).
; Returns the bits shifted out of the top limb in r28.
sh2add %r24,%r25,%r25
sh2add %r24,%r26,%r26
ldws,mb -4(0,%r25),%r22
subi 32,%r23,%r1
mtsar %r1
addib,= -1,%r24,L$0004
vshd %r0,%r22,%r28 ; compute carry out limb
ldws,mb -4(0,%r25),%r29
addib,= -1,%r24,L$0002
vshd %r22,%r29,%r20
; Main loop: 2x unrolled so the two "previous limb" registers alternate.
.label L$loop
ldws,mb -4(0,%r25),%r22
stws,mb %r20,-4(0,%r26)
addib,= -1,%r24,L$0003
vshd %r29,%r22,%r20
ldws,mb -4(0,%r25),%r29
stws,mb %r20,-4(0,%r26)
addib,<> -1,%r24,L$loop
vshd %r22,%r29,%r20
; Tail: flush the last pending result limb(s) and the final sle-style limb.
.label L$0002
stws,mb %r20,-4(0,%r26)
vshd %r29,%r0,%r20
bv 0(%r2)
stw %r20,-4(0,%r26)
.label L$0003
stws,mb %r20,-4(0,%r26)
.label L$0004
vshd %r22,%r0,%r20
bv 0(%r2)
stw %r20,-4(0,%r26)
.exit
.procend
|
al3xtjames/Clover
| 1,816
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/hppa/mpih-rshift.S
|
/* hppa rshift
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (gr26)
* mpi_ptr_t up, (gr25)
* mpi_size_t usize, (gr24)
* unsigned cnt) (gr23)
*/
.code
.export _gcry_mpih_rshift
.label _gcry_mpih_rshift
.proc
.callinfo frame=64,no_calls
.entry
; Mirror of lshift: walks upward (ldws,ma post-increments), SAR = cnt,
; vshd combines adjacent limbs; r22/r29 alternate as the "previous" limb.
; Returns the bits shifted out of the bottom limb in r28.
ldws,ma 4(0,%r25),%r22
mtsar %r23
addib,= -1,%r24,L$r004
vshd %r22,%r0,%r28 ; compute carry out limb
ldws,ma 4(0,%r25),%r29
addib,= -1,%r24,L$r002
vshd %r29,%r22,%r20
; Main loop, 2x unrolled.
.label L$roop
ldws,ma 4(0,%r25),%r22
stws,ma %r20,4(0,%r26)
addib,= -1,%r24,L$r003
vshd %r22,%r29,%r20
ldws,ma 4(0,%r25),%r29
stws,ma %r20,4(0,%r26)
addib,<> -1,%r24,L$roop
vshd %r29,%r22,%r20
; Tail: store pending limb(s) and the final most-significant result limb.
.label L$r002
stws,ma %r20,4(0,%r26)
vshd %r0,%r29,%r20
bv 0(%r2)
stw %r20,0(0,%r26)
.label L$r003
stws,ma %r20,4(0,%r26)
.label L$r004
vshd %r0,%r22,%r20
bv 0(%r2)
stw %r20,0(0,%r26)
.exit
.procend
|
al3xtjames/Clover
| 2,166
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/hppa/mpih-add1.S
|
/* hppa add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Fee Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, (gr26)
* mpi_ptr_t s1_ptr, (gr25)
* mpi_ptr_t s2_ptr, (gr24)
* mpi_size_t size) (gr23)
*
* One might want to unroll this as for other processors, but it turns
* out that the data cache contention after a store makes such
* unrolling useless. We can't come under 5 cycles/limb anyway.
*/
.code
.export _gcry_mpih_add_n
.label _gcry_mpih_add_n
.proc
.callinfo frame=0,no_calls
.entry
; First add is carried out with plain "add" (no carry in); subsequent
; iterations use "addc" in the branch delay slot so the carry bit stays
; live across the loop.  r28 holds the pending sum, stored one trip late.
ldws,ma 4(0,%r25),%r20
ldws,ma 4(0,%r24),%r19
addib,= -1,%r23,L$end ; check for (SIZE == 1)
add %r20,%r19,%r28 ; add first limbs ignoring cy
.label L$loop
ldws,ma 4(0,%r25),%r20
ldws,ma 4(0,%r24),%r19
stws,ma %r28,4(0,%r26)
addib,<> -1,%r23,L$loop
addc %r20,%r19,%r28
.label L$end
; Store the last sum and return the final carry (0 or 1) in r28.
stws %r28,0(0,%r26)
bv 0(%r2)
addc %r0,%r0,%r28
.exit
.procend
|
al3xtjames/Clover
| 7,052
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/hppa/udiv-qrnnd.S
|
/* HP-PA __udiv_qrnnd division support, used from longlong.h.
* This version runs fast on pre-PA7000 CPUs.
*
* Copyright (C) 1993, 1994, 1998, 2001,
* 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
/* INPUT PARAMETERS
* rem_ptr gr26
* n1 gr25
* n0 gr24
* d gr23
*
* The code size is a bit excessive. We could merge the last two ds;addc
* sequences by simply moving the "bb,< Odd" instruction down. The only
* trouble is the FFFFFFFF code that would need some hacking.
*/
.code
.export __udiv_qrnnd
.label __udiv_qrnnd
.proc
.callinfo frame=0,no_calls
.entry
; Fast path: d has its top bit clear, so the PA "ds" divide-step primitive
; can be applied directly.  Each unrolled addc/ds pair produces one
; quotient bit; 32 steps divide the 64-bit n1:n0 by d.
comb,< %r23,0,L$largedivisor
sub %r0,%r23,%r1 ; clear cy as side-effect
ds %r0,%r1,%r0
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r24
ds %r25,%r23,%r25
addc %r24,%r24,%r28
ds %r25,%r23,%r25
; ds leaves a signed remainder; if negative, add d back to correct it.
comclr,>= %r25,%r0,%r0
addl %r25,%r23,%r25
stws %r25,0(0,%r26)
bv 0(%r2)
addc %r28,%r28,%r28
; Slow path: d >= 2^31.  Divide by a halved divisor and repair afterwards.
.label L$largedivisor
extru %r24,31,1,%r19 ; r19 = n0 & 1
bb,< %r23,31,L$odd
extru %r23,30,31,%r22 ; r22 = d >> 1
shd %r25,%r24,1,%r24 ; r24 = new n0
extru %r25,30,31,%r25 ; r25 = new n1
sub %r0,%r22,%r21
ds %r0,%r21,%r0
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
comclr,>= %r25,%r0,%r0
addl %r25,%r22,%r25
; Even d: remainder of (n/2)/(d/2) is scaled back and the saved low bit
; of n0 re-inserted with sh1addl.
sh1addl %r25,%r19,%r25
stws %r25,0(0,%r26)
bv 0(%r2)
addc %r24,%r24,%r28
; Odd d: divide by (d/2 + 1) = r22, then repair quotient and remainder.
.label L$odd
addib,sv,n 1,%r22,L$FF.. ; r22 = (d / 2 + 1)
shd %r25,%r24,1,%r24 ; r24 = new n0
extru %r25,30,31,%r25 ; r25 = new n1
sub %r0,%r22,%r21
ds %r0,%r21,%r0
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r24
ds %r25,%r22,%r25
addc %r24,%r24,%r28
comclr,>= %r25,%r0,%r0
addl %r25,%r22,%r25
sh1addl %r25,%r19,%r25
; We have computed (n1,,n0) / (d + 1), q' = r28, r' = r25
add,nuv %r28,%r25,%r25
addl %r25,%r1,%r25
addc %r0,%r28,%r28
sub,<< %r25,%r23,%r0
addl %r25,%r1,%r25
stws %r25,0(0,%r26)
bv 0(%r2)
addc %r0,%r28,%r28
; This is just a special case of the code above.
; We come here when d == 0xFFFFFFFF
.label L$FF..
add,uv %r25,%r24,%r24
sub,<< %r24,%r23,%r0
ldo 1(%r24),%r24
stws %r24,0(0,%r26)
bv 0(%r2)
addc %r0,%r25,%r28
.exit
.procend
|
al3xtjames/Clover
| 2,245
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/hppa/mpih-sub1.S
|
/* hppa sub_n -- Sub two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
#include "sysdep.h"
#include "asm-syntax.h"
/*******************
* mpi_limb_t
* _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (gr26)
* mpi_ptr_t s1_ptr, (gr25)
* mpi_ptr_t s2_ptr, (gr24)
* mpi_size_t size) (gr23)
*
* One might want to unroll this as for other processors, but it turns
* out that the data cache contention after a store makes such
* unrolling useless. We can't come under 5 cycles/limb anyway.
*/
.code
.export _gcry_mpih_sub_n
; mpi_limb_t _gcry_mpih_sub_n( res_ptr (gr26), s1_ptr (gr25),
;                              s2_ptr (gr24), size (gr23) )
; res = s1 - s2, limb by limb (size > 0); returns the final borrow (0 or 1).
; NOTE(review): PA-RISC branches execute the following instruction
; (delay slot), so the instruction after each addib/bv belongs to the branch.
.label _gcry_mpih_sub_n
.proc
.callinfo frame=0,no_calls
.entry
ldws,ma 4(0,%r25),%r20 ; load first s1 limb, post-increment s1_ptr
ldws,ma 4(0,%r24),%r19 ; load first s2 limb, post-increment s2_ptr
addib,= -1,%r23,L$end ; check for (SIZE == 1)
sub %r20,%r19,%r28 ; subtract first limbs ignoring cy (delay slot)
.label L$loop
ldws,ma 4(0,%r25),%r20 ; next s1 limb
ldws,ma 4(0,%r24),%r19 ; next s2 limb
stws,ma %r28,4(0,%r26) ; store previous difference limb
addib,<> -1,%r23,L$loop ; decrement count, loop while non-zero
subb %r20,%r19,%r28 ; subtract with borrow (delay slot)
.label L$end
stws %r28,0(0,%r26) ; store final difference limb
addc %r0,%r0,%r28 ; %r28 = carry bit (1 means no borrow occurred)
bv 0(%r2) ; return
subi 1,%r28,%r28 ; 1 - carry = borrow out (delay slot)
.exit
.procend
|
al3xtjames/Clover
| 2,323
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pa7100/mpih-lshift.S
|
/* hppa lshift
* optimized for the PA7100, where it runs at 3.25 cycles/limb
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (gr26)
* mpi_ptr_t up, (gr25)
* mpi_size_t usize, (gr24)
* unsigned cnt) (gr23)
*/
.code
.export _gcry_mpih_lshift
; mpi_limb_t _gcry_mpih_lshift( wp (gr26), up (gr25),
;                               usize (gr24), cnt (gr23) )
; Shifts the usize-limb vector at up left by cnt bits, storing at wp while
; walking from the most significant limb downward; returns the bits shifted
; out of the top limb.  Uses vshd with sar = 32-cnt to extract each output
; limb from a pair of adjacent input limbs.  Branches have delay slots.
; assumes 0 < cnt < 32 -- TODO confirm against callers
.label _gcry_mpih_lshift
.proc
.callinfo frame=64,no_calls
.entry
sh2add %r24,%r25,%r25 ; up += usize*4 (point past last limb)
sh2add %r24,%r26,%r26 ; wp += usize*4
ldws,mb -4(0,%r25),%r22 ; load most significant limb, pre-decrement
subi 32,%r23,%r1 ; sar = 32 - cnt
mtsar %r1
addib,= -1,%r24,L$0004 ; single-limb case
vshd %r0,%r22,%r28 ; compute carry out limb (delay slot)
ldws,mb -4(0,%r25),%r29
addib,<= -5,%r24,L$rest ; too few limbs for the unrolled loop
vshd %r22,%r29,%r20 ; first output limb (delay slot)
.label L$loop
; main loop, unrolled 4x, alternating %r22/%r29 as the limb pair
ldws,mb -4(0,%r25),%r22
stws,mb %r20,-4(0,%r26)
vshd %r29,%r22,%r20
ldws,mb -4(0,%r25),%r29
stws,mb %r20,-4(0,%r26)
vshd %r22,%r29,%r20
ldws,mb -4(0,%r25),%r22
stws,mb %r20,-4(0,%r26)
vshd %r29,%r22,%r20
ldws,mb -4(0,%r25),%r29
stws,mb %r20,-4(0,%r26)
addib,> -4,%r24,L$loop
vshd %r22,%r29,%r20
.label L$rest
; handle the 0..3 leftover limbs one at a time
addib,= 4,%r24,L$end1
nop
.label L$eloop
ldws,mb -4(0,%r25),%r22
stws,mb %r20,-4(0,%r26)
addib,<= -1,%r24,L$end2
vshd %r29,%r22,%r20
ldws,mb -4(0,%r25),%r29
stws,mb %r20,-4(0,%r26)
addib,> -1,%r24,L$eloop
vshd %r22,%r29,%r20
.label L$end1
stws,mb %r20,-4(0,%r26)
vshd %r29,%r0,%r20 ; least significant limb: shift zeros in
bv 0(%r2)
stw %r20,-4(0,%r26) ; store it (delay slot)
.label L$end2
stws,mb %r20,-4(0,%r26)
.label L$0004
vshd %r22,%r0,%r20 ; least significant limb: shift zeros in
bv 0(%r2)
stw %r20,-4(0,%r26) ; store it (delay slot)
.exit
.procend
|
al3xtjames/Clover
| 2,262
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/pa7100/mpih-rshift.S
|
/* hppa rshift
* optimized for the PA7100, where it runs at 3.25 cycles/limb
*
* Copyright (C) 1992, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (gr26)
* mpi_ptr_t up, (gr25)
* mpi_size_t usize, (gr24)
* unsigned cnt) (gr23)
*/
.code
.export _gcry_mpih_rshift
; mpi_limb_t _gcry_mpih_rshift( wp (gr26), up (gr25),
;                               usize (gr24), cnt (gr23) )
; Shifts the usize-limb vector at up right by cnt bits, storing at wp while
; walking from the least significant limb upward; returns the bits shifted
; out of the bottom limb.  Mirror image of _gcry_mpih_lshift: vshd with
; sar = cnt extracts each output limb from a pair of adjacent input limbs.
; assumes 0 < cnt < 32 -- TODO confirm against callers
.label _gcry_mpih_rshift
.proc
.callinfo frame=64,no_calls
.entry
ldws,ma 4(0,%r25),%r22 ; load least significant limb, post-increment
mtsar %r23 ; sar = cnt
addib,= -1,%r24,L$r004 ; single-limb case
vshd %r22,%r0,%r28 ; compute carry out limb (delay slot)
ldws,ma 4(0,%r25),%r29
addib,<= -5,%r24,L$rrest ; too few limbs for the unrolled loop
vshd %r29,%r22,%r20 ; first output limb (delay slot)
.label L$roop
; main loop, unrolled 4x, alternating %r22/%r29 as the limb pair
ldws,ma 4(0,%r25),%r22
stws,ma %r20,4(0,%r26)
vshd %r22,%r29,%r20
ldws,ma 4(0,%r25),%r29
stws,ma %r20,4(0,%r26)
vshd %r29,%r22,%r20
ldws,ma 4(0,%r25),%r22
stws,ma %r20,4(0,%r26)
vshd %r22,%r29,%r20
ldws,ma 4(0,%r25),%r29
stws,ma %r20,4(0,%r26)
addib,> -4,%r24,L$roop
vshd %r29,%r22,%r20
.label L$rrest
; handle the 0..3 leftover limbs one at a time
addib,= 4,%r24,L$rend1
nop
.label L$eroop
ldws,ma 4(0,%r25),%r22
stws,ma %r20,4(0,%r26)
addib,<= -1,%r24,L$rend2
vshd %r22,%r29,%r20
ldws,ma 4(0,%r25),%r29
stws,ma %r20,4(0,%r26)
addib,> -1,%r24,L$eroop
vshd %r29,%r22,%r20
.label L$rend1
stws,ma %r20,4(0,%r26)
vshd %r0,%r29,%r20 ; most significant limb: shift zeros in
bv 0(%r2)
stw %r20,0(0,%r26) ; store it (delay slot)
.label L$rend2
stws,ma %r20,4(0,%r26)
.label L$r004
vshd %r0,%r22,%r20 ; most significant limb: shift zeros in
bv 0(%r2)
stw %r20,0(0,%r26) ; store it (delay slot)
.exit
.procend
|
al3xtjames/Clover
| 2,721
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32v8/mpih-mul1.S
|
/* SPARC v8 __mpn_mul_1 -- Multiply a limb vector with a single limb and
* store the product in a second limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
! INPUT PARAMETERS
! res_ptr o0
! s1_ptr o1
! size o2
! s2_limb o3
#include "sysdep.h"
.text
.align 8
! mpi_limb_t _gcry_mpih_mul_1( res_ptr (%o0), s1_ptr (%o1),
!                              size (%o2), s2_limb (%o3) )
! res = s1 * s2_limb; returns the most significant (carry) limb.
! Computes a jump into a 4x-unrolled loop according to size % 4; each
! LLxx jump-table slot is 16 bytes (4 instructions).  umul leaves the low
! 32 product bits in the destination and the high 32 bits in %y.
! The "! n" tags mark which unrolled iteration an instruction belongs to.
.global C_SYMBOL_NAME(_gcry_mpih_mul_1)
C_SYMBOL_NAME(_gcry_mpih_mul_1):
sll %o2,4,%g1 ! %g1 = size * 16
and %g1,(4-1)<<4,%g1 ! %g1 = (size % 4) * 16 = jump-table offset
#if PIC
mov %o7,%g4 ! Save return address register
call 1f
add %o7,LL-1f,%g3 ! PIC: compute address of LL (delay slot)
1: mov %g4,%o7 ! Restore return address register
#else
sethi %hi(LL),%g3
or %g3,%lo(LL),%g3 ! %g3 = address of jump table LL
#endif
jmp %g3+%g1 ! dispatch on size % 4
ld [%o1+0],%o4 ! 1  load first s1 limb (delay slot)
LL:
LL00: add %o0,-4,%o0 ! entry adjustments per residue class
add %o1,-4,%o1
b Loop00 /* 4, 8, 12, ... */
orcc %g0,%g0,%g2 ! clear cy_limb %g2 + carry (delay slot)
LL01: b Loop01 /* 1, 5, 9, ... */
orcc %g0,%g0,%g2
nop
nop
LL10: add %o0,-12,%o0 /* 2, 6, 10, ... */
add %o1,4,%o1
b Loop10
orcc %g0,%g0,%g2
nop
LL11: add %o0,-8,%o0 /* 3, 7, 11, ... */
add %o1,-8,%o1
b Loop11
orcc %g0,%g0,%g2
Loop: addcc %g3,%g2,%g3 ! 1  add carry limb into low product
ld [%o1+4],%o4 ! 2
st %g3,[%o0+0] ! 1
rd %y,%g2 ! 1  fetch high 32 product bits
Loop00: umul %o4,%o3,%g3 ! 2
addxcc %g3,%g2,%g3 ! 2  propagate carry from previous limb
ld [%o1+8],%o4 ! 3
st %g3,[%o0+4] ! 2
rd %y,%g2 ! 2
Loop11: umul %o4,%o3,%g3 ! 3
addxcc %g3,%g2,%g3 ! 3
ld [%o1+12],%o4 ! 4
add %o1,16,%o1 ! advance s1_ptr by 4 limbs
st %g3,[%o0+8] ! 3
rd %y,%g2 ! 3
Loop10: umul %o4,%o3,%g3 ! 4
addxcc %g3,%g2,%g3 ! 4
ld [%o1+0],%o4 ! 1
st %g3,[%o0+12] ! 4
add %o0,16,%o0 ! advance res_ptr by 4 limbs
rd %y,%g2 ! 4
addx %g0,%g2,%g2 ! fold carry into cy_limb
Loop01: addcc %o2,-4,%o2 ! four fewer limbs to go
bg Loop
umul %o4,%o3,%g3 ! 1  (delay slot)
addcc %g3,%g2,%g3 ! 4  final limb: add carry limb
st %g3,[%o0+0] ! 4
rd %y,%g2 ! 4
retl
addx %g0,%g2,%o0 ! return carry limb (delay slot)
|
al3xtjames/Clover
| 3,062
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32v8/mpih-mul2.S
|
/* SPARC v8 __mpn_addmul_1 -- Multiply a limb vector with a limb and
* add the result to a second limb vector.
*
* Copyright (C) 1992, 1993, 1994, 1995, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
! INPUT PARAMETERS
! res_ptr o0
! s1_ptr o1
! size o2
! s2_limb o3
#include "sysdep.h"
.text
.align 4
! mpi_limb_t _gcry_mpih_addmul_1( res_ptr (%o0), s1_ptr (%o1),
!                                 size (%o2), s2_limb (%o3) )
! res += s1 * s2_limb; returns the most significant (carry) limb.
! Computes a jump into a 4x-unrolled loop according to size % 4; each
! LLxx jump-table slot is 16 bytes (4 instructions).  umul leaves the low
! 32 product bits in the destination and the high 32 bits in %y.
! The "! n" tags mark which unrolled iteration an instruction belongs to.
.global C_SYMBOL_NAME(_gcry_mpih_addmul_1)
C_SYMBOL_NAME(_gcry_mpih_addmul_1):
orcc %g0,%g0,%g2 ! clear cy_limb (%g2) and carry flag
ld [%o1+0],%o4 ! 1  load first s1 limb
sll %o2,4,%g1 ! %g1 = size * 16
and %g1,(4-1)<<4,%g1 ! %g1 = (size % 4) * 16 = jump-table offset
#if PIC
mov %o7,%g4 ! Save return address register
call 1f
add %o7,LL-1f,%g3 ! PIC: compute address of LL (delay slot)
1: mov %g4,%o7 ! Restore return address register
#else
sethi %hi(LL),%g3
or %g3,%lo(LL),%g3 ! %g3 = address of jump table LL
#endif
jmp %g3+%g1 ! dispatch on size % 4
nop
LL:
LL00: add %o0,-4,%o0 ! entry adjustments per residue class
b Loop00 /* 4, 8, 12, ... */
add %o1,-4,%o1
nop
LL01: b Loop01 /* 1, 5, 9, ... */
nop
nop
nop
LL10: add %o0,-12,%o0 /* 2, 6, 10, ... */
b Loop10
add %o1,4,%o1
nop
LL11: add %o0,-8,%o0 /* 3, 7, 11, ... */
b Loop11
add %o1,-8,%o1
nop
! Main loop: per limb -- umul, fold carry, load res limb, add, store.
1: addcc %g3,%g2,%g3 ! 1
ld [%o1+4],%o4 ! 2
rd %y,%g2 ! 1  fetch high 32 product bits
addx %g0,%g2,%g2 ! fold carry into cy_limb
ld [%o0+0],%g1 ! 2  load res limb
addcc %g1,%g3,%g3 ! add product into res
st %g3,[%o0+0] ! 1
Loop00: umul %o4,%o3,%g3 ! 2
ld [%o0+4],%g1 ! 2
addxcc %g3,%g2,%g3 ! 2
ld [%o1+8],%o4 ! 3
rd %y,%g2 ! 2
addx %g0,%g2,%g2
nop
addcc %g1,%g3,%g3
st %g3,[%o0+4] ! 2
Loop11: umul %o4,%o3,%g3 ! 3
addxcc %g3,%g2,%g3 ! 3
ld [%o1+12],%o4 ! 4
rd %y,%g2 ! 3
add %o1,16,%o1 ! advance s1_ptr by 4 limbs
addx %g0,%g2,%g2
ld [%o0+8],%g1 ! 2
addcc %g1,%g3,%g3
st %g3,[%o0+8] ! 3
Loop10: umul %o4,%o3,%g3 ! 4
addxcc %g3,%g2,%g3 ! 4
ld [%o1+0],%o4 ! 1
rd %y,%g2 ! 4
addx %g0,%g2,%g2
ld [%o0+12],%g1 ! 2
addcc %g1,%g3,%g3
st %g3,[%o0+12] ! 4
add %o0,16,%o0 ! advance res_ptr by 4 limbs
addx %g0,%g2,%g2
Loop01: addcc %o2,-4,%o2 ! four fewer limbs to go
bg 1b
umul %o4,%o3,%g3 ! 1  (delay slot)
addcc %g3,%g2,%g3 ! 4  final limb
rd %y,%g2 ! 4
addx %g0,%g2,%g2
ld [%o0+0],%g1 ! 2
addcc %g1,%g3,%g3
st %g3,[%o0+0] ! 4
addx %g0,%g2,%o0 ! return carry limb
retl
nop
! umul, ld, addxcc, rd, st
! umul, ld, addxcc, rd, ld, addcc, st, addx
|
al3xtjames/Clover
| 1,935
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/sparc32v8/mpih-mul3.S
|
/* SPARC v8 __mpn_submul_1 -- Multiply a limb vector with a limb and
* subtract the result from a second limb vector.
*
* Copyright (C) 1992, 1993, 1994, 1998,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Note: This code is heavily based on the GNU MP Library.
* Actually it's the same code with only minor changes in the
* way the data is stored; this is to support the abstraction
* of an optional secure memory allocation which may be used
* to avoid revealing of sensitive data due to paging etc.
*/
! INPUT PARAMETERS
! res_ptr o0
! s1_ptr o1
! size o2
! s2_limb o3
#include "sysdep.h"
.text
.align 4
! mpi_limb_t _gcry_mpih_submul_1( res_ptr (%o0), s1_ptr (%o1),
!                                 size (%o2), s2_limb (%o3) )
! res -= s1 * s2_limb; returns the most significant (carry) limb.
! Simple (non-unrolled) loop: both pointers are biased by -size*4 and
! indexed by %o2, which counts up from -size*4 to 0.
.global C_SYMBOL_NAME(_gcry_mpih_submul_1)
C_SYMBOL_NAME(_gcry_mpih_submul_1):
sub %g0,%o2,%o2 ! negate ...
sll %o2,2,%o2 ! ... and scale size
sub %o1,%o2,%o1 ! o1 is offset s1_ptr
sub %o0,%o2,%g1 ! g1 is offset res_ptr
mov 0,%o0 ! clear cy_limb
Loop: ld [%o1+%o2],%o4 ! load s1 limb
ld [%g1+%o2],%g2 ! load res limb
umul %o4,%o3,%o5 ! low 32 product bits -> %o5, high -> %y
rd %y,%g3
addcc %o5,%o0,%o5 ! add cy_limb into low product
addx %g3,0,%o0 ! cy_limb = high product + carry
subcc %g2,%o5,%g2 ! res limb -= product
addx %o0,0,%o0 ! fold borrow into cy_limb
st %g2,[%g1+%o2]
addcc %o2,4,%o2 ! step index; zero when done
bne Loop
nop
retl
nop ! carry limb already in %o0
|
al3xtjames/Clover
| 3,604
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-mul1.S
|
/* PowerPC-32 mul_1 -- Multiply a limb vector with a limb and store
* the result in a second limb vector.
*
* Copyright (C) 1992, 1993, 1994, 1995,
* 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (r3)
* mpi_ptr_t s1_ptr, (r4)
* mpi_size_t s1_size, (r5)
* mpi_limb_t s2_limb) (r6)
*
* This is a fairly straightforward implementation. The timing of the PC601
* is hard to understand, so I will wait to optimize this until I have some
* hardware to play with.
*
* The code trivially generalizes to 64 bit limbs for the PC620.
*/
/*
 * mpi_limb_t _gcry_mpih_mul_1( res_ptr (r3), s1_ptr (r4),
 *                              s1_size (r5), s2_limb (r6) )
 * res = s1 * s2_limb; returns the most significant (carry) limb.
 * AIX/XCOFF flavour: function descriptor in _gcry_mpih_mul_1[DS],
 * code in the [PR] csect.  mullw/mulhwu give the low/high 32 bits
 * of each 32x32 product; adde chains the carry between limbs.
 */
.toc
.csect ._gcry_mpih_mul_1[PR]
.align 2
.globl _gcry_mpih_mul_1
.globl ._gcry_mpih_mul_1
.csect _gcry_mpih_mul_1[DS]
_gcry_mpih_mul_1:
.long ._gcry_mpih_mul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_mul_1[PR]
._gcry_mpih_mul_1:
mtctr 5 # CTR = s1_size (loop counter)
lwz 0,0(4) # r0 = first s1 limb
mullw 7,0,6 # r7 = low 32 bits of product
mulhwu 10,0,6 # r10 = high 32 bits (carry into next limb)
addi 3,3,-4 # adjust res_ptr
addic 5,5,0 # clear cy with dummy insn
bdz Lend # single limb: skip loop
Loop: lwzu 0,4(4) # load next s1 limb, bump s1_ptr
stwu 7,4(3) # store previous result limb
mullw 8,0,6 # low product
adde 7,8,10 # r7 = low + previous high + cy
mulhwu 10,0,6 # high product for next iteration
bdnz Loop
Lend: stw 7,4(3) # store last result limb
addze 3,10 # return value = high product + cy
blr
#else
/* Multiply a limb vector by a limb, for PowerPC.
Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* mp_limb_t mpn_mul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
mp_size_t s1_size, mp_limb_t s2_limb)
Calculate s1*s2 and put result in res_ptr; return carry. */
/* USE_PPC_PATCHES variant (glibc-derived): same algorithm as the XCOFF
   version above but using ENTRY/END and %rN register syntax.
   res = s1 * s2_limb; returns the carry limb in r3. */
ENTRY(_gcry_mpih_mul_1)
mtctr %r5 # CTR = s1_size (loop counter)
lwz %r0,0(%r4) # r0 = first s1 limb
mullw %r7,%r0,%r6 # low 32 bits of product
mulhwu %r10,%r0,%r6 # high 32 bits (carry into next limb)
addi %r3,%r3,-4 # adjust res_ptr
addic %r5,%r5,0 # clear cy with dummy insn
bdz 1f # single limb: skip loop
0: lwzu %r0,4(%r4) # load next s1 limb, bump s1_ptr
stwu %r7,4(%r3) # store previous result limb
mullw %r8,%r0,%r6 # low product
adde %r7,%r8,%r10 # low + previous high + cy
mulhwu %r10,%r0,%r6 # high product for next iteration
bdnz 0b
1: stw %r7,4(%r3) # store last result limb
addze %r3,%r10 # return value = high product + cy
blr
END(_gcry_mpih_mul_1)
#endif
|
al3xtjames/Clover
| 3,717
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-mul2.S
|
/* PowerPC-32 addmul_1 -- Multiply a limb vector with a limb and add
* the result to a second limb vector.
*
* Copyright (C) 1995, 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (r3)
* mpi_ptr_t s1_ptr, (r4)
* mpi_size_t s1_size, (r5)
* mpi_limb_t s2_limb) (r6)
*
* This is a fairly straightforward implementation. The timing of the PC601
* is hard to understand, so I will wait to optimize this until I have some
* hardware to play with.
*
* The code trivially generalizes to 64 bit limbs for the PC620.
*/
/*
 * mpi_limb_t _gcry_mpih_addmul_1( res_ptr (r3), s1_ptr (r4),
 *                                 s1_size (r5), s2_limb (r6) )
 * res += s1 * s2_limb; returns the most significant (carry) limb.
 * AIX/XCOFF flavour.  Two carry chains are folded: the multiply carry
 * (high halves via adde/addze) and the addition into the res limbs (addc).
 */
.toc
.csect ._gcry_mpih_addmul_1[PR]
.align 2
.globl _gcry_mpih_addmul_1
.globl ._gcry_mpih_addmul_1
.csect _gcry_mpih_addmul_1[DS]
_gcry_mpih_addmul_1:
.long ._gcry_mpih_addmul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_addmul_1[PR]
._gcry_mpih_addmul_1:
mtctr 5 # CTR = s1_size
lwz 0,0(4) # r0 = first s1 limb
mullw 7,0,6 # low 32 bits of product
mulhwu 10,0,6 # high 32 bits
lwz 9,0(3) # r9 = first res limb
addc 8,7,9 # r8 = res + low product, set cy
addi 3,3,-4 # adjust res_ptr
bdz Lend
Loop: lwzu 0,4(4) # next s1 limb
stwu 8,4(3) # store previous sum limb
mullw 8,0,6 # low product
adde 7,8,10 # low + previous high + cy
mulhwu 10,0,6 # high product
lwz 9,4(3) # next res limb
addze 10,10 # fold cy into high product
addc 8,7,9 # add into res limb, set cy
bdnz Loop
Lend: stw 8,4(3) # store last sum limb
addze 3,10 # return carry limb
blr
#else
/* Multiply a limb vector by a single limb, for PowerPC.
Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* mp_limb_t mpn_addmul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
mp_size_t s1_size, mp_limb_t s2_limb)
Calculate res+s1*s2 and put result back in res; return carry. */
/* USE_PPC_PATCHES variant (glibc-derived): same algorithm as the XCOFF
   version above.  res += s1 * s2_limb; returns the carry limb in r3. */
ENTRY(_gcry_mpih_addmul_1)
mtctr %r5 # CTR = s1_size
lwz %r0,0(%r4) # first s1 limb
mullw %r7,%r0,%r6 # low 32 bits of product
mulhwu %r10,%r0,%r6 # high 32 bits
lwz %r9,0(%r3) # first res limb
addc %r8,%r7,%r9 # res + low product, set cy
addi %r3,%r3,-4 /* adjust res_ptr */
bdz 1f
0: lwzu %r0,4(%r4) # next s1 limb
stwu %r8,4(%r3) # store previous sum limb
mullw %r8,%r0,%r6 # low product
adde %r7,%r8,%r10 # low + previous high + cy
mulhwu %r10,%r0,%r6 # high product
lwz %r9,4(%r3) # next res limb
addze %r10,%r10 # fold cy into high product
addc %r8,%r7,%r9 # add into res limb, set cy
bdnz 0b
1: stw %r8,4(%r3) # store last sum limb
addze %r3,%r10 # return carry limb
blr
END(_gcry_mpih_addmul_1)
#endif
|
al3xtjames/Clover
| 5,431
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-lshift.S
|
/* PowerPC-32 lshift
*
* Copyright (C) 1995, 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, (r3)
* mpi_ptr_t up, (r4)
* mpi_size_t usize, (r5)
* unsigned cnt) (r6)
*/
/*
 * mpi_limb_t _gcry_mpih_lshift( wp (r3), up (r4), usize (r5), cnt (r6) )
 * Shifts the usize-limb vector at up left by cnt bits, storing at wp
 * while walking from the most significant limb downward; returns the
 * bits shifted out of the top limb.  2x software-pipelined loop that
 * alternates r10/r11 as the adjacent limb pair.
 * assumes 0 < cnt < 32 -- TODO confirm against callers
 */
.toc
.csect .text[PR]
.align 2
.globl _gcry_mpih_lshift
.globl ._gcry_mpih_lshift
.csect _gcry_mpih_lshift[DS]
_gcry_mpih_lshift:
.long ._gcry_mpih_lshift, TOC[tc0], 0
.csect .text[PR]
._gcry_mpih_lshift:
mtctr 5 # copy size into CTR
slwi 0,5,2 # r0 = usize * 4 bytes
add 7,3,0 # make r7 point at end of res
add 4,4,0 # make r4 point at end of s1
subfic 8,6,32 # r8 = 32 - cnt
lwzu 11,-4(4) # load first s1 limb
srw 3,11,8 # compute function return value
bdz Lend1
Loop: lwzu 10,-4(4) # next lower limb
slw 9,11,6 # high part from current limb
srw 12,10,8 # low part from next limb
or 9,9,12 # combined output limb
stwu 9,-4(7)
bdz Lend2
lwzu 11,-4(4) # same again with roles of r10/r11 swapped
slw 9,10,6
srw 12,11,8
or 9,9,12
stwu 9,-4(7)
bdnz Loop
Lend1: slw 0,11,6 # last limb: shift zeros in
stw 0,-4(7)
blr
Lend2: slw 0,10,6 # last limb (other phase)
stw 0,-4(7)
blr
#else
/* Shift a limb left, low level routine.
Copyright (C) 1996, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* mp_limb_t mpn_lshift (mp_ptr wp, mp_srcptr up, mp_size_t usize,
unsigned int cnt) */
/* USE_PPC_PATCHES variant (glibc-derived).  Same contract as above:
   shift up[0..usize) left by cnt bits into wp, return the shifted-out
   bits.  For size >= 16 it dispatches, PIC-safely, into one of 31
   fixed-size DO_LSHIFT chunks specialized for each shift count. */
EALIGN(_gcry_mpih_lshift,3,0)
mtctr %r5 # copy size into CTR
cmplwi %cr0,%r5,16 # is size < 16
slwi %r0,%r5,2 # r0 = usize * 4 bytes
add %r7,%r3,%r0 # make r7 point at end of res
add %r4,%r4,%r0 # make r4 point at end of s1
lwzu %r11,-4(%r4) # load first s1 limb
subfic %r8,%r6,32 # r8 = 32 - cnt
srw %r3,%r11,%r8 # compute function return value
bge %cr0,L(big) # branch if size >= 16
bdz L(end1)
/* generic 2x software-pipelined loop for small sizes */
0: lwzu %r10,-4(%r4)
slw %r9,%r11,%r6 # high part from current limb
srw %r12,%r10,%r8 # low part from next limb
or %r9,%r9,%r12
stwu %r9,-4(%r7)
bdz L(end2)
lwzu %r11,-4(%r4)
slw %r9,%r10,%r6
srw %r12,%r11,%r8
or %r9,%r9,%r12
stwu %r9,-4(%r7)
bdnz 0b
L(end1):slw %r0,%r11,%r6 # last limb: shift zeros in
stw %r0,-4(%r7)
blr
/* Guaranteed not to succeed. */
L(boom): tweq %r0,%r0
/* We imitate a case statement, by using (yuk!) fixed-length code chunks,
of size 4*12 bytes. We have to do this (or something) to make this PIC. */
L(big): mflr %r9 # save real LR
bltl- %cr0,L(boom) # Never taken, only used to set LR.
slwi %r10,%r6,4 # cnt*16 ...
mflr %r12 # ... r12 = address of L(boom) via LR
add %r10,%r12,%r10
slwi %r8,%r6,5 # ... plus cnt*32 = cnt*48 byte offset
add %r10,%r8,%r10 # r10 = DO_LSHIFT(cnt) chunk address
mtctr %r10
addi %r5,%r5,-1
mtlr %r9 # restore caller's LR
bctr # jump to the specialized chunk
L(end2):slw %r0,%r10,%r6 # last limb (other phase)
stw %r0,-4(%r7)
blr
/* Each chunk is 12 instructions = 48 bytes, matching the cnt*48
   offset computed at L(big) above. */
#define DO_LSHIFT(n) \
mtctr %r5; \
0: lwzu %r10,-4(%r4); \
slwi %r9,%r11,n; \
inslwi %r9,%r10,n,32-n; \
stwu %r9,-4(%r7); \
bdz- L(end2); \
lwzu %r11,-4(%r4); \
slwi %r9,%r10,n; \
inslwi %r9,%r11,n,32-n; \
stwu %r9,-4(%r7); \
bdnz 0b; \
b L(end1)
DO_LSHIFT(1)
DO_LSHIFT(2)
DO_LSHIFT(3)
DO_LSHIFT(4)
DO_LSHIFT(5)
DO_LSHIFT(6)
DO_LSHIFT(7)
DO_LSHIFT(8)
DO_LSHIFT(9)
DO_LSHIFT(10)
DO_LSHIFT(11)
DO_LSHIFT(12)
DO_LSHIFT(13)
DO_LSHIFT(14)
DO_LSHIFT(15)
DO_LSHIFT(16)
DO_LSHIFT(17)
DO_LSHIFT(18)
DO_LSHIFT(19)
DO_LSHIFT(20)
DO_LSHIFT(21)
DO_LSHIFT(22)
DO_LSHIFT(23)
DO_LSHIFT(24)
DO_LSHIFT(25)
DO_LSHIFT(26)
DO_LSHIFT(27)
DO_LSHIFT(28)
DO_LSHIFT(29)
DO_LSHIFT(30)
DO_LSHIFT(31)
END(_gcry_mpih_lshift)
#endif
|
al3xtjames/Clover
| 3,470
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-rshift.S
|
/* PowerPC-32 rshift
*
* Copyright (C) 1995, 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, (r3)
* mpi_ptr_t up, (r4)
* mpi_size_t usize, (r5)
* unsigned cnt) (r6)
*/
/*
 * mpi_limb_t _gcry_mpih_rshift( wp (r3), up (r4), usize (r5), cnt (r6) )
 * Shifts the usize-limb vector at up right by cnt bits, storing at wp
 * while walking from the least significant limb upward; returns the
 * bits shifted out of the bottom limb.  Mirror of _gcry_mpih_lshift.
 * assumes 0 < cnt < 32 -- TODO confirm against callers
 */
.toc
.csect .text[PR]
.align 2
.globl _gcry_mpih_rshift
.globl ._gcry_mpih_rshift
.csect _gcry_mpih_rshift[DS]
_gcry_mpih_rshift:
.long ._gcry_mpih_rshift, TOC[tc0], 0
.csect .text[PR]
._gcry_mpih_rshift:
mtctr 5 # copy size into CTR
addi 7,3,-4 # move adjusted res_ptr to free return reg
subfic 8,6,32 # r8 = 32 - cnt
lwz 11,0(4) # load first s1 limb
slw 3,11,8 # compute function return value
bdz Lend1
Loop: lwzu 10,4(4) # next higher limb
srw 9,11,6 # low part from current limb
slw 12,10,8 # high part from next limb
or 9,9,12 # combined output limb
stwu 9,4(7)
bdz Lend2
lwzu 11,4(4) # same again with roles of r10/r11 swapped
srw 9,10,6
slw 12,11,8
or 9,9,12
stwu 9,4(7)
bdnz Loop
Lend1: srw 0,11,6 # last limb: shift zeros in
stw 0,4(7)
blr
Lend2: srw 0,10,6 # last limb (other phase)
stw 0,4(7)
blr
#else
/* Shift a limb right, low level routine.
Copyright (C) 1995, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* INPUT PARAMETERS
res_ptr r3
s1_ptr r4
size r5
cnt r6 */
/* USE_PPC_PATCHES variant (glibc-derived): identical algorithm to the
   XCOFF version above.  Returns the bits shifted out of up[0] in r3. */
ENTRY(_gcry_mpih_rshift)
mtctr 5 # copy size into CTR
addi 7,3,-4 # move adjusted res_ptr to free return reg
subfic 8,6,32 # r8 = 32 - cnt
lwz 11,0(4) # load first s1 limb
slw 3,11,8 # compute function return value
bdz 1f
0: lwzu 10,4(4) # next higher limb
srw 9,11,6 # low part from current limb
slw 12,10,8 # high part from next limb
or 9,9,12
stwu 9,4(7)
bdz 2f
lwzu 11,4(4) # other software-pipeline phase
srw 9,10,6
slw 12,11,8
or 9,9,12
stwu 9,4(7)
bdnz 0b
1: srw 0,11,6 # last limb: shift zeros in
stw 0,4(7)
blr
2: srw 0,10,6 # last limb (other phase)
stw 0,4(7)
blr
END(_gcry_mpih_rshift)
#endif
|
al3xtjames/Clover
| 3,915
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-mul3.S
|
/* PowerPC-32 submul_1 -- Multiply a limb vector with a limb and subtract
* the result from a second limb vector.
*
* Copyright (C) 1995, 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (r3)
* mpi_ptr_t s1_ptr, (r4)
* mpi_size_t s1_size, (r5)
* mpi_limb_t s2_limb) (r6)
*
* This is a fairly straightforward implementation. The timing of the PC601
* is hard to understand, so I will wait to optimize this until I have some
* hardware to play with.
*
* The code trivially generalizes to 64 bit limbs for the PC620.
*/
/*
 * mpi_limb_t _gcry_mpih_submul_1( res_ptr (r3), s1_ptr (r4),
 *                                 s1_size (r5), s2_limb (r6) )
 * res -= s1 * s2_limb; returns the most significant (carry/borrow) limb.
 * AIX/XCOFF flavour.  Trick: subfc sets CA to "no borrow", and the
 * following addc of the result and subtrahend re-derives the inverted
 * carry so the multiply chain (adde/addze) accumulates borrows.
 */
.toc
.csect ._gcry_mpih_submul_1[PR]
.align 2
.globl _gcry_mpih_submul_1
.globl ._gcry_mpih_submul_1
.csect _gcry_mpih_submul_1[DS]
_gcry_mpih_submul_1:
.long ._gcry_mpih_submul_1[PR], TOC[tc0], 0
.csect ._gcry_mpih_submul_1[PR]
._gcry_mpih_submul_1:
mtctr 5 # CTR = s1_size
lwz 0,0(4) # first s1 limb
mullw 7,0,6 # low 32 bits of product
mulhwu 10,0,6 # high 32 bits
lwz 9,0(3) # first res limb
subfc 8,7,9 # r8 = res - low product
addc 7,7,8 # invert cy (r7 is junk)
addi 3,3,-4 # adjust res_ptr
bdz Lend
Loop: lwzu 0,4(4) # next s1 limb
stwu 8,4(3) # store previous difference limb
mullw 8,0,6 # low product
adde 7,8,10 # low + previous high + inverted cy
mulhwu 10,0,6 # high product
lwz 9,4(3) # next res limb
addze 10,10 # fold cy into high product
subfc 8,7,9 # res limb -= product limb
addc 7,7,8 # invert cy (r7 is junk)
bdnz Loop
Lend: stw 8,4(3) # store last difference limb
addze 3,10 # return borrow limb
blr
#else
/* Multiply a limb vector by a single limb, for PowerPC.
Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* mp_limb_t mpn_submul_1 (mp_ptr res_ptr, mp_srcptr s1_ptr,
mp_size_t s1_size, mp_limb_t s2_limb)
Calculate res-s1*s2 and put result back in res; return carry. */
/* USE_PPC_PATCHES variant (glibc-derived): same borrow-inversion trick
   as the XCOFF version above.  res -= s1 * s2_limb; returns borrow. */
ENTRY(_gcry_mpih_submul_1)
mtctr %r5 # CTR = s1_size
lwz %r0,0(%r4) # first s1 limb
mullw %r7,%r0,%r6 # low 32 bits of product
mulhwu %r10,%r0,%r6 # high 32 bits
lwz %r9,0(%r3) # first res limb
subf %r8,%r7,%r9 # r8 = res - low product
addc %r7,%r7,%r8 # invert cy (r7 is junk)
addi %r3,%r3,-4 # adjust res_ptr
bdz 1f
0: lwzu %r0,4(%r4) # next s1 limb
stwu %r8,4(%r3) # store previous difference limb
mullw %r8,%r0,%r6 # low product
adde %r7,%r8,%r10 # low + previous high + inverted cy
mulhwu %r10,%r0,%r6 # high product
lwz %r9,4(%r3) # next res limb
addze %r10,%r10 # fold cy into high product
subf %r8,%r7,%r9 # res limb -= product limb
addc %r7,%r7,%r8 # invert cy (r7 is junk)
bdnz 0b
1: stw %r8,4(%r3) # store last difference limb
addze %r3,%r10 # return borrow limb
blr
END(_gcry_mpih_submul_1)
#endif
|
al3xtjames/Clover
| 4,260
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-add1.S
|
/* PowerPC-32 add_n -- Add two limb vectors of equal, non-zero length.
*
* Copyright (C) 1992, 1994, 1995, 1998, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, (r3)
* mpi_ptr_t s1_ptr, (r4)
* mpi_ptr_t s2_ptr, (r5)
* mpi_size_t size) (r6)
*/
/* AIX/XCOFF linkage: _gcry_mpih_add_n is the function descriptor in a
   [DS] csect; the actual code lives at ._gcry_mpih_add_n in [PR].
   res[i] = s1[i] + s2[i] over `size` limbs; returns the carry (0/1).
   In: r3 = res_ptr, r4 = s1_ptr, r5 = s2_ptr, r6 = size (> 0).  */
	.toc
	.extern _gcry_mpih_add_n[DS]
	.extern ._gcry_mpih_add_n
	.csect [PR]
	.align 2
	.globl _gcry_mpih_add_n
	.globl ._gcry_mpih_add_n
	.csect _gcry_mpih_add_n[DS]
_gcry_mpih_add_n:
	.long ._gcry_mpih_add_n, TOC[tc0], 0
	.csect [PR]
._gcry_mpih_add_n:
	mtctr	6		# copy size into CTR
	lwz	8,0(4)		# load least significant s1 limb
	lwz	0,0(5)		# load least significant s2 limb
	addi	3,3,-4		# offset res_ptr, it is updated before used
	addc	7,0,8		# add least significant limbs, set cy
	bdz	Lend		# If done, skip loop
Loop:	lwzu	8,4(4)		# load s1 limb and update s1_ptr
	lwzu	0,4(5)		# load s2 limb and update s2_ptr
	stwu	7,4(3)		# store previous limb in load latency slot
	adde	7,0,8		# add new limbs with cy, set cy
	bdnz	Loop		# decrement CTR and loop back
Lend:	stw	7,4(3)		# store ultimate result limb
	li	3,0		# load cy into ...
	addze	3,3		# ... return value register (r3 = 0 + CA)
	blr
#else
/* Add two limb vectors of equal, non-zero length for PowerPC.
Copyright (C) 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include "sysdep.h"
#include "asm-syntax.h"
/* mp_limb_t mpn_add_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
mp_size_t size)
Calculate s1+s2 and put result in res_ptr; return carry, 0 or 1. */
/* Note on optimisation: This code is optimal for the 601. Almost every other
possible 2-unrolled inner loop will not be. Also, watch out for the
alignment... */
/* glibc-patch variant, tuned for the PPC 601: the inner loop handles
   two limbs per iteration.  res = s1 + s2 over `size` limbs; returns
   the carry (0/1) in r3.  In: r3 = res_ptr, r4 = s1_ptr, r5 = s2_ptr,
   r6 = size (> 0).  */
EALIGN(_gcry_mpih_add_n,3,0)
/* Set up for loop below.  */
	mtcrf 0x01,%r6		# copy low size bits into cr7; bit 31 = size odd
	srwi. %r7,%r6,1		# r7 = number of 2-limb loop iterations
	li %r10,0		# r10 = 0; final addze adds CA to it for the result
	mtctr %r7
	bt 31,2f		# odd size: peel off the first limb
/* Clear the carry.  */
	addic %r0,%r0,0		# CA = 0 (addic with 0 just clears carry)
/* Adjust pointers for loop.  */
	addi %r3,%r3,-4
	addi %r4,%r4,-4
	addi %r5,%r5,-4
	b 0f
2:	lwz %r7,0(%r5)		# odd case: add the first limb pair here,
	lwz %r6,0(%r4)		# leaving CA primed for the loop
	addc %r6,%r6,%r7
	stw %r6,0(%r3)
	beq 1f			# size == 1 (srwi. set cr0.eq): done
/* The loop.  */
/* Align start of loop to an odd word boundary to guarantee that the
   last two words can be fetched in one access (for 601).  */
0:	lwz %r9,4(%r4)		# first limb of the pair
	lwz %r8,4(%r5)
	lwzu %r6,8(%r4)		# second limb; lwzu advances the pointers
	lwzu %r7,8(%r5)
	adde %r8,%r9,%r8	# add with carry chain through both limbs
	stw %r8,4(%r3)
	adde %r6,%r6,%r7
	stwu %r6,8(%r3)
	bdnz 0b
/* Return the carry.  */
1:	addze %r3,%r10		# r3 = 0 + CA
	blr
END(_gcry_mpih_add_n)
#endif
|
al3xtjames/Clover
| 4,367
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/powerpc32/mpih-sub1.S
|
/* PowerPC-32 sub_n -- Subtract two limb vectors of the same length > 0
* and store difference in a third limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998,
* 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "sysdep.h"
#include "asm-syntax.h"
#ifndef USE_PPC_PATCHES
/*******************
* mpi_limb_t
* _gcry_mpih_sub_n( mpi_ptr_t res_ptr, (r3)
* mpi_ptr_t s1_ptr, (r4)
* mpi_ptr_t s2_ptr, (r5)
* mpi_size_t size) (r6)
*/
/* AIX/XCOFF linkage: descriptor in [DS], code at ._gcry_mpih_sub_n in
   [PR].  res[i] = s1[i] - s2[i] over `size` limbs; returns the borrow
   (0/1).  In: r3 = res_ptr, r4 = s1_ptr, r5 = s2_ptr, r6 = size.
   Note PPC carry semantics: subfc/subfe set CA = 1 when NO borrow
   occurred, hence the inversion at the end.  */
	.toc
	.extern _gcry_mpih_sub_n[DS]
	.extern ._gcry_mpih_sub_n
	.csect [PR]
	.align 2
	.globl _gcry_mpih_sub_n
	.globl ._gcry_mpih_sub_n
	.csect _gcry_mpih_sub_n[DS]
_gcry_mpih_sub_n:
	.long ._gcry_mpih_sub_n, TOC[tc0], 0
	.csect [PR]
._gcry_mpih_sub_n:
	mtctr	6		# copy size into CTR
	lwz	8,0(4)		# load least significant s1 limb
	lwz	0,0(5)		# load least significant s2 limb
	addi	3,3,-4		# offset res_ptr, it is updated before used
	subfc	7,0,8		# subtract least significant limbs, set cy
	bdz	Lend		# If done, skip loop
Loop:	lwzu	8,4(4)		# load s1 limb and update s1_ptr
	lwzu	0,4(5)		# load s2 limb and update s2_ptr
	stwu	7,4(3)		# store previous limb in load latency slot
	subfe	7,0,8		# subtract new limbs with cy, set cy
	bdnz	Loop		# decrement CTR and loop back
Lend:	stw	7,4(3)		# store ultimate result limb
	subfe	3,0,0		# load !cy into ... (r3 = CA - 1: 0 or -1)
	subfic	3,3,0		# ... return value register (r3 = 1 iff borrow)
	blr
#else
/* Subtract two limb vectors of equal, non-zero length for PowerPC.
Copyright (C) 1997 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with the GNU C Library; see the file COPYING.LIB. If not,
write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
/* mp_limb_t mpn_sub_n (mp_ptr res_ptr, mp_srcptr s1_ptr, mp_srcptr s2_ptr,
mp_size_t size)
Calculate s1-s2 and put result in res_ptr; return borrow, 0 or 1. */
/* Note on optimisation: This code is optimal for the 601. Almost every other
possible 2-unrolled inner loop will not be. Also, watch out for the
alignment... */
/* glibc-patch variant, tuned for the PPC 601: two limbs per loop
   iteration.  res = s1 - s2 over `size` limbs; returns the borrow
   (0/1) in r3.  In: r3 = res_ptr, r4 = s1_ptr, r5 = s2_ptr,
   r6 = size (> 0).  PPC CA = 1 means "no borrow".  */
EALIGN(_gcry_mpih_sub_n,3,1)
/* Set up for loop below.  */
	mtcrf 0x01,%r6		# copy low size bits into cr7; bit 31 = size odd
	srwi. %r7,%r6,1		# r7 = number of 2-limb loop iterations
	mtctr %r7
	bt 31,2f		# odd size: peel off the first limb
/* Set the carry (clear the borrow).  */
	subfc %r0,%r0,%r0	# r0 - r0 sets CA = 1 (no initial borrow)
/* Adjust pointers for loop.  */
	addi %r3,%r3,-4
	addi %r4,%r4,-4
	addi %r5,%r5,-4
	b 0f
2:	lwz %r7,0(%r5)		# odd case: subtract the first limb pair here,
	lwz %r6,0(%r4)		# leaving CA primed for the loop
	subfc %r6,%r7,%r6
	stw %r6,0(%r3)
	beq 1f			# size == 1 (srwi. set cr0.eq): done
/* Align start of loop to an odd word boundary to guarantee that the
   last two words can be fetched in one access (for 601).  This turns
   out to be important.  */
0:
	lwz %r9,4(%r4)		# first limb of the pair
	lwz %r8,4(%r5)
	lwzu %r6,8(%r4)		# second limb; lwzu advances the pointers
	lwzu %r7,8(%r5)
	subfe %r8,%r8,%r9	# subtract with borrow chain through both limbs
	stw %r8,4(%r3)
	subfe %r6,%r7,%r6
	stwu %r6,8(%r3)
	bdnz 0b
/* Return the borrow.  */
1:	subfe %r3,%r3,%r3	# r3 = CA - 1: 0 if no borrow, -1 if borrow
	neg %r3,%r3		# map to 0/1
	blr
END(_gcry_mpih_sub_n)
#endif
|
al3xtjames/Clover
| 2,294
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-mul1.S
|
/* mips3 mpih-mul1.S -- Multiply a limb vector with a limb and store
* the result in a second limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998, 2000
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_mul_1( mpi_ptr_t res_ptr, (r4)
* mpi_ptr_t s1_ptr, (r5)
* mpi_size_t s1_size, (r6)
* mpi_limb_t s2_limb) (r7)
*/
/* res[i] = s1[i] * s2_limb over s1_size limbs; returns the top carry
   limb in $2.  In: $4 = res_ptr, $5 = s1_ptr, $6 = s1_size,
   $7 = s2_limb.  Software-pipelined around the long-latency dmultu:
   each iteration reads HI/LO of the PREVIOUS multiply while the next
   one is in flight.  .set noreorder: every branch's delay slot is
   filled by hand (the instruction after each beq/bne/j executes
   unconditionally).  */
	.text
	.align	 4
	.globl	 _gcry_mpih_mul_1
	.ent	_gcry_mpih_mul_1
_gcry_mpih_mul_1:
	.set	noreorder
	.set	nomacro
/* # warm up phase 0	*/
	ld	$8,0($5)	# $8 = s1[0]
/* # warm up phase 1	*/
	daddiu	$5,$5,8
	dmultu	$8,$7		# start first multiply
	daddiu	$6,$6,-1
	beq	$6,$0,$LC0	# one limb only -> cool down phase 0
	move	$2,$0		# zero cy2 (delay slot, always executes)
	daddiu	$6,$6,-1
	beq	$6,$0,$LC1	# two limbs only -> cool down phase 1
	ld	$8,0($5)	# load new s1 limb as early as possible (delay slot)
Loop:	mflo	$10		# $10 = low half of previous product
	mfhi	$9		# $9 = high half of previous product
	daddiu	$5,$5,8
	daddu	$10,$10,$2	# add old carry limb to low product limb
	dmultu	$8,$7		# start next multiply
	ld	$8,0($5)	# load new s1 limb as early as possible
	daddiu	$6,$6,-1	# decrement loop counter
	sltu	$2,$10,$2	# carry from previous addition -> $2
	sd	$10,0($4)	# store result limb
	daddiu	$4,$4,8
	bne	$6,$0,Loop
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
/* # cool down phase 1	*/
$LC1:	mflo	$10
	mfhi	$9
	daddu	$10,$10,$2
	sltu	$2,$10,$2
	dmultu	$8,$7		# last multiply
	sd	$10,0($4)
	daddiu	$4,$4,8
	daddu	$2,$9,$2	# add high product limb and carry from addition
/* # cool down phase 0	*/
$LC0:	mflo	$10
	mfhi	$9
	daddu	$10,$10,$2
	sltu	$2,$10,$2
	sd	$10,0($4)
	j	$31		# return
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
	.end	_gcry_mpih_mul_1
|
al3xtjames/Clover
| 2,527
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-mul2.S
|
/* MIPS3 addmul_1 -- Multiply a limb vector with a single limb and
* add the product to a second limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998, 2000
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_addmul_1( mpi_ptr_t res_ptr, (r4)
* mpi_ptr_t s1_ptr, (r5)
* mpi_size_t s1_size, (r6)
* mpi_limb_t s2_limb) (r7)
*/
/* res[i] += s1[i] * s2_limb over s1_size limbs; returns the top carry
   limb in $2.  In: $4 = res_ptr, $5 = s1_ptr, $6 = s1_size,
   $7 = s2_limb.  Same software pipeline as mul_1 (read HI/LO of the
   previous dmultu while the next runs), plus an extra add of the
   existing res limb, so two carries are accumulated per step.
   noreorder: delay-slot instructions after branches always execute.  */
	.text
	.align	 4
	.globl	 _gcry_mpih_addmul_1
	.ent	_gcry_mpih_addmul_1
_gcry_mpih_addmul_1:
	.set	noreorder
	.set	nomacro
/* # warm up phase 0	*/
	ld	$8,0($5)	# $8 = s1[0]
/* # warm up phase 1	*/
	daddiu	$5,$5,8
	dmultu	$8,$7		# start first multiply
	daddiu	$6,$6,-1
	beq	$6,$0,$LC0	# one limb only -> cool down phase 0
	move	$2,$0		# zero cy2 (delay slot)
	daddiu	$6,$6,-1
	beq	$6,$0,$LC1	# two limbs only -> cool down phase 1
	ld	$8,0($5)	# load new s1 limb as early as possible (delay slot)
Loop:	ld	$10,0($4)	# $10 = current res limb
	mflo	$3		# low half of previous product
	mfhi	$9		# high half of previous product
	daddiu	$5,$5,8
	daddu	$3,$3,$2	# add old carry limb to low product limb
	dmultu	$8,$7		# start next multiply
	ld	$8,0($5)	# load new s1 limb as early as possible
	daddiu	$6,$6,-1	# decrement loop counter
	sltu	$2,$3,$2	# carry from previous addition -> $2
	daddu	$3,$10,$3	# add in the existing res limb
	sltu	$10,$3,$10	# carry from that addition
	daddu	$2,$2,$10	# accumulate both carries
	sd	$3,0($4)
	daddiu	$4,$4,8
	bne	$6,$0,Loop
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
/* # cool down phase 1	*/
$LC1:	ld	$10,0($4)
	mflo	$3
	mfhi	$9
	daddu	$3,$3,$2
	sltu	$2,$3,$2
	dmultu	$8,$7		# last multiply
	daddu	$3,$10,$3
	sltu	$10,$3,$10
	daddu	$2,$2,$10
	sd	$3,0($4)
	daddiu	$4,$4,8
	daddu	$2,$9,$2	# add high product limb and carry from addition
/* # cool down phase 0	*/
$LC0:	ld	$10,0($4)
	mflo	$3
	mfhi	$9
	daddu	$3,$3,$2
	sltu	$2,$3,$2
	daddu	$3,$10,$3
	sltu	$10,$3,$10
	daddu	$2,$2,$10
	sd	$3,0($4)
	j	$31		# return
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
	.end	_gcry_mpih_addmul_1
|
al3xtjames/Clover
| 2,172
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-lshift.S
|
/* mips3 lshift
*
* Copyright (C) 1995, 1998, 2000,
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_lshift( mpi_ptr_t wp, ($4)
* mpi_ptr_t up, ($5)
* mpi_size_t usize, ($6)
* unsigned cnt) ($7)
*/
/* wp[] = up[] << cnt over usize limbs, working from the most
   significant end downward (pointers may overlap with wp >= up);
   returns the bits shifted out of the top limb in $2.
   In: $4 = wp, $5 = up, $6 = usize, $7 = cnt (1..63).
   Structure: a prologue loop handles usize-1 mod 4 limbs, then the
   main loop does 4 limbs per pass.  noreorder: instructions after
   branches are delay slots and always execute.  */
	.text
	.align	 2
	.globl	 _gcry_mpih_lshift
	.ent	_gcry_mpih_lshift
_gcry_mpih_lshift:
	.set	noreorder
	.set	nomacro
	dsll	$2,$6,3		# $2 = usize * 8 (bytes)
	daddu	$5,$5,$2	# make r5 point at end of src
	ld	$10,-8($5)	# load first limb (most significant)
	dsubu	$13,$0,$7	# $13 = 64 - cnt (shift uses low 6 bits)
	daddu	$4,$4,$2	# make r4 point at end of res
	daddiu	$6,$6,-1
	and	$9,$6,4-1	# number of limbs in first loop
	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
	dsrl	$2,$10,$13	# compute function result (delay slot)
	dsubu	$6,$6,$9	# remaining limbs for the unrolled loop
.Loop0:	ld	$3,-16($5)	# prologue: one limb per iteration
	daddiu	$4,$4,-8
	daddiu	$5,$5,-8
	daddiu	$9,$9,-1
	dsll	$11,$10,$7	# high part from current limb
	dsrl	$12,$3,$13	# low part from next limb
	move	$10,$3
	or	$8,$11,$12	# combined output limb
	bne	$9,$0,.Loop0
	sd	$8,0($4)	# store (delay slot)
.L0:	beq	$6,$0,.Lend
	nop			# delay slot: nothing useful to hoist
.Loop:	ld	$3,-16($5)	# main loop: 4 limbs per pass
	daddiu	$4,$4,-32
	daddiu	$6,$6,-4
	dsll	$11,$10,$7
	dsrl	$12,$3,$13
	ld	$10,-24($5)
	dsll	$14,$3,$7
	or	$8,$11,$12
	sd	$8,24($4)
	dsrl	$9,$10,$13
	ld	$3,-32($5)
	dsll	$11,$10,$7
	or	$8,$14,$9
	sd	$8,16($4)
	dsrl	$12,$3,$13
	ld	$10,-40($5)
	dsll	$14,$3,$7
	or	$8,$11,$12
	sd	$8,8($4)
	dsrl	$9,$10,$13
	daddiu	$5,$5,-32
	or	$8,$14,$9
	bgtz	$6,.Loop
	sd	$8,0($4)	# store 4th limb (delay slot)
.Lend:	dsll	$8,$10,$7	# last (least significant) output limb
	j	$31		# return ($2 already holds shifted-out bits)
	sd	$8,-8($4)	# store (delay slot)
	.end	_gcry_mpih_lshift
|
al3xtjames/Clover
| 2,059
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-rshift.S
|
/* mips3 rshift
*
* Copyright (C) 1995, 1998, 2000
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_rshift( mpi_ptr_t wp, ($4)
* mpi_ptr_t up, ($5)
* mpi_size_t usize, ($6)
* unsigned cnt) ($7)
*/
/* wp[] = up[] >> cnt over usize limbs, working from the least
   significant end upward (pointers may overlap with wp <= up);
   returns the bits shifted out of the bottom limb in $2.
   In: $4 = wp, $5 = up, $6 = usize, $7 = cnt (1..63).
   Mirror image of lshift: prologue handles usize-1 mod 4 limbs,
   main loop does 4 per pass.  noreorder: branch delay slots are
   filled by hand and always execute.  */
	.text
	.align	 2
	.globl	 _gcry_mpih_rshift
	.ent	_gcry_mpih_rshift
_gcry_mpih_rshift:
	.set	noreorder
	.set	nomacro
	ld	$10,0($5)	# load first limb (least significant)
	dsubu	$13,$0,$7	# $13 = 64 - cnt (shift uses low 6 bits)
	daddiu	$6,$6,-1
	and	$9,$6,4-1	# number of limbs in first loop
	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
	dsll	$2,$10,$13	# compute function result (delay slot)
	dsubu	$6,$6,$9	# remaining limbs for the unrolled loop
.Loop0:	ld	$3,8($5)	# prologue: one limb per iteration
	daddiu	$4,$4,8
	daddiu	$5,$5,8
	daddiu	$9,$9,-1
	dsrl	$11,$10,$7	# low part from current limb
	dsll	$12,$3,$13	# high part from next limb
	move	$10,$3
	or	$8,$11,$12	# combined output limb
	bne	$9,$0,.Loop0
	sd	$8,-8($4)	# store (delay slot)
.L0:	beq	$6,$0,.Lend
	nop			# delay slot: nothing useful to hoist
.Loop:	ld	$3,8($5)	# main loop: 4 limbs per pass
	daddiu	$4,$4,32
	daddiu	$6,$6,-4
	dsrl	$11,$10,$7
	dsll	$12,$3,$13
	ld	$10,16($5)
	dsrl	$14,$3,$7
	or	$8,$11,$12
	sd	$8,-32($4)
	dsll	$9,$10,$13
	ld	$3,24($5)
	dsrl	$11,$10,$7
	or	$8,$14,$9
	sd	$8,-24($4)
	dsll	$12,$3,$13
	ld	$10,32($5)
	dsrl	$14,$3,$7
	or	$8,$11,$12
	sd	$8,-16($4)
	dsll	$9,$10,$13
	daddiu	$5,$5,32
	or	$8,$14,$9
	bgtz	$6,.Loop
	sd	$8,-8($4)	# store 4th limb (delay slot)
.Lend:	dsrl	$8,$10,$7	# last (most significant) output limb
	j	$31		# return ($2 already holds shifted-out bits)
	sd	$8,0($4)	# store (delay slot)
	.end	_gcry_mpih_rshift
|
al3xtjames/Clover
| 2,530
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-mul3.S
|
/* MIPS3 submul_1 -- Multiply a limb vector with a single limb and
* subtract the product from a second limb vector.
*
* Copyright (C) 1992, 1994, 1995, 1998, 2000
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_submul_1( mpi_ptr_t res_ptr, (r4)
* mpi_ptr_t s1_ptr, (r5)
* mpi_size_t s1_size, (r6)
* mpi_limb_t s2_limb) (r7)
*/
/* res[i] -= s1[i] * s2_limb over s1_size limbs; returns the top
   borrow limb in $2.  In: $4 = res_ptr, $5 = s1_ptr, $6 = s1_size,
   $7 = s2_limb.  Same software pipeline as mul_1/addmul_1, but the
   res limb is subtracted; `sgtu` detects the unsigned wraparound
   (result > minuend) to recover the borrow.  noreorder: branch delay
   slots always execute.  */
	.text
	.align	 4
	.globl	 _gcry_mpih_submul_1
	.ent	_gcry_mpih_submul_1
_gcry_mpih_submul_1:
	.set	noreorder
	.set	nomacro
/* # warm up phase 0	*/
	ld	$8,0($5)	# $8 = s1[0]
/* # warm up phase 1	*/
	daddiu	$5,$5,8
	dmultu	$8,$7		# start first multiply
	daddiu	$6,$6,-1
	beq	$6,$0,$LC0	# one limb only -> cool down phase 0
	move	$2,$0		# zero cy2 (delay slot)
	daddiu	$6,$6,-1
	beq	$6,$0,$LC1	# two limbs only -> cool down phase 1
	ld	$8,0($5)	# load new s1 limb as early as possible (delay slot)
Loop:	ld	$10,0($4)	# $10 = current res limb
	mflo	$3		# low half of previous product
	mfhi	$9		# high half of previous product
	daddiu	$5,$5,8
	daddu	$3,$3,$2	# add old carry limb to low product limb
	dmultu	$8,$7		# start next multiply
	ld	$8,0($5)	# load new s1 limb as early as possible
	daddiu	$6,$6,-1	# decrement loop counter
	sltu	$2,$3,$2	# carry from previous addition -> $2
	dsubu	$3,$10,$3	# res limb minus (product + carry)
	sgtu	$10,$3,$10	# borrow: result wrapped past the minuend
	daddu	$2,$2,$10	# accumulate carry and borrow
	sd	$3,0($4)
	daddiu	$4,$4,8
	bne	$6,$0,Loop
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
/* # cool down phase 1	*/
$LC1:	ld	$10,0($4)
	mflo	$3
	mfhi	$9
	daddu	$3,$3,$2
	sltu	$2,$3,$2
	dmultu	$8,$7		# last multiply
	dsubu	$3,$10,$3
	sgtu	$10,$3,$10
	daddu	$2,$2,$10
	sd	$3,0($4)
	daddiu	$4,$4,8
	daddu	$2,$9,$2	# add high product limb and carry from addition
/* # cool down phase 0	*/
$LC0:	ld	$10,0($4)
	mflo	$3
	mfhi	$9
	daddu	$3,$3,$2
	sltu	$2,$3,$2
	dsubu	$3,$10,$3
	sgtu	$10,$3,$10
	daddu	$2,$2,$10
	sd	$3,0($4)
	j	$31		# return
	daddu	$2,$9,$2	# add high product limb and carry from addition (delay slot)
	.end	_gcry_mpih_submul_1
|
al3xtjames/Clover
| 2,478
|
FileSystems/GrubFS/grub/grub-core/lib/libgcrypt/mpi/mips3/mpih-add1.S
|
/* mips3 add_n -- Add two limb vectors of the same length > 0 and store
* sum in a third limb vector.
*
* Copyright (C) 1995, 1998, 2000
* 2001, 2002 Free Software Foundation, Inc.
*
* This file is part of Libgcrypt.
*
* Libgcrypt is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation; either version 2.1 of
* the License, or (at your option) any later version.
*
* Libgcrypt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
/*******************
* mpi_limb_t
* _gcry_mpih_add_n( mpi_ptr_t res_ptr, ($4)
* mpi_ptr_t s1_ptr, ($5)
* mpi_ptr_t s2_ptr, ($6)
* mpi_size_t size) ($7)
*/
/* res[i] = s1[i] + s2[i] over `size` limbs; returns the carry (0/1)
   in $2.  In: $4 = res_ptr, $5 = s1_ptr, $6 = s2_ptr, $7 = size (> 0).
   MIPS has no carry flag, so each limb addition materializes its
   carry with a daddu/sltu pair (carry-in added first, then the two
   source limbs, the two sub-carries OR'd together — they can never
   both be 1).  The last limb pair is always completed at .Lend; the
   prologue loop handles (size-1) mod 4 limbs, the main loop 4 per
   pass, one limb pair loaded ahead.  noreorder: branch delay slots
   always execute.  */
	.text
	.align	 2
	.globl	 _gcry_mpih_add_n
	.ent	_gcry_mpih_add_n
_gcry_mpih_add_n:
	.set	noreorder
	.set	nomacro
	ld	$10,0($5)	# preload first s1 limb
	ld	$11,0($6)	# preload first s2 limb
	daddiu	$7,$7,-1
	and	$9,$7,4-1	# number of limbs in first loop
	beq	$9,$0,.L0	# if multiple of 4 limbs, skip first loop
	move	$2,$0		# carry = 0 (delay slot)
	dsubu	$7,$7,$9	# remaining limbs for the unrolled loop
.Loop0:	daddiu	$9,$9,-1	# prologue: one limb per iteration
	ld	$12,8($5)	# load next limb pair ahead
	daddu	$11,$11,$2	# add carry-in to s2 limb
	ld	$13,8($6)
	sltu	$8,$11,$2	# carry out of that add
	daddu	$11,$10,$11	# add s1 limb
	sltu	$2,$11,$10	# carry out of that add
	sd	$11,0($4)
	or	$2,$2,$8	# combine sub-carries
	daddiu	$5,$5,8
	daddiu	$6,$6,8
	move	$10,$12		# shift preloaded pair into place
	move	$11,$13
	bne	$9,$0,.Loop0
	daddiu	$4,$4,8		# advance res_ptr (delay slot)
.L0:	beq	$7,$0,.Lend
	nop			# delay slot: nothing useful to hoist
.Loop:	daddiu	$7,$7,-4	# main loop: 4 limbs per pass
	ld	$12,8($5)
	daddu	$11,$11,$2
	ld	$13,8($6)
	sltu	$8,$11,$2
	daddu	$11,$10,$11
	sltu	$2,$11,$10
	sd	$11,0($4)
	or	$2,$2,$8
	ld	$10,16($5)
	daddu	$13,$13,$2
	ld	$11,16($6)
	sltu	$8,$13,$2
	daddu	$13,$12,$13
	sltu	$2,$13,$12
	sd	$13,8($4)
	or	$2,$2,$8
	ld	$12,24($5)
	daddu	$11,$11,$2
	ld	$13,24($6)
	sltu	$8,$11,$2
	daddu	$11,$10,$11
	sltu	$2,$11,$10
	sd	$11,16($4)
	or	$2,$2,$8
	ld	$10,32($5)
	daddu	$13,$13,$2
	ld	$11,32($6)
	sltu	$8,$13,$2
	daddu	$13,$12,$13
	sltu	$2,$13,$12
	sd	$13,24($4)
	or	$2,$2,$8
	daddiu	$5,$5,32
	daddiu	$6,$6,32
	bne	$7,$0,.Loop
	daddiu	$4,$4,32	# advance res_ptr (delay slot)
.Lend:	daddu	$11,$11,$2	# final preloaded limb pair
	sltu	$8,$11,$2
	daddu	$11,$10,$11
	sltu	$2,$11,$10
	sd	$11,0($4)
	j	$31		# return
	or	$2,$2,$8	# final carry -> $2 (delay slot)
	.end	_gcry_mpih_add_n
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.