AirFortressIlikara/LS2K0300-linux-4.19 | 53,177 bytes | arch/m68k/fpsp040/res_func.S
|
| res_func.sa 3.9 7/29/91
|
| Normalizes denormalized numbers if necessary and updates the
| stack frame. The frame is then restored back into the
| machine and the 040 completes the operation. This routine
| is only used by the unsupported data type/format handler.
| (Exception vector 55).
|
| For packed move out (fmove.p fpm,<ea>) the operation is
| completed here; data is packed and moved to user memory.
| The stack is restored to the 040 only in the case of a
| reportable exception in the conversion.
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
RES_FUNC: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
sp_bnds: .short 0x3f81,0x407e
.short 0x3f6a,0x0000
dp_bnds: .short 0x3c01,0x43fe
.short 0x3bcd,0x0000
|xref mem_write
|xref bindec
|xref get_fline
|xref round
|xref denorm
|xref dest_ext
|xref dest_dbl
|xref dest_sgl
|xref unf_sub
|xref nrm_set
|xref dnrm_lp
|xref ovf_res
|xref reg_dest
|xref t_ovfl
|xref t_unfl
.global res_func
.global p_move
res_func:
clrb DNRM_FLG(%a6)
clrb RES_FLG(%a6)
clrb CU_ONLY(%a6)
tstb DY_MO_FLG(%a6)
beqs monadic
dyadic:
btstb #7,DTAG(%a6) |if dop = norm=000, zero=001,
| ;inf=010 or nan=011
beqs monadic |then branch
| ;else denorm
| HANDLE DESTINATION DENORM HERE
| ;set dtag to norm
| ;write the tag & fpte15 to the fstack
leal FPTEMP(%a6),%a0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsr nrm_set |normalize number (exp will go negative)
bclrb #sign_bit,LOCAL_EX(%a0) |get rid of false sign
bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
beqs dpos
bsetb #sign_bit,LOCAL_EX(%a0)
dpos:
bfclr DTAG(%a6){#0:#4} |set tag to normalized, FPTE15 = 0
bsetb #4,DTAG(%a6) |set FPTE15
orb #0x0f,DNRM_FLG(%a6)
monadic:
leal ETEMP(%a6),%a0
btstb #direction_bit,CMDREG1B(%a6) |check direction
bne opclass3 |it is a mv out
|
| At this point, only opclass 0 and 2 possible
|
btstb #7,STAG(%a6) |if sop = norm=000, zero=001,
| ;inf=010 or nan=011
bne mon_dnrm |else denorm
tstb DY_MO_FLG(%a6) |all cases of dyadic instructions would
bne normal |require normalization of denorm
| At this point:
| monadic instructions: fabs = $18 fneg = $1a ftst = $3a
| fmove = $00 fsmove = $40 fdmove = $44
| fsqrt = $05* fssqrt = $41 fdsqrt = $45
| (*fsqrt reencoded to $05)
|
movew CMDREG1B(%a6),%d0 |get command register
andil #0x7f,%d0 |strip to only command word
|
| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
| fdsqrt are possible.
| For fabs, fneg, ftst, fsmove, and fdmove, fall through to cu_norm and
| complete the operation here; for fsqrt, fssqrt, and fdsqrt, branch to normal.
|
btstl #0,%d0
bne normal |weed out fsqrt instructions
|
| cu_norm handles fmove in instructions with normalized inputs.
| The routine round is used to correctly round the input for the
| destination precision and mode.
|
cu_norm:
st CU_ONLY(%a6) |set cu-only inst flag
movew CMDREG1B(%a6),%d0
andib #0x3b,%d0 |isolate bits to select inst
tstb %d0
beql cu_nmove |if zero, it is an fmove
cmpib #0x18,%d0
beql cu_nabs |if $18, it is fabs
cmpib #0x1a,%d0
beql cu_nneg |if $1a, it is fneg
|
| Inst is ftst. Check the source operand and set the cc's accordingly.
| No write is done, so simply rts.
|
cu_ntst:
movew LOCAL_EX(%a0),%d0
bclrl #15,%d0
sne LOCAL_SGN(%a0)
beqs cu_ntpo
orl #neg_mask,USER_FPSR(%a6) |set N
cu_ntpo:
cmpiw #0x7fff,%d0 |test for inf/nan
bnes cu_ntcz
tstl LOCAL_HI(%a0)
bnes cu_ntn
tstl LOCAL_LO(%a0)
bnes cu_ntn
orl #inf_mask,USER_FPSR(%a6)
rts
cu_ntn:
orl #nan_mask,USER_FPSR(%a6)
movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
| ;snan handler
rts
cu_ntcz:
tstl LOCAL_HI(%a0)
bnel cu_ntsx
tstl LOCAL_LO(%a0)
bnel cu_ntsx
orl #z_mask,USER_FPSR(%a6)
cu_ntsx:
rts
|
| Inst is fabs. Execute the absolute value function on the input.
| Branch to the fmove code. If the operand is NaN, do nothing.
|
cu_nabs:
moveb STAG(%a6),%d0
btstl #5,%d0 |test for NaN or zero
bne wr_etemp |if either, simply write it
bclrb #7,LOCAL_EX(%a0) |do abs
bras cu_nmove |fmove code will finish
|
| Inst is fneg. Execute the negate value function on the input.
| Fall though to the fmove code. If the operand is NaN, do nothing.
|
cu_nneg:
moveb STAG(%a6),%d0
btstl #5,%d0 |test for NaN or zero
bne wr_etemp |if either, simply write it
bchgb #7,LOCAL_EX(%a0) |do neg
|
| Inst is fmove. This code also handles all result writes.
| If bit 2 is set, round is forced to double. If it is clear,
| and bit 6 is set, round is forced to single. If both are clear,
| the round precision is found in the fpcr. If the rounding precision
| is double or single, round the result before the write.
|
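| A rough C-style sketch of the precision selection described above
| (illustrative only, not part of this package; pick_prec is a made-up name):
|
|	enum prec { EXT = 0, SGL = 1, DBL = 2 };
|	static enum prec pick_prec(unsigned char cmd_lo, unsigned fpcr_prec)
|	{
|		if (cmd_lo & (1 << 2))	/* bit 2 set: forced double (fd...) */
|			return DBL;
|		if (cmd_lo & (1 << 6))	/* bit 6 set: forced single (fs...) */
|			return SGL;
|		/* otherwise use the FPCR precision: 0 = ext, 1 = sgl, else dbl */
|		return fpcr_prec == 0 ? EXT : (fpcr_prec == 1 ? SGL : DBL);
|	}
|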
cu_nmove:
moveb STAG(%a6),%d0
andib #0xe0,%d0 |isolate stag bits
bne wr_etemp |if not norm, simply write it
btstb #2,CMDREG1B+1(%a6) |check for rd
bne cu_nmrd
btstb #6,CMDREG1B+1(%a6) |check for rs
bne cu_nmrs
|
| The move or operation is not with forced precision. Test for
| nan or inf as the input; if so, simply write it to FPn. Use the
| FPCR_MODE byte to get rounding on norms and zeros.
|
cu_nmnr:
bfextu FPCR_MODE(%a6){#0:#2},%d0
tstb %d0 |check for extended
beq cu_wrexn |if so, just write result
cmpib #1,%d0 |check for single
beq cu_nmrs |fall through to double
|
| The move is fdmove or round precision is double.
|
cu_nmrd:
movel #2,%d0 |set up the size for denorm
movew LOCAL_EX(%a0),%d1 |compare exponent to double threshold
andw #0x7fff,%d1
cmpw #0x3c01,%d1
bls cu_nunfl
bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
orl #0x00020000,%d1 |or in rprec (double)
clrl %d0 |clear g,r,s for round
bclrb #sign_bit,LOCAL_EX(%a0) |convert to internal format
sne LOCAL_SGN(%a0)
bsrl round
bfclr LOCAL_SGN(%a0){#0:#8}
beqs cu_nmrdc
bsetb #sign_bit,LOCAL_EX(%a0)
cu_nmrdc:
movew LOCAL_EX(%a0),%d1 |check for overflow
andw #0x7fff,%d1
cmpw #0x43ff,%d1
bge cu_novfl |take care of overflow case
bra cu_wrexn
|
| The move is fsmove or round precision is single.
|
cu_nmrs:
movel #1,%d0
movew LOCAL_EX(%a0),%d1
andw #0x7fff,%d1
cmpw #0x3f81,%d1
bls cu_nunfl
bfextu FPCR_MODE(%a6){#2:#2},%d1
orl #0x00010000,%d1
clrl %d0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsrl round
bfclr LOCAL_SGN(%a0){#0:#8}
beqs cu_nmrsc
bsetb #sign_bit,LOCAL_EX(%a0)
cu_nmrsc:
movew LOCAL_EX(%a0),%d1
andw #0x7FFF,%d1
cmpw #0x407f,%d1
blt cu_wrexn
|
| The operand is above precision boundaries. Use t_ovfl to
| generate the correct value.
|
cu_novfl:
bsr t_ovfl
bra cu_wrexn
|
| The operand is below precision boundaries. Use denorm to
| generate the correct value.
|
cu_nunfl:
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsr denorm
bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
beqs cu_nucont
bsetb #sign_bit,LOCAL_EX(%a0)
cu_nucont:
bfextu FPCR_MODE(%a6){#2:#2},%d1
btstb #2,CMDREG1B+1(%a6) |check for rd
bne inst_d
btstb #6,CMDREG1B+1(%a6) |check for rs
bne inst_s
swap %d1
moveb FPCR_MODE(%a6),%d1
lsrb #6,%d1
swap %d1
bra inst_sd
inst_d:
orl #0x00020000,%d1
bra inst_sd
inst_s:
orl #0x00010000,%d1
inst_sd:
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsrl round
bfclr LOCAL_SGN(%a0){#0:#8}
beqs cu_nuflp
bsetb #sign_bit,LOCAL_EX(%a0)
cu_nuflp:
btstb #inex2_bit,FPSR_EXCEPT(%a6)
beqs cu_nuninx
orl #aunfl_mask,USER_FPSR(%a6) |if the round was inex, set AUNFL
cu_nuninx:
tstl LOCAL_HI(%a0) |test for zero
bnes cu_nunzro
tstl LOCAL_LO(%a0)
bnes cu_nunzro
|
| The mantissa is zero from the denorm loop. Check sign and rmode
| to see if rounding should have occurred which would leave the lsb.
|
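| As a hedged C sketch (names illustrative), the decision below amounts to:
|
|	/* rmode: 0 = RN, 1 = RZ, 2 = RM, 3 = RP, per the FPCR rmode bits */
|	static int keep_lsb(unsigned rmode, int negative)
|	{
|		if (rmode == 2)		/* round toward minus infinity */
|			return negative;
|		if (rmode == 3)		/* round toward plus infinity */
|			return !negative;
|		return 0;		/* RN or RZ: the result stays zero */
|	}
|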
movel USER_FPCR(%a6),%d0
andil #0x30,%d0 |isolate rmode
cmpil #0x20,%d0
blts cu_nzro
bnes cu_nrp
cu_nrm:
tstw LOCAL_EX(%a0) |if positive, set lsb
bges cu_nzro
btstb #7,FPCR_MODE(%a6) |check for double
beqs cu_nincs
bras cu_nincd
cu_nrp:
tstw LOCAL_EX(%a0) |if positive, set lsb
blts cu_nzro
btstb #7,FPCR_MODE(%a6) |check for double
beqs cu_nincs
cu_nincd:
orl #0x800,LOCAL_LO(%a0) |inc for double
bra cu_nunzro
cu_nincs:
orl #0x100,LOCAL_HI(%a0) |inc for single
bra cu_nunzro
cu_nzro:
orl #z_mask,USER_FPSR(%a6)
moveb STAG(%a6),%d0
andib #0xe0,%d0
cmpib #0x40,%d0 |check if input was tagged zero
beqs cu_numv
cu_nunzro:
orl #unfl_mask,USER_FPSR(%a6) |set unfl
cu_numv:
movel (%a0),ETEMP(%a6)
movel 4(%a0),ETEMP_HI(%a6)
movel 8(%a0),ETEMP_LO(%a6)
|
| Write the result to memory, setting the fpsr cc bits. NaN and Inf
| bypass cu_wrexn.
|
cu_wrexn:
tstw LOCAL_EX(%a0) |test for zero
beqs cu_wrzero
cmpw #0x8000,LOCAL_EX(%a0) |test for negative zero
bnes cu_wreon
cu_wrzero:
orl #z_mask,USER_FPSR(%a6) |set Z bit
cu_wreon:
tstw LOCAL_EX(%a0)
bpl wr_etemp
orl #neg_mask,USER_FPSR(%a6)
bra wr_etemp
|
| HANDLE SOURCE DENORM HERE
|
| ;clear denorm stag to norm
| ;write the new tag & ete15 to the fstack
mon_dnrm:
|
| At this point, check for the cases in which normalizing the
| denorm produces incorrect results.
|
tstb DY_MO_FLG(%a6) |all cases of dyadic instructions would
bnes nrm_src |require normalization of denorm
| At this point:
| monadic instructions: fabs = $18 fneg = $1a ftst = $3a
| fmove = $00 fsmove = $40 fdmove = $44
| fsqrt = $05* fssqrt = $41 fdsqrt = $45
| (*fsqrt reencoded to $05)
|
movew CMDREG1B(%a6),%d0 |get command register
andil #0x7f,%d0 |strip to only command word
|
| At this point, fabs, fneg, fsmove, fdmove, ftst, fsqrt, fssqrt, and
| fdsqrt are possible.
| For fabs, fneg, ftst, fsmove, and fdmove, branch to cu_dnrm (handle the
| denorm here, do not normalize); for fsqrt, fssqrt, and fdsqrt, branch to
| nrm_src (do normalize).
|
btstl #0,%d0
bnes nrm_src |weed out fsqrt instructions
st CU_ONLY(%a6) |set cu-only inst flag
bra cu_dnrm |fmove, fabs, fneg, ftst
| ;cases go to cu_dnrm
nrm_src:
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
bsr nrm_set |normalize number (exponent will go
| ; negative)
bclrb #sign_bit,LOCAL_EX(%a0) |get rid of false sign
bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
beqs spos
bsetb #sign_bit,LOCAL_EX(%a0)
spos:
bfclr STAG(%a6){#0:#4} |set tag to normalized, FPTE15 = 0
bsetb #4,STAG(%a6) |set ETE15
orb #0xf0,DNRM_FLG(%a6)
normal:
tstb DNRM_FLG(%a6) |check if any of the ops were denorms
bne ck_wrap |if so, check if it is a potential
| ;wrap-around case
fix_stk:
moveb #0xfe,CU_SAVEPC(%a6)
bclrb #E1,E_BYTE(%a6)
clrw NMNEXC(%a6)
st RES_FLG(%a6) |indicate that a restore is needed
rts
|
| cu_dnrm handles all cu-only instructions (fmove, fabs, fneg, and
| ftst) completely in software without an frestore to the 040.
|
cu_dnrm:
st CU_ONLY(%a6)
movew CMDREG1B(%a6),%d0
andib #0x3b,%d0 |isolate bits to select inst
tstb %d0
beql cu_dmove |if zero, it is an fmove
cmpib #0x18,%d0
beql cu_dabs |if $18, it is fabs
cmpib #0x1a,%d0
beql cu_dneg |if $1a, it is fneg
|
| Inst is ftst. Check the source operand and set the cc's accordingly.
| No write is done, so simply rts.
|
cu_dtst:
movew LOCAL_EX(%a0),%d0
bclrl #15,%d0
sne LOCAL_SGN(%a0)
beqs cu_dtpo
orl #neg_mask,USER_FPSR(%a6) |set N
cu_dtpo:
cmpiw #0x7fff,%d0 |test for inf/nan
bnes cu_dtcz
tstl LOCAL_HI(%a0)
bnes cu_dtn
tstl LOCAL_LO(%a0)
bnes cu_dtn
orl #inf_mask,USER_FPSR(%a6)
rts
cu_dtn:
orl #nan_mask,USER_FPSR(%a6)
movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
| ;snan handler
rts
cu_dtcz:
tstl LOCAL_HI(%a0)
bnel cu_dtsx
tstl LOCAL_LO(%a0)
bnel cu_dtsx
orl #z_mask,USER_FPSR(%a6)
cu_dtsx:
rts
|
| Inst is fabs. Execute the absolute value function on the input.
| Branch to the fmove code.
|
cu_dabs:
bclrb #7,LOCAL_EX(%a0) |do abs
bras cu_dmove |fmove code will finish
|
| Inst is fneg. Execute the negate value function on the input.
| Fall though to the fmove code.
|
cu_dneg:
bchgb #7,LOCAL_EX(%a0) |do neg
|
| Inst is fmove. This code also handles all result writes.
| If bit 2 is set, round is forced to double. If it is clear,
| and bit 6 is set, round is forced to single. If both are clear,
| the round precision is found in the fpcr. If the rounding precision
| is double or single, the result is zero, and the mode is checked
| to determine if the lsb of the result should be set.
|
cu_dmove:
btstb #2,CMDREG1B+1(%a6) |check for rd
bne cu_dmrd
btstb #6,CMDREG1B+1(%a6) |check for rs
bne cu_dmrs
|
| The move or operation is not with forced precision. Use the
| FPCR_MODE byte to get rounding.
|
cu_dmnr:
bfextu FPCR_MODE(%a6){#0:#2},%d0
tstb %d0 |check for extended
beq cu_wrexd |if so, just write result
cmpib #1,%d0 |check for single
beq cu_dmrs |fall through to double
|
| The move is fdmove or round precision is double. Result is zero.
| Check rmode for rp or rm and set lsb accordingly.
|
cu_dmrd:
bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
tstw LOCAL_EX(%a0) |check sign
blts cu_dmdn
cmpib #3,%d1 |check for rp
bne cu_dpd |load double pos zero
bra cu_dpdr |load double pos zero w/lsb
cu_dmdn:
cmpib #2,%d1 |check for rm
bne cu_dnd |load double neg zero
bra cu_dndr |load double neg zero w/lsb
|
| The move is fsmove or round precision is single. Result is zero.
| Check for rp or rm and set lsb accordingly.
|
cu_dmrs:
bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rmode
tstw LOCAL_EX(%a0) |check sign
blts cu_dmsn
cmpib #3,%d1 |check for rp
bne cu_spd |load single pos zero
bra cu_spdr |load single pos zero w/lsb
cu_dmsn:
cmpib #2,%d1 |check for rm
bne cu_snd |load single neg zero
bra cu_sndr |load single neg zero w/lsb
|
| The precision is extended, so the result in etemp is correct.
| Simply set unfl (not inex2 or aunfl) and write the result to
| the correct fp register.
cu_wrexd:
orl #unfl_mask,USER_FPSR(%a6)
tstw LOCAL_EX(%a0)
beq wr_etemp
orl #neg_mask,USER_FPSR(%a6)
bra wr_etemp
|
| These routines write +/- zero in double format. The routines
| cu_dpdr and cu_dndr set the double lsb.
|
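| For reference (assuming the usual extended layout with a 64-bit mantissa
| whose bit 63 is the explicit integer bit): double keeps the top 53 bits,
| 63 down to 11, so its lsb is bit 11 of LOCAL_LO, i.e. the 0x800 used by
| cu_dpdr/cu_dndr; single keeps the top 24 bits, 63 down to 40, so its lsb
| is bit 8 of LOCAL_HI, i.e. the 0x100 used by cu_spdr/cu_sndr.
|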
cu_dpd:
movel #0x3c010000,LOCAL_EX(%a0) |force pos double zero
clrl LOCAL_HI(%a0)
clrl LOCAL_LO(%a0)
orl #z_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_dpdr:
movel #0x3c010000,LOCAL_EX(%a0) |force pos double zero
clrl LOCAL_HI(%a0)
movel #0x800,LOCAL_LO(%a0) |with lsb set
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_dnd:
movel #0xbc010000,LOCAL_EX(%a0) |force neg double zero
clrl LOCAL_HI(%a0)
clrl LOCAL_LO(%a0)
orl #z_mask,USER_FPSR(%a6)
orl #neg_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_dndr:
movel #0xbc010000,LOCAL_EX(%a0) |force neg double zero
clrl LOCAL_HI(%a0)
movel #0x800,LOCAL_LO(%a0) |with lsb set
orl #neg_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
|
| These routines write +/- zero in single format. The routines
| cu_spdr and cu_sndr set the single lsb.
|
cu_spd:
movel #0x3f810000,LOCAL_EX(%a0) |force pos single zero
clrl LOCAL_HI(%a0)
clrl LOCAL_LO(%a0)
orl #z_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_spdr:
movel #0x3f810000,LOCAL_EX(%a0) |force pos single zero
movel #0x100,LOCAL_HI(%a0) |with lsb set
clrl LOCAL_LO(%a0)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_snd:
movel #0xbf810000,LOCAL_EX(%a0) |force neg single zero
clrl LOCAL_HI(%a0)
clrl LOCAL_LO(%a0)
orl #z_mask,USER_FPSR(%a6)
orl #neg_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
cu_sndr:
movel #0xbf810000,LOCAL_EX(%a0) |force neg single zero
movel #0x100,LOCAL_HI(%a0) |with lsb set
clrl LOCAL_LO(%a0)
orl #neg_mask,USER_FPSR(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
bra wr_etemp
|
| This code checks for 16-bit overflow conditions on dyadic
| operations which are not restorable into the floating-point
| unit and must be completed in software. Basically, this
| condition exists with a very large norm and a denorm. One
| of the operands must be denormalized to enter this code.
|
| Flags used:
| DY_MO_FLG contains 0 for monadic op, $ff for dyadic
| DNRM_FLG contains $00 for neither op denormalized
| $0f for the destination op denormalized
| $f0 for the source op denormalized
| $ff for both ops denormalized
|
| The wrap-around condition occurs for add, sub, div, and cmp
| when
|
| abs(dest_exp - src_exp) >= $8000
|
| and for mul when
|
| (dest_exp + src_exp) < $0
|
| we must process the operation here if this case is true.
|
| The rts following the frcfpn routine is the exit from res_func
| for this condition. The restore flag (RES_FLG) is left clear.
| No frestore is done unless an exception is to be reported.
|
| For fadd:
| if(sign_of(dest) != sign_of(src))
| replace exponent of src with $3fff (keep sign)
| use fpu to perform dest+new_src (user's rmode and X)
| clr sticky
| else
| set sticky
| call round with user's precision and mode
| move result to fpn and wbtemp
|
| For fsub:
| if(sign_of(dest) == sign_of(src))
| replace exponent of src with $3fff (keep sign)
| use fpu to perform dest+new_src (user's rmode and X)
| clr sticky
| else
| set sticky
| call round with user's precision and mode
| move result to fpn and wbtemp
|
| For fdiv/fsgldiv:
| if(both operands are denorm)
| restore_to_fpu;
| if(dest is norm)
| force_ovf;
| else(dest is denorm)
| force_unf:
|
| For fcmp:
| if(dest is norm)
| N = sign_of(dest);
| else(dest is denorm)
| N = ~sign_of(src);
|
| For fmul:
| if(both operands are denorm)
| force_unf;
| if((dest_exp + src_exp) < 0)
| force_unf:
| else
| restore_to_fpu;
|
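| As a hedged C sketch (illustrative names), the wrap test described above is:
|
|	enum wrap_op { W_ADD, W_SUB, W_DIV, W_CMP, W_MUL };
|	static int is_wrap_case(enum wrap_op op, long dest_exp, long src_exp)
|	{
|		long d;
|		if (op == W_MUL)
|			return (dest_exp + src_exp) < 0;
|		d = dest_exp - src_exp;		/* add, sub, div, cmp */
|		if (d < 0)
|			d = -d;
|		return d >= 0x8000;
|	}
|
| where the exponents are the signed values left after normalizing the
| denormalized operand(s).
|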
| local equates:
.set addcode,0x22
.set subcode,0x28
.set mulcode,0x23
.set divcode,0x20
.set cmpcode,0x38
ck_wrap:
| tstb DY_MO_FLG(%a6) ;check for fsqrt
beq fix_stk |if zero, it is fsqrt
movew CMDREG1B(%a6),%d0
andiw #0x3b,%d0 |strip to command bits
cmpiw #addcode,%d0
beq wrap_add
cmpiw #subcode,%d0
beq wrap_sub
cmpiw #mulcode,%d0
beq wrap_mul
cmpiw #cmpcode,%d0
beq wrap_cmp
|
| Inst is fdiv.
|
wrap_div:
cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
beq fix_stk |restore to fpu
|
| One of the ops is denormalized. Test for wrap condition
| and force the result.
|
cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
bnes div_srcd
div_destd:
bsrl ckinf_ns
bne fix_stk
bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
subl %d1,%d0 |subtract dest from src
cmpl #0x7fff,%d0
blt fix_stk |if less, not wrap case
clrb WBTEMP_SGN(%a6)
movew ETEMP_EX(%a6),%d0 |find the sign of the result
movew FPTEMP_EX(%a6),%d1
eorw %d1,%d0
andiw #0x8000,%d0
beq force_unf
st WBTEMP_SGN(%a6)
bra force_unf
ckinf_ns:
moveb STAG(%a6),%d0 |check source tag for inf or nan
bra ck_in_com
ckinf_nd:
moveb DTAG(%a6),%d0 |check destination tag for inf or nan
ck_in_com:
andib #0x60,%d0 |isolate tag bits
cmpb #0x40,%d0 |is it inf?
beq nan_or_inf |not wrap case
cmpb #0x60,%d0 |is it nan?
beq nan_or_inf |yes, not wrap case?
cmpb #0x20,%d0 |is it a zero?
beq nan_or_inf |yes
clrl %d0
rts |then ; it is either a zero or norm,
| ;check wrap case
nan_or_inf:
moveql #-1,%d0
rts
div_srcd:
bsrl ckinf_nd
bne fix_stk
bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
subl %d1,%d0 |subtract src from dest
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
clrb WBTEMP_SGN(%a6)
movew ETEMP_EX(%a6),%d0 |find the sign of the result
movew FPTEMP_EX(%a6),%d1
eorw %d1,%d0
andiw #0x8000,%d0
beqs force_ovf
st WBTEMP_SGN(%a6)
|
| This code handles the case of the instruction resulting in
| an overflow condition.
|
force_ovf:
bclrb #E1,E_BYTE(%a6)
orl #ovfl_inx_mask,USER_FPSR(%a6)
clrw NMNEXC(%a6)
leal WBTEMP(%a6),%a0 |point a0 to memory location
movew CMDREG1B(%a6),%d0
btstl #6,%d0 |test for forced precision
beqs frcovf_fpcr
btstl #2,%d0 |check for double
bnes frcovf_dbl
movel #0x1,%d0 |inst is forced single
bras frcovf_rnd
frcovf_dbl:
movel #0x2,%d0 |inst is forced double
bras frcovf_rnd
frcovf_fpcr:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |inst not forced - use fpcr prec
frcovf_rnd:
| The 881/882 does not set inex2 for the following case, so the
| line is commented out to be compatible with 881/882
| tst.b %d0
| beq.b frcovf_x
| or.l #inex2_mask,USER_FPSR(%a6) ;if prec is s or d, set inex2
|frcovf_x:
bsrl ovf_res |get correct result based on
| ;round precision/mode. This
| ;sets FPSR_CC correctly
| ;returns in external format
bfclr WBTEMP_SGN(%a6){#0:#8}
beq frcfpn
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpn
|
| Inst is fadd.
|
wrap_add:
cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
beq fix_stk |restore to fpu
|
| One of the ops is denormalized. Test for wrap condition
| and complete the instruction.
|
cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
bnes add_srcd
add_destd:
bsrl ckinf_ns
bne fix_stk
bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
subl %d1,%d0 |subtract dest from src
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
bra add_wrap
add_srcd:
bsrl ckinf_nd
bne fix_stk
bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
subl %d1,%d0 |subtract src from dest
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
|
| Check the signs of the operands. If they are unlike, the fpu
| can be used to add the norm and 1.0 with the sign of the
| denorm and it will correctly generate the result in extended
| precision. We can then call round with no sticky and the result
| will be correct for the user's rounding mode and precision. If
| the signs are the same, we call round with the sticky bit set
| and the result will be correct for the user's rounding mode and
| precision.
|
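| A compact C-like sketch of the unlike-signs path that follows
| (illustrative names; this mirrors the code, it does not replace it):
|
|	denorm->exp_word = (denorm->sign << 15) | 0x3fff;  /* force ~ +/-1.0 */
|	wbtemp = fpu_add_ext(etemp, fptemp);   /* user rmode, ext precision */
|	round(&wbtemp, user_prec, user_rmode, 0 /* sticky */);
|
| Any nonzero value that is far below one ulp of the norm produces the same
| extended-precision sum, so a magnitude near 1.0 is a safe stand-in for the
| true denormalized operand in the wrap case.
|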
add_wrap:
movew ETEMP_EX(%a6),%d0
movew FPTEMP_EX(%a6),%d1
eorw %d1,%d0
andiw #0x8000,%d0
beq add_same
|
| The signs are unlike.
|
cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
bnes add_u_srcd
movew FPTEMP_EX(%a6),%d0
andiw #0x8000,%d0
orw #0x3fff,%d0 |force the exponent to +/- 1
movew %d0,FPTEMP_EX(%a6) |in the denorm
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
fmovel %d0,%fpcr |set up users rmode and X
fmovex ETEMP(%a6),%fp0
faddx FPTEMP(%a6),%fp0
leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
fmovex %fp0,WBTEMP(%a6) |write result to memory
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
clrl %d0 |force sticky to zero
bclrb #sign_bit,WBTEMP_EX(%a6)
sne WBTEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beq frcfpnr
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpnr
add_u_srcd:
movew ETEMP_EX(%a6),%d0
andiw #0x8000,%d0
orw #0x3fff,%d0 |force the exponent to +/- 1
movew %d0,ETEMP_EX(%a6) |in the denorm
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
fmovel %d0,%fpcr |set up users rmode and X
fmovex ETEMP(%a6),%fp0
faddx FPTEMP(%a6),%fp0
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
fmovex %fp0,WBTEMP(%a6) |write result to memory
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
clrl %d0 |force sticky to zero
bclrb #sign_bit,WBTEMP_EX(%a6)
sne WBTEMP_SGN(%a6) |use internal format for round
bsrl round |round result to users rmode & prec
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beq frcfpnr
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpnr
|
| Signs are alike:
|
add_same:
cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
bnes add_s_srcd
add_s_destd:
leal ETEMP(%a6),%a0
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
movel #0x20000000,%d0 |set sticky for round
bclrb #sign_bit,ETEMP_EX(%a6)
sne ETEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr ETEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs add_s_dclr
bsetb #sign_bit,ETEMP_EX(%a6)
add_s_dclr:
leal WBTEMP(%a6),%a0
movel ETEMP(%a6),(%a0) |write result to wbtemp
movel ETEMP_HI(%a6),4(%a0)
movel ETEMP_LO(%a6),8(%a0)
tstw ETEMP_EX(%a6)
bgt add_ckovf
orl #neg_mask,USER_FPSR(%a6)
bra add_ckovf
add_s_srcd:
leal FPTEMP(%a6),%a0
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
movel #0x20000000,%d0 |set sticky for round
bclrb #sign_bit,FPTEMP_EX(%a6)
sne FPTEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr FPTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs add_s_sclr
bsetb #sign_bit,FPTEMP_EX(%a6)
add_s_sclr:
leal WBTEMP(%a6),%a0
movel FPTEMP(%a6),(%a0) |write result to wbtemp
movel FPTEMP_HI(%a6),4(%a0)
movel FPTEMP_LO(%a6),8(%a0)
tstw FPTEMP_EX(%a6)
bgt add_ckovf
orl #neg_mask,USER_FPSR(%a6)
add_ckovf:
movew WBTEMP_EX(%a6),%d0
andiw #0x7fff,%d0
cmpiw #0x7fff,%d0
bne frcfpnr
|
| The result has overflowed to $7fff exponent. Set I, ovfl,
| and aovfl, and clr the mantissa (incorrectly set by the
| round routine.)
|
orl #inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
clrl 4(%a0)
bra frcfpnr
|
| Inst is fsub.
|
wrap_sub:
cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
beq fix_stk |restore to fpu
|
| One of the ops is denormalized. Test for wrap condition
| and complete the instruction.
|
cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
bnes sub_srcd
sub_destd:
bsrl ckinf_ns
bne fix_stk
bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
subl %d1,%d0 |subtract dest from src
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
bra sub_wrap
sub_srcd:
bsrl ckinf_nd
bne fix_stk
bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
subl %d1,%d0 |subtract src from dest
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
|
| Check the signs of the operands. If they are alike, the fpu
| can be used to subtract from the norm 1.0 with the sign of the
| denorm and it will correctly generate the result in extended
| precision. We can then call round with no sticky and the result
| will be correct for the user's rounding mode and precision. If
| the signs are unlike, we call round with the sticky bit set
| and the result will be correct for the user's rounding mode and
| precision.
|
sub_wrap:
movew ETEMP_EX(%a6),%d0
movew FPTEMP_EX(%a6),%d1
eorw %d1,%d0
andiw #0x8000,%d0
bne sub_diff
|
| The signs are alike.
|
cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
bnes sub_u_srcd
movew FPTEMP_EX(%a6),%d0
andiw #0x8000,%d0
orw #0x3fff,%d0 |force the exponent to +/- 1
movew %d0,FPTEMP_EX(%a6) |in the denorm
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
fmovel %d0,%fpcr |set up users rmode and X
fmovex FPTEMP(%a6),%fp0
fsubx ETEMP(%a6),%fp0
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
fmovex %fp0,WBTEMP(%a6) |write result to memory
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
clrl %d0 |force sticky to zero
bclrb #sign_bit,WBTEMP_EX(%a6)
sne WBTEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beq frcfpnr
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpnr
sub_u_srcd:
movew ETEMP_EX(%a6),%d0
andiw #0x8000,%d0
orw #0x3fff,%d0 |force the exponent to +/- 1
movew %d0,ETEMP_EX(%a6) |in the denorm
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
fmovel %d0,%fpcr |set up users rmode and X
fmovex FPTEMP(%a6),%fp0
fsubx ETEMP(%a6),%fp0
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture cc's and inex from fadd
leal WBTEMP(%a6),%a0 |point a0 to wbtemp in frame
fmovex %fp0,WBTEMP(%a6) |write result to memory
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
clrl %d0 |force sticky to zero
bclrb #sign_bit,WBTEMP_EX(%a6)
sne WBTEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beq frcfpnr
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpnr
|
| Signs are unlike:
|
sub_diff:
cmpb #0x0f,DNRM_FLG(%a6) |is dest the denorm?
bnes sub_s_srcd
sub_s_destd:
leal ETEMP(%a6),%a0
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
movel #0x20000000,%d0 |set sticky for round
|
| Since the dest is the denorm, the sign is the opposite of the
| norm sign.
|
eoriw #0x8000,ETEMP_EX(%a6) |flip sign on result
tstw ETEMP_EX(%a6)
bgts sub_s_dwr
orl #neg_mask,USER_FPSR(%a6)
sub_s_dwr:
bclrb #sign_bit,ETEMP_EX(%a6)
sne ETEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr ETEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs sub_s_dclr
bsetb #sign_bit,ETEMP_EX(%a6)
sub_s_dclr:
leal WBTEMP(%a6),%a0
movel ETEMP(%a6),(%a0) |write result to wbtemp
movel ETEMP_HI(%a6),4(%a0)
movel ETEMP_LO(%a6),8(%a0)
bra sub_ckovf
sub_s_srcd:
leal FPTEMP(%a6),%a0
movel USER_FPCR(%a6),%d0
andil #0x30,%d0
lsrl #4,%d0 |put rmode in lower 2 bits
movel USER_FPCR(%a6),%d1
andil #0xc0,%d1
lsrl #6,%d1 |put precision in upper word
swap %d1
orl %d0,%d1 |set up for round call
movel #0x20000000,%d0 |set sticky for round
bclrb #sign_bit,FPTEMP_EX(%a6)
sne FPTEMP_SGN(%a6)
bsrl round |round result to users rmode & prec
bfclr FPTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs sub_s_sclr
bsetb #sign_bit,FPTEMP_EX(%a6)
sub_s_sclr:
leal WBTEMP(%a6),%a0
movel FPTEMP(%a6),(%a0) |write result to wbtemp
movel FPTEMP_HI(%a6),4(%a0)
movel FPTEMP_LO(%a6),8(%a0)
tstw FPTEMP_EX(%a6)
bgt sub_ckovf
orl #neg_mask,USER_FPSR(%a6)
sub_ckovf:
movew WBTEMP_EX(%a6),%d0
andiw #0x7fff,%d0
cmpiw #0x7fff,%d0
bne frcfpnr
|
| The result has overflowed to $7fff exponent. Set I, ovfl,
| and aovfl, and clr the mantissa (incorrectly set by the
| round routine.)
|
orl #inf_mask+ovfl_inx_mask,USER_FPSR(%a6)
clrl 4(%a0)
bra frcfpnr
|
| Inst is fcmp.
|
wrap_cmp:
cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
beq fix_stk |restore to fpu
|
| One of the ops is denormalized. Test for wrap condition
| and complete the instruction.
|
cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
bnes cmp_srcd
cmp_destd:
bsrl ckinf_ns
bne fix_stk
bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
subl %d1,%d0 |subtract dest from src
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
tstw ETEMP_EX(%a6) |set N to ~sign_of(src)
bge cmp_setn
rts
cmp_srcd:
bsrl ckinf_nd
bne fix_stk
bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
subl %d1,%d0 |subtract src from dest
cmpl #0x8000,%d0
blt fix_stk |if less, not wrap case
tstw FPTEMP_EX(%a6) |set N to sign_of(dest)
blt cmp_setn
rts
cmp_setn:
orl #neg_mask,USER_FPSR(%a6)
rts
|
| Inst is fmul.
|
wrap_mul:
cmpb #0xff,DNRM_FLG(%a6) |if both ops denorm,
beq force_unf |force an underflow (really!)
|
| One of the ops is denormalized. Test for wrap condition
| and complete the instruction.
|
cmpb #0x0f,DNRM_FLG(%a6) |check for dest denorm
bnes mul_srcd
mul_destd:
bsrl ckinf_ns
bne fix_stk
bfextu ETEMP_EX(%a6){#1:#15},%d0 |get src exp (always pos)
bfexts FPTEMP_EX(%a6){#1:#15},%d1 |get dest exp (always neg)
addl %d1,%d0 |add the exponents; dest exp is negative
bgt fix_stk
bra force_unf
mul_srcd:
bsrl ckinf_nd
bne fix_stk
bfextu FPTEMP_EX(%a6){#1:#15},%d0 |get dest exp (always pos)
bfexts ETEMP_EX(%a6){#1:#15},%d1 |get src exp (always neg)
addl %d1,%d0 |add the exponents; src exp is negative
bgt fix_stk
|
| This code handles the case of the instruction resulting in
| an underflow condition.
|
force_unf:
bclrb #E1,E_BYTE(%a6)
orl #unfinx_mask,USER_FPSR(%a6)
clrw NMNEXC(%a6)
clrb WBTEMP_SGN(%a6)
movew ETEMP_EX(%a6),%d0 |find the sign of the result
movew FPTEMP_EX(%a6),%d1
eorw %d1,%d0
andiw #0x8000,%d0
beqs frcunfcont
st WBTEMP_SGN(%a6)
frcunfcont:
lea WBTEMP(%a6),%a0 |point a0 to memory location
movew CMDREG1B(%a6),%d0
btstl #6,%d0 |test for forced precision
beqs frcunf_fpcr
btstl #2,%d0 |check for double
bnes frcunf_dbl
movel #0x1,%d0 |inst is forced single
bras frcunf_rnd
frcunf_dbl:
movel #0x2,%d0 |inst is forced double
bras frcunf_rnd
frcunf_fpcr:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |inst not forced - use fpcr prec
frcunf_rnd:
bsrl unf_sub |get correct result based on
| ;round precision/mode. This
| ;sets FPSR_CC correctly
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs frcfpn
bsetb #sign_bit,WBTEMP_EX(%a6)
bra frcfpn
|
| Write the result to the user's fpn. All results must be HUGE to be
| written; otherwise the results would have overflowed or underflowed.
| If the rounding precision is single or double, the ovf_res routine
| is needed to correctly supply the max value.
|
frcfpnr:
movew CMDREG1B(%a6),%d0
btstl #6,%d0 |test for forced precision
beqs frcfpn_fpcr
btstl #2,%d0 |check for double
bnes frcfpn_dbl
movel #0x1,%d0 |inst is forced single
bras frcfpn_rnd
frcfpn_dbl:
movel #0x2,%d0 |inst is forced double
bras frcfpn_rnd
frcfpn_fpcr:
bfextu FPCR_MODE(%a6){#0:#2},%d0 |inst not forced - use fpcr prec
tstb %d0
beqs frcfpn |if extended, write what you got
frcfpn_rnd:
bclrb #sign_bit,WBTEMP_EX(%a6)
sne WBTEMP_SGN(%a6)
bsrl ovf_res |get correct result based on
| ;round precision/mode. This
| ;sets FPSR_CC correctly
bfclr WBTEMP_SGN(%a6){#0:#8} |convert back to IEEE ext format
beqs frcfpn_clr
bsetb #sign_bit,WBTEMP_EX(%a6)
frcfpn_clr:
orl #ovfinx_mask,USER_FPSR(%a6)
|
| Perform the write.
|
frcfpn:
bfextu CMDREG1B(%a6){#6:#3},%d0 |extract fp destination register
cmpib #3,%d0
bles frc0123 |check if dest is fp0-fp3
movel #7,%d1
subl %d0,%d1
clrl %d0
bsetl %d1,%d0
fmovemx WBTEMP(%a6),%d0
rts
frc0123:
cmpib #0,%d0
beqs frc0_dst
cmpib #1,%d0
beqs frc1_dst
cmpib #2,%d0
beqs frc2_dst
frc3_dst:
movel WBTEMP_EX(%a6),USER_FP3(%a6)
movel WBTEMP_HI(%a6),USER_FP3+4(%a6)
movel WBTEMP_LO(%a6),USER_FP3+8(%a6)
rts
frc2_dst:
movel WBTEMP_EX(%a6),USER_FP2(%a6)
movel WBTEMP_HI(%a6),USER_FP2+4(%a6)
movel WBTEMP_LO(%a6),USER_FP2+8(%a6)
rts
frc1_dst:
movel WBTEMP_EX(%a6),USER_FP1(%a6)
movel WBTEMP_HI(%a6),USER_FP1+4(%a6)
movel WBTEMP_LO(%a6),USER_FP1+8(%a6)
rts
frc0_dst:
movel WBTEMP_EX(%a6),USER_FP0(%a6)
movel WBTEMP_HI(%a6),USER_FP0+4(%a6)
movel WBTEMP_LO(%a6),USER_FP0+8(%a6)
rts
|
| Write etemp to fpn.
| A check is made on enabled and signalled snan exceptions,
| and the destination is not overwritten if this condition exists.
| This code is designed to make fmoveins of unsupported data types
| faster.
|
wr_etemp:
btstb #snan_bit,FPSR_EXCEPT(%a6) |if snan is set, and
beqs fmoveinc |enabled, force restore
btstb #snan_bit,FPCR_ENABLE(%a6) |and don't overwrite
beqs fmoveinc |the dest
movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
| ;snan handler
tstb ETEMP(%a6) |check for negative
blts snan_neg
rts
snan_neg:
orl #neg_bit,USER_FPSR(%a6) |snan is negative; set N
rts
fmoveinc:
clrw NMNEXC(%a6)
bclrb #E1,E_BYTE(%a6)
moveb STAG(%a6),%d0 |check if stag is inf
andib #0xe0,%d0
cmpib #0x40,%d0
bnes fminc_cnan
orl #inf_mask,USER_FPSR(%a6) |if inf, nothing yet has set I
tstw LOCAL_EX(%a0) |check sign
bges fminc_con
orl #neg_mask,USER_FPSR(%a6)
bra fminc_con
fminc_cnan:
cmpib #0x60,%d0 |check if stag is NaN
bnes fminc_czero
orl #nan_mask,USER_FPSR(%a6) |if nan, nothing yet has set NaN
movel ETEMP_EX(%a6),FPTEMP_EX(%a6) |set up fptemp sign for
| ;snan handler
tstw LOCAL_EX(%a0) |check sign
bges fminc_con
orl #neg_mask,USER_FPSR(%a6)
bra fminc_con
fminc_czero:
cmpib #0x20,%d0 |check if zero
bnes fminc_con
orl #z_mask,USER_FPSR(%a6) |if zero, set Z
tstw LOCAL_EX(%a0) |check sign
bges fminc_con
orl #neg_mask,USER_FPSR(%a6)
fminc_con:
bfextu CMDREG1B(%a6){#6:#3},%d0 |extract fp destination register
cmpib #3,%d0
bles fp0123 |check if dest is fp0-fp3
movel #7,%d1
subl %d0,%d1
clrl %d0
bsetl %d1,%d0
fmovemx ETEMP(%a6),%d0
rts
fp0123:
cmpib #0,%d0
beqs fp0_dst
cmpib #1,%d0
beqs fp1_dst
cmpib #2,%d0
beqs fp2_dst
fp3_dst:
movel ETEMP_EX(%a6),USER_FP3(%a6)
movel ETEMP_HI(%a6),USER_FP3+4(%a6)
movel ETEMP_LO(%a6),USER_FP3+8(%a6)
rts
fp2_dst:
movel ETEMP_EX(%a6),USER_FP2(%a6)
movel ETEMP_HI(%a6),USER_FP2+4(%a6)
movel ETEMP_LO(%a6),USER_FP2+8(%a6)
rts
fp1_dst:
movel ETEMP_EX(%a6),USER_FP1(%a6)
movel ETEMP_HI(%a6),USER_FP1+4(%a6)
movel ETEMP_LO(%a6),USER_FP1+8(%a6)
rts
fp0_dst:
movel ETEMP_EX(%a6),USER_FP0(%a6)
movel ETEMP_HI(%a6),USER_FP0+4(%a6)
movel ETEMP_LO(%a6),USER_FP0+8(%a6)
rts
opclass3:
st CU_ONLY(%a6)
movew CMDREG1B(%a6),%d0 |check if packed moveout
andiw #0x0c00,%d0 |isolate last 2 bits of size field
cmpiw #0x0c00,%d0 |if size is 011 or 111, it is packed
beq pack_out |else it is norm or denorm
bra mv_out
|
| MOVE OUT
|
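| For reference, the {3:3} source specifier selects an entry as follows:
| 0 = long word integer (li), 1 = single (sgp), 2 = extended (xp),
| 4 = word integer (wi), 5 = double (dp), 6 = byte integer (bi);
| 3 and 7 are the packed formats, which were already routed to pack_out.
|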
mv_tbl:
.long li
.long sgp
.long xp
.long mvout_end |should never be taken
.long wi
.long dp
.long bi
.long mvout_end |should never be taken
mv_out:
bfextu CMDREG1B(%a6){#3:#3},%d1 |put source specifier in d1
leal mv_tbl,%a0
movel %a0@(%d1:l:4),%a0
jmp (%a0)
|
| This exit is for move-out to memory. The aunfl bit is
| set if the result is inex and unfl is signalled.
|
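| In C-like terms (hedged reading of the three tests below):
|
|	if ((fpsr_except & INEX2) && (fpsr_except & UNFL))
|		fpsr_aexcept |= AUNFL;
|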
mvout_end:
btstb #inex2_bit,FPSR_EXCEPT(%a6)
beqs no_aufl
btstb #unfl_bit,FPSR_EXCEPT(%a6)
beqs no_aufl
bsetb #aunfl_bit,FPSR_AEXCEPT(%a6)
no_aufl:
clrw NMNEXC(%a6)
bclrb #E1,E_BYTE(%a6)
fmovel #0,%FPSR |clear any cc bits from res_func
|
| Return ETEMP to extended format from internal extended format so
| that gen_except will have a correctly signed value for ovfl/unfl
| handlers.
|
bfclr ETEMP_SGN(%a6){#0:#8}
beqs mvout_con
bsetb #sign_bit,ETEMP_EX(%a6)
mvout_con:
rts
|
| This exit is for move-out to int register. The aunfl bit is
| not set in any case for this move.
|
mvouti_end:
clrw NMNEXC(%a6)
bclrb #E1,E_BYTE(%a6)
fmovel #0,%FPSR |clear any cc bits from res_func
|
| Return ETEMP to extended format from internal extended format so
| that gen_except will have a correctly signed value for ovfl/unfl
| handlers.
|
bfclr ETEMP_SGN(%a6){#0:#8}
beqs mvouti_con
bsetb #sign_bit,ETEMP_EX(%a6)
mvouti_con:
rts
|
| li is used to handle a long integer source specifier
|
li:
moveql #4,%d0 |set byte count
btstb #7,STAG(%a6) |check for extended denorm
bne int_dnrm |if so, branch
fmovemx ETEMP(%a6),%fp0-%fp0
fcmpd #0x41dfffffffc00000,%fp0
| 41dfffffffc00000 in dbl prec = 401d0000fffffffe00000000 in ext prec
fbge lo_plrg
fcmpd #0xc1e0000000000000,%fp0
| c1e0000000000000 in dbl prec = c01e00008000000000000000 in ext prec
fble lo_nlrg
|
| at this point, the answer is between the largest pos and neg values
|
movel USER_FPCR(%a6),%d1 |use user's rounding mode
andil #0x30,%d1
fmovel %d1,%fpcr
fmovel %fp0,L_SCR1(%a6) |let the 040 perform conversion
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set
bra int_wrt
lo_plrg:
movel #0x7fffffff,L_SCR1(%a6) |answer is largest positive int
fbeq int_wrt |exact answer
fcmpd #0x41dfffffffe00000,%fp0
| 41dfffffffe00000 in dbl prec = 401d0000ffffffff00000000 in ext prec
fbge int_operr |set operr
bra int_inx |set inexact
lo_nlrg:
movel #0x80000000,L_SCR1(%a6)
fbeq int_wrt |exact answer
fcmpd #0xc1e0000000100000,%fp0
| c1e0000000100000 in dbl prec = c01e00008000000080000000 in ext prec
fblt int_operr |set operr
bra int_inx |set inexact
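|
| A hedged C-style outline of the long word integer case above (threshold
| names are illustrative; the exact bounds are the fcmpd constants whose
| extended-precision equivalents appear in the comments):
|
|	if (x >= LONG_UPPER) {			/* at or above 2^31 - 1 */
|		result = 0x7fffffff;
|		if (x == LONG_UPPER)		/* exact */
|			;
|		else if (x >= LONG_OPERR)	/* too big to represent */
|			set_operr();
|		else
|			set_inex2();
|	} else if (x <= LONG_LOWER) {		/* at or below -2^31; symmetric */
|		result = 0x80000000;		/* operr/inex2 checks as above */
|	} else {
|		result = fpu_fmove_long(x, user_rmode);	/* let the 040 convert */
|	}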
|
| wi is used to handle a word integer source specifier
|
wi:
moveql #2,%d0 |set byte count
btstb #7,STAG(%a6) |check for extended denorm
bne int_dnrm |branch if so
fmovemx ETEMP(%a6),%fp0-%fp0
fcmps #0x46fffe00,%fp0
| 46fffe00 in sgl prec = 400d0000fffe000000000000 in ext prec
fbge wo_plrg
fcmps #0xc7000000,%fp0
| c7000000 in sgl prec = c00e00008000000000000000 in ext prec
fble wo_nlrg
|
| at this point, the answer is between the largest pos and neg values
|
movel USER_FPCR(%a6),%d1 |use user's rounding mode
andil #0x30,%d1
fmovel %d1,%fpcr
fmovew %fp0,L_SCR1(%a6) |let the 040 perform conversion
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set
bra int_wrt
wo_plrg:
movew #0x7fff,L_SCR1(%a6) |answer is largest positive int
fbeq int_wrt |exact answer
fcmps #0x46ffff00,%fp0
| 46ffff00 in sgl prec = 400d0000ffff000000000000 in ext prec
fbge int_operr |set operr
bra int_inx |set inexact
wo_nlrg:
movew #0x8000,L_SCR1(%a6)
fbeq int_wrt |exact answer
fcmps #0xc7000080,%fp0
| c7000080 in sgl prec = c00e00008000800000000000 in ext prec
fblt int_operr |set operr
bra int_inx |set inexact
|
| bi is used to handle a byte integer source specifier
|
bi:
moveql #1,%d0 |set byte count
btstb #7,STAG(%a6) |check for extended denorm
bne int_dnrm |branch if so
fmovemx ETEMP(%a6),%fp0-%fp0
fcmps #0x42fe0000,%fp0
| 42fe0000 in sgl prec = 40050000fe00000000000000 in ext prec
fbge by_plrg
fcmps #0xc3000000,%fp0
| c3000000 in sgl prec = c00600008000000000000000 in ext prec
fble by_nlrg
|
| at this point, the answer is between the largest pos and neg values
|
movel USER_FPCR(%a6),%d1 |use user's rounding mode
andil #0x30,%d1
fmovel %d1,%fpcr
fmoveb %fp0,L_SCR1(%a6) |let the 040 perform conversion
fmovel %fpsr,%d1
orl %d1,USER_FPSR(%a6) |capture inex2/ainex if set
bra int_wrt
by_plrg:
moveb #0x7f,L_SCR1(%a6) |answer is largest positive int
fbeq int_wrt |exact answer
fcmps #0x42ff0000,%fp0
| 42ff0000 in sgl prec = 40050000ff00000000000000 in ext prec
fbge int_operr |set operr
bra int_inx |set inexact
by_nlrg:
moveb #0x80,L_SCR1(%a6)
fbeq int_wrt |exact answer
fcmps #0xc3008000,%fp0
| c3008000 in sgl prec = c00600008080000000000000 in ext prec
fblt int_operr |set operr
bra int_inx |set inexact
|
| Common integer routines
|
| int_dnrm---account for a possible nonzero result for round up with a positive
| operand or round down with a negative operand. In the first case (result = 1)
| the byte-width (stored in d0) of the result must be honored. In the second case,
| -1 in L_SCR1(a6) will cover all contingencies (FMOVE.B/W/L out).
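|
| A hedged C sketch of that decision (width handling omitted; the byte
| count set by li/wi/bi arrives in d0):
|
|	long r = 0;
|	if (rmode == RM && src_is_negative)
|		r = -1;			/* -1 works for any destination width */
|	else if (rmode == RP && !src_is_negative)
|		r = 1;			/* +1, stored in the low byte/word/long */
|	set_inex2();			/* the discarded fraction was nonzero */
|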
int_dnrm:
movel #0,L_SCR1(%a6) | initialize result to 0
bfextu FPCR_MODE(%a6){#2:#2},%d1 | d1 is the rounding mode
cmpb #2,%d1
bmis int_inx | if RN or RZ, done
bnes int_rp | if RP, continue below
tstw ETEMP(%a6) | RM: store -1 in L_SCR1 if src is negative
bpls int_inx | otherwise result is 0
movel #-1,L_SCR1(%a6)
bras int_inx
int_rp:
tstw ETEMP(%a6) | RP: store +1 of proper width in L_SCR1 if
| ; source is greater than 0
bmis int_inx | otherwise, result is 0
lea L_SCR1(%a6),%a1 | a1 is address of L_SCR1
addal %d0,%a1 | offset by destination width -1
subal #1,%a1
bsetb #0,(%a1) | set low bit at a1 address
int_inx:
oril #inx2a_mask,USER_FPSR(%a6)
bras int_wrt
int_operr:
fmovemx %fp0-%fp0,FPTEMP(%a6) |FPTEMP must contain the extended
| ;precision source that needs to be
| ;converted to integer this is required
| ;if the operr exception is enabled.
| ;set operr/aiop (no inex2 on int ovfl)
oril #opaop_mask,USER_FPSR(%a6)
| ;fall through to perform int_wrt
int_wrt:
movel EXC_EA(%a6),%a1 |load destination address
tstl %a1 |check to see if it is a dest register
beqs wrt_dn |write data register
lea L_SCR1(%a6),%a0 |point to supervisor source address
bsrl mem_write
bra mvouti_end
wrt_dn:
movel %d0,-(%sp) |d0 currently contains the size to write
bsrl get_fline |get_fline returns Dn in d0
andiw #0x7,%d0 |isolate register
movel (%sp)+,%d1 |get size
cmpil #4,%d1 |most frequent case
beqs sz_long
cmpil #2,%d1
bnes sz_con
orl #8,%d0 |add 'word' size to register#
bras sz_con
sz_long:
orl #0x10,%d0 |add 'long' size to register#
sz_con:
movel %d0,%d1 |reg_dest expects size:reg in d1
bsrl reg_dest |load proper data register
bra mvouti_end
xp:
lea ETEMP(%a6),%a0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
btstb #7,STAG(%a6) |check for extended denorm
bne xdnrm
clrl %d0
bras do_fp |do normal case
sgp:
lea ETEMP(%a6),%a0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
btstb #7,STAG(%a6) |check for extended denorm
bne sp_catas |branch if so
movew LOCAL_EX(%a0),%d0
lea sp_bnds,%a1
cmpw (%a1),%d0
blt sp_under
cmpw 2(%a1),%d0
bgt sp_over
movel #1,%d0 |set destination format to single
bras do_fp |do normal case
dp:
lea ETEMP(%a6),%a0
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0)
btstb #7,STAG(%a6) |check for extended denorm
bne dp_catas |branch if so
movew LOCAL_EX(%a0),%d0
lea dp_bnds,%a1
cmpw (%a1),%d0
blt dp_under
cmpw 2(%a1),%d0
bgt dp_over
movel #2,%d0 |set destination format to double
| ;fall through to do_fp
|
do_fp:
bfextu FPCR_MODE(%a6){#2:#2},%d1 |rnd mode in d1
swap %d0 |rnd prec in upper word
addl %d0,%d1 |d1 has PREC/MODE info
clrl %d0 |clear g,r,s
bsrl round |round
movel %a0,%a1
movel EXC_EA(%a6),%a0
bfextu CMDREG1B(%a6){#3:#3},%d1 |extract destination format
| ;at this point only the dest
| ;formats sgl, dbl, ext are
| ;possible
cmpb #2,%d1
bgts ddbl |double=5, extended=2, single=1
bnes dsgl
| ;fall through to dext
dext:
bsrl dest_ext
bra mvout_end
dsgl:
bsrl dest_sgl
bra mvout_end
ddbl:
bsrl dest_dbl
bra mvout_end
|
| Handle possible denorm or catastrophic underflow cases here
|
xdnrm:
bsr set_xop |initialize WBTEMP
bsetb #wbtemp15_bit,WB_BYTE(%a6) |set wbtemp15
movel %a0,%a1
movel EXC_EA(%a6),%a0 |a0 has the destination pointer
bsrl dest_ext |store to memory
bsetb #unfl_bit,FPSR_EXCEPT(%a6)
bra mvout_end
sp_under:
bsetb #etemp15_bit,STAG(%a6)
cmpw 4(%a1),%d0
blts sp_catas |catastrophic underflow case
movel #1,%d0 |load in round precision
movel #sgl_thresh,%d1 |load in single denorm threshold
bsrl dpspdnrm |expects d1 to have the proper
| ;denorm threshold
bsrl dest_sgl |stores value to destination
bsetb #unfl_bit,FPSR_EXCEPT(%a6)
bra mvout_end |exit
dp_under:
bsetb #etemp15_bit,STAG(%a6)
cmpw 4(%a1),%d0
blts dp_catas |catastrophic underflow case
movel #dbl_thresh,%d1 |load in double precision threshold
movel #2,%d0
bsrl dpspdnrm |expects d1 to have proper
| ;denorm threshold
| ;expects d0 to have round precision
bsrl dest_dbl |store value to destination
bsetb #unfl_bit,FPSR_EXCEPT(%a6)
bra mvout_end |exit
|
| Handle catastrophic underflow cases here
|
sp_catas:
| Temp fix for z bit set in unf_sub
movel USER_FPSR(%a6),-(%a7)
movel #1,%d0 |set round precision to sgl
bsrl unf_sub |a0 points to result
movel (%a7)+,USER_FPSR(%a6)
movel #1,%d0
subw %d0,LOCAL_EX(%a0) |account for difference between
| ;denorm/norm bias
movel %a0,%a1 |a1 has the operand input
movel EXC_EA(%a6),%a0 |a0 has the destination pointer
bsrl dest_sgl |store the result
oril #unfinx_mask,USER_FPSR(%a6)
bra mvout_end
dp_catas:
| Temp fix for z bit set in unf_sub
movel USER_FPSR(%a6),-(%a7)
movel #2,%d0 |set round precision to dbl
bsrl unf_sub |a0 points to result
movel (%a7)+,USER_FPSR(%a6)
movel #1,%d0
subw %d0,LOCAL_EX(%a0) |account for difference between
| ;denorm/norm bias
movel %a0,%a1 |a1 has the operand input
movel EXC_EA(%a6),%a0 |a0 has the destination pointer
bsrl dest_dbl |store the result
oril #unfinx_mask,USER_FPSR(%a6)
bra mvout_end
|
| Handle catastrophic overflow cases here
|
sp_over:
| Temp fix for z bit set in ovf_res
movel USER_FPSR(%a6),-(%a7)
movel #1,%d0
leal FP_SCR1(%a6),%a0 |use FP_SCR1 for creating result
movel ETEMP_EX(%a6),(%a0)
movel ETEMP_HI(%a6),4(%a0)
movel ETEMP_LO(%a6),8(%a0)
bsrl ovf_res
movel (%a7)+,USER_FPSR(%a6)
movel %a0,%a1
movel EXC_EA(%a6),%a0
bsrl dest_sgl
orl #ovfinx_mask,USER_FPSR(%a6)
bra mvout_end
dp_over:
| Temp fix for z bit set in ovf_res
movel USER_FPSR(%a6),-(%a7)
movel #2,%d0
leal FP_SCR1(%a6),%a0 |use FP_SCR1 for creating result
movel ETEMP_EX(%a6),(%a0)
movel ETEMP_HI(%a6),4(%a0)
movel ETEMP_LO(%a6),8(%a0)
bsrl ovf_res
movel (%a7)+,USER_FPSR(%a6)
movel %a0,%a1
movel EXC_EA(%a6),%a0
bsrl dest_dbl
orl #ovfinx_mask,USER_FPSR(%a6)
bra mvout_end
|
| DPSPDNRM
|
| This subroutine takes an extended normalized number and denormalizes
| it to the given round precision. This subroutine also decrements
| the input operand's exponent by 1 to account for the fact that
| dest_sgl or dest_dbl expects a normalized number's bias.
|
| Input: a0 points to a normalized number in internal extended format
| d0 is the round precision (=1 for sgl; =2 for dbl)
| d1 is the single precision or double precision
| denorm threshold
|
| Output: (In the format for dest_sgl or dest_dbl)
| a0 points to the destination
| a1 points to the operand
|
| Exceptions: Reports inexact 2 exception by setting USER_FPSR bits
|
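| Hedged outline of the sequence below (C-like, illustrative only):
|
|	dnrm_lp(op, 0 /* g,r,s */, threshold);	/* shift down to denorm range */
|	round(op, round_prec, fpcr_rmode);	/* may set inex2 in USER_FPSR */
|	op->exp -= 1;		/* dest_sgl/dest_dbl re-add the normalized bias */
|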
dpspdnrm:
movel %d0,-(%a7) |save round precision
clrl %d0 |clear initial g,r,s
bsrl dnrm_lp |careful with d0, it's needed by round
bfextu FPCR_MODE(%a6){#2:#2},%d1 |get rounding mode
swap %d1
movew 2(%a7),%d1 |set rounding precision
swap %d1 |at this point d1 has PREC/MODE info
bsrl round |round result, sets the inex bit in
| ;USER_FPSR if needed
movew #1,%d0
subw %d0,LOCAL_EX(%a0) |account for difference in denorm
| ;vs norm bias
movel %a0,%a1 |a1 has the operand input
movel EXC_EA(%a6),%a0 |a0 has the destination pointer
addw #4,%a7 |pop stack
rts
|
| SET_XOP initializes WBTEMP with the value pointed to by a0
| input: a0 points to input operand in the internal extended format
|
set_xop:
movel LOCAL_EX(%a0),WBTEMP_EX(%a6)
movel LOCAL_HI(%a0),WBTEMP_HI(%a6)
movel LOCAL_LO(%a0),WBTEMP_LO(%a6)
bfclr WBTEMP_SGN(%a6){#0:#8}
beqs sxop
bsetb #sign_bit,WBTEMP_EX(%a6)
sxop:
bfclr STAG(%a6){#5:#4} |clear wbtm66,wbtm1,wbtm0,sbit
rts
|
| P_MOVE
|
p_movet:
.long p_move
.long p_movez
.long p_movei
.long p_moven
.long p_move
p_regd:
.long p_dyd0
.long p_dyd1
.long p_dyd2
.long p_dyd3
.long p_dyd4
.long p_dyd5
.long p_dyd6
.long p_dyd7
pack_out:
leal p_movet,%a0 |load jmp table address
movew STAG(%a6),%d0 |get source tag
bfextu %d0{#16:#3},%d0 |isolate source bits
movel (%a0,%d0.w*4),%a0 |load a0 with routine label for tag
jmp (%a0) |go to the routine
p_write:
movel #0x0c,%d0 |get byte count
movel EXC_EA(%a6),%a1 |get the destination address
bsr mem_write |write the user's destination
moveb #0,CU_SAVEPC(%a6) |set the cu save pc to all 0's
|
| Also note that the dtag must be set to norm here - this is because
| the 040 uses the dtag to execute the correct microcode.
|
bfclr DTAG(%a6){#0:#3} |set dtag to norm
rts
| Notes on handling of special case (zero, inf, and nan) inputs:
| 1. Operr is not signalled if the k-factor is greater than 18.
| 2. Per the manual, status bits are not set.
|
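| As a hedged C sketch of the k-factor selection below (field positions as
| I read the bfextu/bfexts extracts; names illustrative):
|
|	if (cmdreg & (1 << kfact_bit)) {	/* dynamic k-factor */
|		n = (cmdreg >> 4) & 7;		/* data register number */
|		k = user_dreg[n];
|	} else {
|		k = cmdreg;			/* static k-factor */
|	}
|	k = sign_extend_7bit(k & 0x7f);		/* statick does this for both */
|	bindec(etemp, k);			/* binary -> packed decimal */
|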
p_move:
movew CMDREG1B(%a6),%d0
btstl #kfact_bit,%d0 |test for dynamic k-factor
beqs statick |if clear, k-factor is static
dynamick:
bfextu %d0{#25:#3},%d0 |isolate register for dynamic k-factor
lea p_regd,%a0
movel %a0@(%d0:l:4),%a0
jmp (%a0)
statick:
andiw #0x007f,%d0 |get k-factor
bfexts %d0{#25:#7},%d0 |sign extend d0 for bindec
leal ETEMP(%a6),%a0 |a0 will point to the packed decimal
bsrl bindec |perform the convert; data at a6
leal FP_SCR1(%a6),%a0 |load a0 with result address
bral p_write
p_movez:
leal ETEMP(%a6),%a0 |a0 will point to the packed decimal
clrw 2(%a0) |clear lower word of exp
clrl 4(%a0) |load second lword of ZERO
clrl 8(%a0) |load third lword of ZERO
bra p_write |go write results
p_movei:
fmovel #0,%FPSR |clear aiop
leal ETEMP(%a6),%a0 |a0 will point to the packed decimal
clrw 2(%a0) |clear lower word of exp
bra p_write |go write the result
p_moven:
leal ETEMP(%a6),%a0 |a0 will point to the packed decimal
clrw 2(%a0) |clear lower word of exp
bra p_write |go write the result
|
| Routines to read the dynamic k-factor from Dn.
|
p_dyd0:
movel USER_D0(%a6),%d0
bras statick
p_dyd1:
movel USER_D1(%a6),%d0
bras statick
p_dyd2:
movel %d2,%d0
bras statick
p_dyd3:
movel %d3,%d0
bras statick
p_dyd4:
movel %d4,%d0
bras statick
p_dyd5:
movel %d5,%d0
bras statick
p_dyd6:
movel %d6,%d0
bra statick
p_dyd7:
movel %d7,%d0
bra statick
|end
|
AirFortressIlikara/LS2K0300-linux-4.19 | 20,372 bytes | arch/m68k/fpsp040/get_op.S
|
| get_op.sa 3.6 5/19/92
|
| get_op.sa 3.5 4/26/91
|
| Description: This routine is called by the unsupported format/data
| type exception handler ('unsupp' - vector 55) and the unimplemented
| instruction exception handler ('unimp' - vector 11). 'get_op'
| determines the opclass (0, 2, or 3) and branches to the
| opclass handler routine. See 68881/2 User's Manual table 4-11
| for a description of the opclasses.
|
| For UNSUPPORTED data/format (exception vector 55) and for
| UNIMPLEMENTED instructions (exception vector 11) the following
| applies:
|
| - For unnormalized numbers (opclass 0, 2, or 3) the
| number(s) is normalized and the operand type tag is updated.
|
| - For a packed number (opclass 2) the number is unpacked and the
| operand type tag is updated.
|
| - For denormalized numbers (opclass 0 or 2) the number(s) is not
| changed but passed to the next module. The next module for
| unimp is do_func, the next module for unsupp is res_func.
|
| For UNSUPPORTED data/format (exception vector 55) only the
| following applies:
|
| - If there is a move out with a packed number (opclass 3) the
| number is packed and written to user memory. For the other
| opclasses the number(s) are written back to the fsave stack
| and the instruction is then restored back into the '040. The
| '040 is then able to complete the instruction.
|
| For example:
| fadd.x fpm,fpn where the fpm contains an unnormalized number.
| The '040 takes an unsupported data trap and gets to this
| routine. The number is normalized, put back on the stack and
| then an frestore is done to restore the instruction back into
| the '040. The '040 then re-executes the fadd.x fpm,fpn with
| a normalized number in the source and the instruction is
| successful.
|
| Next consider if in the process of normalizing the un-
| normalized number it becomes a denormalized number. The
| routine which converts the unnorm to a norm (called mk_norm)
| detects this and tags the number as a denorm. The routine
| res_func sees the denorm tag and converts the denorm to a
| norm. The instruction is then restored back into the '040
| which re-executes the instruction.
|
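| A minimal C-style sketch of that normalization step (illustrative only;
| mk_norm in this package also rewrites the operand tag and the fsave
| frame, and a nonzero mantissa is assumed):
|
|	while (!(mant_hi & 0x80000000)) {	/* wait for the explicit j bit */
|		mant_hi = (mant_hi << 1) | (mant_lo >> 31);
|		mant_lo <<= 1;
|		exp--;			/* may fall to or below zero: a denorm */
|	}
|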
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
GET_OP: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
.global PIRN,PIRZRM,PIRP
.global SMALRN,SMALRZRM,SMALRP
.global BIGRN,BIGRZRM,BIGRP
PIRN:
.long 0x40000000,0xc90fdaa2,0x2168c235 |pi
PIRZRM:
.long 0x40000000,0xc90fdaa2,0x2168c234 |pi
PIRP:
.long 0x40000000,0xc90fdaa2,0x2168c235 |pi
|round to nearest
SMALRN:
.long 0x3ffd0000,0x9a209a84,0xfbcff798 |log10(2)
.long 0x40000000,0xadf85458,0xa2bb4a9a |e
.long 0x3fff0000,0xb8aa3b29,0x5c17f0bc |log2(e)
.long 0x3ffd0000,0xde5bd8a9,0x37287195 |log10(e)
.long 0x00000000,0x00000000,0x00000000 |0.0
| round to zero;round to negative infinity
SMALRZRM:
.long 0x3ffd0000,0x9a209a84,0xfbcff798 |log10(2)
.long 0x40000000,0xadf85458,0xa2bb4a9a |e
.long 0x3fff0000,0xb8aa3b29,0x5c17f0bb |log2(e)
.long 0x3ffd0000,0xde5bd8a9,0x37287195 |log10(e)
.long 0x00000000,0x00000000,0x00000000 |0.0
| round to positive infinity
SMALRP:
.long 0x3ffd0000,0x9a209a84,0xfbcff799 |log10(2)
.long 0x40000000,0xadf85458,0xa2bb4a9b |e
.long 0x3fff0000,0xb8aa3b29,0x5c17f0bc |log2(e)
.long 0x3ffd0000,0xde5bd8a9,0x37287195 |log10(e)
.long 0x00000000,0x00000000,0x00000000 |0.0
|round to nearest
BIGRN:
.long 0x3ffe0000,0xb17217f7,0xd1cf79ac |ln(2)
.long 0x40000000,0x935d8ddd,0xaaa8ac17 |ln(10)
.long 0x3fff0000,0x80000000,0x00000000 |10 ^ 0
.global PTENRN
PTENRN:
.long 0x40020000,0xA0000000,0x00000000 |10 ^ 1
.long 0x40050000,0xC8000000,0x00000000 |10 ^ 2
.long 0x400C0000,0x9C400000,0x00000000 |10 ^ 4
.long 0x40190000,0xBEBC2000,0x00000000 |10 ^ 8
.long 0x40340000,0x8E1BC9BF,0x04000000 |10 ^ 16
.long 0x40690000,0x9DC5ADA8,0x2B70B59E |10 ^ 32
.long 0x40D30000,0xC2781F49,0xFFCFA6D5 |10 ^ 64
.long 0x41A80000,0x93BA47C9,0x80E98CE0 |10 ^ 128
.long 0x43510000,0xAA7EEBFB,0x9DF9DE8E |10 ^ 256
.long 0x46A30000,0xE319A0AE,0xA60E91C7 |10 ^ 512
.long 0x4D480000,0xC9767586,0x81750C17 |10 ^ 1024
.long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 |10 ^ 2048
.long 0x75250000,0xC4605202,0x8A20979B |10 ^ 4096
|round to minus infinity
BIGRZRM:
.long 0x3ffe0000,0xb17217f7,0xd1cf79ab |ln(2)
.long 0x40000000,0x935d8ddd,0xaaa8ac16 |ln(10)
.long 0x3fff0000,0x80000000,0x00000000 |10 ^ 0
.global PTENRM
PTENRM:
.long 0x40020000,0xA0000000,0x00000000 |10 ^ 1
.long 0x40050000,0xC8000000,0x00000000 |10 ^ 2
.long 0x400C0000,0x9C400000,0x00000000 |10 ^ 4
.long 0x40190000,0xBEBC2000,0x00000000 |10 ^ 8
.long 0x40340000,0x8E1BC9BF,0x04000000 |10 ^ 16
.long 0x40690000,0x9DC5ADA8,0x2B70B59D |10 ^ 32
.long 0x40D30000,0xC2781F49,0xFFCFA6D5 |10 ^ 64
.long 0x41A80000,0x93BA47C9,0x80E98CDF |10 ^ 128
.long 0x43510000,0xAA7EEBFB,0x9DF9DE8D |10 ^ 256
.long 0x46A30000,0xE319A0AE,0xA60E91C6 |10 ^ 512
.long 0x4D480000,0xC9767586,0x81750C17 |10 ^ 1024
.long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 |10 ^ 2048
.long 0x75250000,0xC4605202,0x8A20979A |10 ^ 4096
|round to positive infinity
BIGRP:
.long 0x3ffe0000,0xb17217f7,0xd1cf79ac |ln(2)
.long 0x40000000,0x935d8ddd,0xaaa8ac17 |ln(10)
.long 0x3fff0000,0x80000000,0x00000000 |10 ^ 0
.global PTENRP
PTENRP:
.long 0x40020000,0xA0000000,0x00000000 |10 ^ 1
.long 0x40050000,0xC8000000,0x00000000 |10 ^ 2
.long 0x400C0000,0x9C400000,0x00000000 |10 ^ 4
.long 0x40190000,0xBEBC2000,0x00000000 |10 ^ 8
.long 0x40340000,0x8E1BC9BF,0x04000000 |10 ^ 16
.long 0x40690000,0x9DC5ADA8,0x2B70B59E |10 ^ 32
.long 0x40D30000,0xC2781F49,0xFFCFA6D6 |10 ^ 64
.long 0x41A80000,0x93BA47C9,0x80E98CE0 |10 ^ 128
.long 0x43510000,0xAA7EEBFB,0x9DF9DE8E |10 ^ 256
.long 0x46A30000,0xE319A0AE,0xA60E91C7 |10 ^ 512
.long 0x4D480000,0xC9767586,0x81750C18 |10 ^ 1024
.long 0x5A920000,0x9E8B3B5D,0xC53D5DE6 |10 ^ 2048
.long 0x75250000,0xC4605202,0x8A20979B |10 ^ 4096
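|
| The PTENxx tables above hold 10^(2^k) for k = 0..12.  The decimal
| conversion code builds an arbitrary power of ten by multiplying
| together the entries selected by the set bits of the decimal
| exponent; the three copies let the accumulated error of that product
| be steered to match the rounding mode.  A rough C sketch of the
| lookup (illustrative only, not part of the package):
|
|	long double pten(int e, const long double tab[13])
|	{
|		long double p = 1.0L;	/* e = magnitude of the decimal exponent */
|		for (int k = 0; e != 0; k++, e >>= 1)
|			if (e & 1)
|				p *= tab[k];	/* tab[k] = 10^(2^k) */
|		return p;
|	}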
|xref nrm_zero
|xref decbin
|xref round
.global get_op
.global uns_getop
.global uni_getop
get_op:
clrb DY_MO_FLG(%a6)
tstb UFLG_TMP(%a6) |test flag for unsupp/unimp state
beq uni_getop
uns_getop:
btstb #direction_bit,CMDREG1B(%a6)
bne opclass3 |branch if a fmove out (any kind)
btstb #6,CMDREG1B(%a6)
beqs uns_notpacked
bfextu CMDREG1B(%a6){#3:#3},%d0
cmpb #3,%d0
beq pack_source |check for a packed src op, branch if so
uns_notpacked:
bsr chk_dy_mo |set the dyadic/monadic flag
tstb DY_MO_FLG(%a6)
beqs src_op_ck |if monadic, go check src op
| ;else, check dst op (fall through)
btstb #7,DTAG(%a6)
beqs src_op_ck |if dst op is norm, check src op
bras dst_ex_dnrm |else, handle destination unnorm/dnrm
uni_getop:
bfextu CMDREG1B(%a6){#0:#6},%d0 |get opclass and src fields
cmpil #0x17,%d0 |if op class and size fields are $17,
| ;it is FMOVECR; if not, continue
|
| If the instruction is fmovecr, exit get_op. It is handled
| in do_func and smovecr.sa.
|
bne not_fmovecr |handle fmovecr as an unimplemented inst
rts
not_fmovecr:
btstb #E1,E_BYTE(%a6) |if set, there is a packed operand
bne pack_source |check for packed src op, branch if so
| The following lines are coded to optimize for normalized operands
moveb STAG(%a6),%d0
orb DTAG(%a6),%d0 |check if either of STAG/DTAG msb set
bmis dest_op_ck |if so, some op needs to be fixed
rts
dest_op_ck:
btstb #7,DTAG(%a6) |check for unsupported data types in
beqs src_op_ck |the destination, if not, check src op
bsr chk_dy_mo |set dyadic/monadic flag
tstb DY_MO_FLG(%a6) |
beqs src_op_ck |if monadic, check src op
|
| At this point, destination has an extended denorm or unnorm.
|
dst_ex_dnrm:
movew FPTEMP_EX(%a6),%d0 |get destination exponent
andiw #0x7fff,%d0 |mask sign, check if exp = 0000
beqs src_op_ck |if denorm then check source op.
| ;denorms are taken care of in res_func
| ;(unsupp) or do_func (unimp)
| ;else unnorm fall through
leal FPTEMP(%a6),%a0 |point a0 to dop - used in mk_norm
bsr mk_norm |go normalize - mk_norm returns:
| ;L_SCR1{7:5} = operand tag
| ; (000 = norm, 100 = denorm)
| ;L_SCR1{4} = fpte15 or ete15
| ; 0 = exp > $3fff
| ; 1 = exp <= $3fff
| ;and puts the normalized num back
| ;on the fsave stack
|
moveb L_SCR1(%a6),DTAG(%a6) |write the new tag & fpte15
| ;to the fsave stack and fall
| ;through to check source operand
|
src_op_ck:
btstb #7,STAG(%a6)
beq end_getop |check for unsupported data types on the
| ;source operand
btstb #5,STAG(%a6)
bnes src_sd_dnrm |if bit 5 set, handle sgl/dbl denorms
|
| At this point only unnorms or extended denorms are possible.
|
src_ex_dnrm:
movew ETEMP_EX(%a6),%d0 |get source exponent
andiw #0x7fff,%d0 |mask sign, check if exp = 0000
beq end_getop |if denorm then exit, denorms are
| ;handled in do_func
leal ETEMP(%a6),%a0 |point a0 to sop - used in mk_norm
bsr mk_norm |go normalize - mk_norm returns:
| ;L_SCR1{7:5} = operand tag
| ; (000 = norm, 100 = denorm)
| ;L_SCR1{4} = fpte15 or ete15
| ; 0 = exp > $3fff
| ; 1 = exp <= $3fff
| ;and puts the normalized num back
| ;on the fsave stack
|
moveb L_SCR1(%a6),STAG(%a6) |write the new tag & ete15
rts |end_getop
|
| At this point, only single or double denorms are possible.
| If the inst is not fmove, normalize the source. If it is,
| do nothing to the input.
|
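| The biases written below are the extended-format equivalents of the
| smallest normalized single and double exponents: $3f81 = $3fff - 126
| and $3c01 = $3fff - 1022.  With that exponent in place, the sgl/dbl
| denorm mantissa can be renormalized by mk_norm as an extended value.
|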
src_sd_dnrm:
btstb #4,CMDREG1B(%a6) |differentiate between sgl/dbl denorm
bnes is_double
is_single:
movew #0x3f81,%d1 |write bias for sgl denorm
bras common |goto the common code
is_double:
movew #0x3c01,%d1 |write the bias for a dbl denorm
common:
btstb #sign_bit,ETEMP_EX(%a6) |grab sign bit of mantissa
beqs pos
bset #15,%d1 |set sign bit because it is negative
pos:
movew %d1,ETEMP_EX(%a6)
| ;put exponent on stack
movew CMDREG1B(%a6),%d1
andw #0xe3ff,%d1 |clear out source specifier
orw #0x0800,%d1 |set source specifier to extended prec
movew %d1,CMDREG1B(%a6) |write back to the command word in stack
| ;this is needed to fix unsupp data stack
leal ETEMP(%a6),%a0 |point a0 to sop
bsr mk_norm |convert sgl/dbl denorm to norm
moveb L_SCR1(%a6),STAG(%a6) |put tag into source tag reg - d0
rts |end_getop
|
| At this point, the source is definitely packed, whether
| instruction is dyadic or monadic is still unknown
|
pack_source:
movel FPTEMP_LO(%a6),ETEMP(%a6) |write ms part of packed
| ;number to etemp slot
bsr chk_dy_mo |set dyadic/monadic flag
bsr unpack
tstb DY_MO_FLG(%a6)
beqs end_getop |if monadic, exit
| ;else, fix FPTEMP
pack_dya:
bfextu CMDREG1B(%a6){#6:#3},%d0 |extract dest fp reg
movel #7,%d1
subl %d0,%d1
clrl %d0
bsetl %d1,%d0 |set up d0 as a dynamic register mask
fmovemx %d0,FPTEMP(%a6) |write to FPTEMP
btstb #7,DTAG(%a6) |check dest tag for unnorm or denorm
bne dst_ex_dnrm |else, handle the unnorm or ext denorm
|
| Dest is not denormalized. Check for norm, and set fpte15
| accordingly.
|
moveb DTAG(%a6),%d0
andib #0xf0,%d0 |strip to only dtag:fpte15
tstb %d0 |check for normalized value
bnes end_getop |if inf/nan/zero leave get_op
movew FPTEMP_EX(%a6),%d0
andiw #0x7fff,%d0
cmpiw #0x3fff,%d0 |check if fpte15 needs setting
bges end_getop |if >= $3fff, leave fpte15=0
orb #0x10,DTAG(%a6)
bras end_getop
|
| At this point, it is either an fmoveout packed, unnorm or denorm
|
opclass3:
clrb DY_MO_FLG(%a6) |set dyadic/monadic flag to monadic
bfextu CMDREG1B(%a6){#4:#2},%d0
cmpib #3,%d0
bne src_ex_dnrm |if not equal, must be unnorm or denorm
| ;else it is a packed move out
| ;exit
end_getop:
rts
|
| Sets the DY_MO_FLG correctly. This is used only for the
| unsupported data type exception. Set if dyadic.
|
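| Decode summary, from the extension word bits tested below:
|	bit 5 = 0			monadic
|	bit 5 = 1, bit 4 = 0		dyadic
|	bits 5 and 4 both = 1		dyadic only for fcmp ($38)
| The same test as a C sketch (illustrative only):
|
|	int is_dyadic(unsigned cmd)
|	{
|		if (!(cmd & 0x20)) return 0;	/* bit 5 clear: monadic */
|		if (!(cmd & 0x10)) return 1;	/* bit 4 clear: dyadic */
|		return (cmd & 0x7f) == 0x38;	/* else only fcmp is dyadic */
|	}
|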
chk_dy_mo:
movew CMDREG1B(%a6),%d0
btstl #5,%d0 |testing extension command word
beqs set_mon |if bit 5 = 0 then monadic
btstl #4,%d0 |know that bit 5 = 1
beqs set_dya |if bit 4 = 0 then dyadic
andiw #0x007f,%d0 |get rid of all but extension bits {6:0}
cmpiw #0x0038,%d0 |if extension = $38 then fcmp (dyadic)
bnes set_mon
set_dya:
st DY_MO_FLG(%a6) |set the inst flag type to dyadic
rts
set_mon:
clrb DY_MO_FLG(%a6) |set the inst flag type to monadic
rts
|
| MK_NORM
|
| Normalizes unnormalized numbers, sets tag to norm or denorm, sets unfl
| exception if denorm.
|
| CASE opclass 0x0 unsupp
| mk_norm till msb set
| set tag = norm
|
| CASE opclass 0x0 unimp
| mk_norm till msb set or exp = 0
| if integer bit = 0
| tag = denorm
| else
| tag = norm
|
| CASE opclass 011 unsupp
| mk_norm till msb set or exp = 0
| if integer bit = 0
| tag = denorm
| set unfl_nmcexe = 1
| else
| tag = norm
|
| if exp <= $3fff
| set ete15 or fpte15 = 1
| else set ete15 or fpte15 = 0
| input:
| a0 = points to operand to be normalized
| output:
| L_SCR1{7:5} = operand tag (000 = norm, 100 = denorm)
| L_SCR1{4} = fpte15 or ete15 (0 = exp > $3fff, 1 = exp <=$3fff)
| the normalized operand is placed back on the fsave stack
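|
| Roughly, the normalization done below (via nrm_zero) is:
|
|	while (exp > 0 && !(mant_hi & 0x80000000)) {
|		mant <<= 1;		/* shift the 64-bit mantissa left */
|		exp--;
|	}
|
| If the loop stops because the j-bit became 1, the result is a norm;
| if the exponent reached zero first, it is still a denorm.
|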
mk_norm:
clrl L_SCR1(%a6)
bclrb #sign_bit,LOCAL_EX(%a0)
sne LOCAL_SGN(%a0) |transform into internal extended format
cmpib #0x2c,1+EXC_VEC(%a6) |check if unimp
bnes uns_data |branch if unsupp
bsr uni_inst |call if unimp (opclass 0x0)
bras reload
uns_data:
btstb #direction_bit,CMDREG1B(%a6) |check transfer direction
bnes bit_set |branch if set (opclass 011)
bsr uns_opx |call if opclass 0x0
bras reload
bit_set:
bsr uns_op3 |opclass 011
reload:
cmpw #0x3fff,LOCAL_EX(%a0) |if exp > $3fff
bgts end_mk | fpte15/ete15 already set to 0
bsetb #4,L_SCR1(%a6) |else set fpte15/ete15 to 1
| ;calling routine actually sets the
| ;value on the stack (along with the
| ;tag), since this routine doesn't
| ;know if it should set ete15 or fpte15
| ;ie, it doesn't know if this is the
| ;src op or dest op.
end_mk:
bfclr LOCAL_SGN(%a0){#0:#8}
beqs end_mk_pos
bsetb #sign_bit,LOCAL_EX(%a0) |convert back to IEEE format
end_mk_pos:
rts
|
| CASE opclass 011 unsupp
|
uns_op3:
bsr nrm_zero |normalize till msb = 1 or exp = zero
btstb #7,LOCAL_HI(%a0) |if msb = 1
bnes no_unfl |then branch
set_unfl:
orw #dnrm_tag,L_SCR1(%a6) |set denorm tag
bsetb #unfl_bit,FPSR_EXCEPT(%a6) |set unfl exception bit
no_unfl:
rts
|
| CASE opclass 0x0 unsupp
|
uns_opx:
bsr nrm_zero |normalize the number
btstb #7,LOCAL_HI(%a0) |check if integer bit (j-bit) is set
beqs uns_den |if clear then now have a denorm
uns_nrm:
orb #norm_tag,L_SCR1(%a6) |set tag to norm
rts
uns_den:
orb #dnrm_tag,L_SCR1(%a6) |set tag to denorm
rts
|
| CASE opclass 0x0 unimp
|
uni_inst:
bsr nrm_zero
btstb #7,LOCAL_HI(%a0) |check if integer bit (j-bit) is set
beqs uni_den |if clear then now have a denorm
uni_nrm:
orb #norm_tag,L_SCR1(%a6) |set tag to norm
rts
uni_den:
orb #dnrm_tag,L_SCR1(%a6) |set tag to denorm
rts
|
| Decimal to binary conversion
|
| Special cases of inf and NaNs are completed outside of decbin.
| If the input is an snan, the snan bit is not set.
|
| input:
| ETEMP(a6) - points to packed decimal string in memory
| output:
| fp0 - contains packed string converted to extended precision
| ETEMP - same as fp0
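|
| A packed decimal operand encodes, roughly,
|	(-1)^sm * (17-digit BCD mantissa, point after the first digit)
|		* 10^((-1)^se * (3-digit BCD exponent))
| decbin converts the BCD mantissa to binary and then scales it by the
| appropriate power of ten from the PTENxx tables (multiplying or
| dividing, depending on the sign of the net exponent).  This is only
| a sketch; see decbin for the exact digit handling.
|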
unpack:
movew CMDREG1B(%a6),%d0 |examine command word, looking for fmove's
andw #0x3b,%d0
beq move_unpack |special handling for fmove: must set FPSR_CC
movew ETEMP(%a6),%d0 |get word with inf information
bfextu %d0{#20:#12},%d1 |get exponent into d1
cmpiw #0x0fff,%d1 |test for inf or NaN
bnes try_zero |if not equal, it is not special
bfextu %d0{#17:#3},%d1 |get SE and y bits into d1
cmpiw #7,%d1 |SE and y bits must be on for special
bnes try_zero |if not on, it is not special
|input is of the special cases of inf and NaN
tstl ETEMP_HI(%a6) |check ms mantissa
bnes fix_nan |if non-zero, it is a NaN
tstl ETEMP_LO(%a6) |check ls mantissa
bnes fix_nan |if non-zero, it is a NaN
bra finish |special already on stack
fix_nan:
btstb #signan_bit,ETEMP_HI(%a6) |test for snan
bne finish
orl #snaniop_mask,USER_FPSR(%a6) |always set snan if it is so
bra finish
try_zero:
movew ETEMP_EX+2(%a6),%d0 |get word 4
andiw #0x000f,%d0 |clear all but last ni(y)bble
tstw %d0 |check for zero.
bne not_spec
tstl ETEMP_HI(%a6) |check words 3 and 2
bne not_spec
tstl ETEMP_LO(%a6) |check words 1 and 0
bne not_spec
tstl ETEMP(%a6) |test sign of the zero
bges pos_zero
movel #0x80000000,ETEMP(%a6) |write neg zero to etemp
clrl ETEMP_HI(%a6)
clrl ETEMP_LO(%a6)
bra finish
pos_zero:
clrl ETEMP(%a6)
clrl ETEMP_HI(%a6)
clrl ETEMP_LO(%a6)
bra finish
not_spec:
fmovemx %fp0-%fp1,-(%a7) |save fp0 - decbin returns in it
bsr decbin
fmovex %fp0,ETEMP(%a6) |put the unpacked sop in the fsave stack
fmovemx (%a7)+,%fp0-%fp1
fmovel #0,%FPSR |clr fpsr from decbin
bra finish
|
| Special handling for packed move in: Same results as all other
| packed cases, but we must set the FPSR condition codes properly.
|
move_unpack:
movew ETEMP(%a6),%d0 |get word with inf information
bfextu %d0{#20:#12},%d1 |get exponent into d1
cmpiw #0x0fff,%d1 |test for inf or NaN
bnes mtry_zero |if not equal, it is not special
bfextu %d0{#17:#3},%d1 |get SE and y bits into d1
cmpiw #7,%d1 |SE and y bits must be on for special
bnes mtry_zero |if not on, it is not special
|input is of the special cases of inf and NaN
tstl ETEMP_HI(%a6) |check ms mantissa
bnes mfix_nan |if non-zero, it is a NaN
tstl ETEMP_LO(%a6) |check ls mantissa
bnes mfix_nan |if non-zero, it is a NaN
|input is inf
orl #inf_mask,USER_FPSR(%a6) |set I bit
tstl ETEMP(%a6) |check sign
bge finish
orl #neg_mask,USER_FPSR(%a6) |set N bit
bra finish |special already on stack
mfix_nan:
orl #nan_mask,USER_FPSR(%a6) |set NaN bit
moveb #nan_tag,STAG(%a6) |set stag to NaN
btstb #signan_bit,ETEMP_HI(%a6) |test for snan
bnes mn_snan
orl #snaniop_mask,USER_FPSR(%a6) |set snan bit
btstb #snan_bit,FPCR_ENABLE(%a6) |test for snan enabled
bnes mn_snan
bsetb #signan_bit,ETEMP_HI(%a6) |force snans to qnans
mn_snan:
tstl ETEMP(%a6) |check for sign
bge finish |if clr, go on
orl #neg_mask,USER_FPSR(%a6) |set N bit
bra finish
mtry_zero:
movew ETEMP_EX+2(%a6),%d0 |get word 4
andiw #0x000f,%d0 |clear all but last ni(y)bble
tstw %d0 |check for zero.
bnes mnot_spec
tstl ETEMP_HI(%a6) |check words 3 and 2
bnes mnot_spec
tstl ETEMP_LO(%a6) |check words 1 and 0
bnes mnot_spec
tstl ETEMP(%a6) |test sign of the zero
bges mpos_zero
orl #neg_mask+z_mask,USER_FPSR(%a6) |set N and Z
movel #0x80000000,ETEMP(%a6) |write neg zero to etemp
clrl ETEMP_HI(%a6)
clrl ETEMP_LO(%a6)
bras finish
mpos_zero:
orl #z_mask,USER_FPSR(%a6) |set Z
clrl ETEMP(%a6)
clrl ETEMP_HI(%a6)
clrl ETEMP_LO(%a6)
bras finish
mnot_spec:
fmovemx %fp0-%fp1,-(%a7) |save fp0 ,fp1 - decbin returns in fp0
bsr decbin
fmovex %fp0,ETEMP(%a6)
| ;put the unpacked sop in the fsave stack
fmovemx (%a7)+,%fp0-%fp1
finish:
movew CMDREG1B(%a6),%d0 |get the command word
andw #0xfbff,%d0 |change the source specifier field to
| ;extended (was packed).
movew %d0,CMDREG1B(%a6) |write command word back to fsave stack
| ;we need to do this so the 040 will
| ;re-execute the inst. without taking
| ;another packed trap.
fix_stag:
|Converted result is now in etemp on fsave stack, now set the source
|tag (stag)
| if (ete =$7fff) then INF or NAN
| if (etemp = $x.0----0) then
| stag = INF
| else
| stag = NAN
| else
| if (ete = $0000) then
| stag = ZERO
| else
| stag = NORM
|
| Note also that the etemp_15 bit (just right of the stag) must
| be set accordingly.
|
movew ETEMP_EX(%a6),%d1
andiw #0x7fff,%d1 |strip sign
cmpw #0x7fff,%d1
bnes z_or_nrm
movel ETEMP_HI(%a6),%d1
bnes is_nan
movel ETEMP_LO(%a6),%d1
bnes is_nan
is_inf:
moveb #0x40,STAG(%a6)
movel #0x40,%d0
rts
is_nan:
moveb #0x60,STAG(%a6)
movel #0x60,%d0
rts
z_or_nrm:
tstw %d1
bnes is_nrm
is_zro:
| For a zero, set etemp_15
moveb #0x30,STAG(%a6)
movel #0x20,%d0
rts
is_nrm:
| For a norm, check if the exp <= $3fff; if so, set etemp_15
cmpiw #0x3fff,%d1
bles set_bit15
moveb #0,STAG(%a6)
bras end_is_nrm
set_bit15:
moveb #0x10,STAG(%a6)
end_is_nrm:
movel #0,%d0
end_fix:
rts
end_get:
rts
|end
|
| arch/m68k/fpsp040/sint.S
|
|
| sint.sa 3.1 12/10/90
|
| The entry point sINT computes the rounded integer
| equivalent of the input argument, sINTRZ computes
| the integer rounded to zero of the input argument.
|
| Entry points sint and sintrz are called from do_func
| to emulate the fint and fintrz unimplemented instructions,
| respectively. Entry point sintdo is used by bindec.
|
| Input: (Entry points sint and sintrz) Double-extended
| number X in the ETEMP space in the floating-point
| save stack.
| (Entry point sintdo) Double-extended number X in
| location pointed to by the address register a0.
| (Entry point sintd) Double-extended denormalized
| number X in the ETEMP space in the floating-point
| save stack.
|
| Output: The function returns int(X) or intrz(X) in fp0.
|
| Modifies: fp0.
|
| Algorithm: (sint and sintrz)
|
| 1. If exp(X) >= 63, return X.
| If exp(X) < 0, return +/- 0 or +/- 1, according to
| the rounding mode.
|
| 2. (X is in range) set rsc = 63 - exp(X). Unnormalize the
| result to the exponent $403e.
|
| 3. Round the result in the mode given in USER_FPCR. For
| sintrz, force round-to-zero mode.
|
| 4. Normalize the rounded result; store in fp0.
|
| For the denormalized cases, force the correct result
| for the given sign and rounding mode.
|
| Sign(X)
| RMODE + -
| ----- --------
| RN +0 -0
| RZ +0 -0
| RM +0 -1
| RP +1 -0
|
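| In C99 terms, the two entry points behave like rint() and trunc()
| (illustrative comparison only):
|
|	#include <math.h>
|	double fint_like(double x)   { return rint(x);  } /* current mode */
|	double fintrz_like(double x) { return trunc(x); } /* toward zero  */
|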
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SINT idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref dnrm_lp
|xref nrm_set
|xref round
|xref t_inx2
|xref ld_pone
|xref ld_mone
|xref ld_pzero
|xref ld_mzero
|xref snzrinx
|
| FINT
|
.global sint
sint:
bfextu FPCR_MODE(%a6){#2:#2},%d1 |use user's mode for rounding
| ;implicitly has extend precision
| ;in upper word.
movel %d1,L_SCR1(%a6) |save mode bits
bras sintexc
|
| FINT with extended denorm inputs.
|
.global sintd
sintd:
btstb #5,FPCR_MODE(%a6)
beq snzrinx |if round nearest or round zero, +/- 0
btstb #4,FPCR_MODE(%a6)
beqs rnd_mns
rnd_pls:
btstb #sign_bit,LOCAL_EX(%a0)
bnes sintmz
bsr ld_pone |if round plus inf and pos, answer is +1
bra t_inx2
rnd_mns:
btstb #sign_bit,LOCAL_EX(%a0)
beqs sintpz
bsr ld_mone |if round mns inf and neg, answer is -1
bra t_inx2
sintpz:
bsr ld_pzero
bra t_inx2
sintmz:
bsr ld_mzero
bra t_inx2
|
| FINTRZ
|
.global sintrz
sintrz:
movel #1,L_SCR1(%a6) |use rz mode for rounding
| ;implicitly has extend precision
| ;in upper word.
bras sintexc
|
| SINTDO
|
| Input: a0 points to an IEEE extended format operand
| Output: fp0 has the result
|
| Exceptions:
|
| If the subroutine results in an inexact operation, the inx2 and
| ainx bits in the USER_FPSR are set.
|
|
.global sintdo
sintdo:
bfextu FPCR_MODE(%a6){#2:#2},%d1 |use user's mode for rounding
| ;implicitly has ext precision
| ;in upper word.
movel %d1,L_SCR1(%a6) |save mode bits
|
| Real work of sint is in sintexc
|
sintexc:
bclrb #sign_bit,LOCAL_EX(%a0) |convert to internal extended
| ;format
sne LOCAL_SGN(%a0)
cmpw #0x403e,LOCAL_EX(%a0) |check if (unbiased) exp > 63
bgts out_rnge |branch if exp > 63
cmpw #0x3ffd,LOCAL_EX(%a0) |check if |X| < 1/2
bgt in_rnge |if -1 <= exp <= 63, do calc
|
| The input magnitude is less than 1/2, so the result is +/- 0 or +/- 1.
| Restore sign, and check for directed rounding modes. L_SCR1 contains
| the rmode in the lower byte.
|
un_rnge:
btstb #1,L_SCR1+3(%a6) |check for rn and rz
beqs un_rnrz
tstb LOCAL_SGN(%a0) |check for sign
bnes un_rmrp_neg
|
| Sign is +. If rp, load +1.0, if rm, load +0.0
|
cmpib #3,L_SCR1+3(%a6) |check for rp
beqs un_ldpone |if rp, load +1.0
bsr ld_pzero |if rm, load +0.0
bra t_inx2
un_ldpone:
bsr ld_pone
bra t_inx2
|
| Sign is -. If rm, load -1.0, if rp, load -0.0
|
un_rmrp_neg:
cmpib #2,L_SCR1+3(%a6) |check for rm
beqs un_ldmone |if rm, load -1.0
bsr ld_mzero |if rp, load -0.0
bra t_inx2
un_ldmone:
bsr ld_mone
bra t_inx2
|
| Rmode is rn or rz; return signed zero
|
un_rnrz:
tstb LOCAL_SGN(%a0) |check for sign
bnes un_rnrz_neg
bsr ld_pzero
bra t_inx2
un_rnrz_neg:
bsr ld_mzero
bra t_inx2
|
| Input is greater than 2^63. All bits are significant. Return
| the input.
|
out_rnge:
bfclr LOCAL_SGN(%a0){#0:#8} |change back to IEEE ext format
beqs intps
bsetb #sign_bit,LOCAL_EX(%a0)
intps:
fmovel %fpcr,-(%sp)
fmovel #0,%fpcr
fmovex LOCAL_EX(%a0),%fp0 |if exp > 63
| ;then return X to the user
| ;there are no fraction bits
fmovel (%sp)+,%fpcr
rts
in_rnge:
| ;shift off fraction bits
clrl %d0 |clear d0 - initial g,r,s for
| ;dnrm_lp
movel #0x403e,%d1 |set threshold for dnrm_lp
| ;assumes a0 points to operand
bsr dnrm_lp
| ;returns unnormalized number
| ;pointed by a0
| ;output d0 supplies g,r,s
| ;used by round
movel L_SCR1(%a6),%d1 |use selected rounding mode
|
|
bsr round |round the unnorm based on users
| ;input a0 ptr to ext X
| ; d0 g,r,s bits
| ; d1 PREC/MODE info
| ;output a0 ptr to rounded result
| ;inexact flag set in USER_FPSR
| ;if initial grs set
|
| normalize the rounded result and store value in fp0
|
bsr nrm_set |normalize the unnorm
| ;Input: a0 points to operand to
| ;be normalized
| ;Output: a0 points to normalized
| ;result
bfclr LOCAL_SGN(%a0){#0:#8}
beqs nrmrndp
bsetb #sign_bit,LOCAL_EX(%a0) |return to IEEE extended format
nrmrndp:
fmovel %fpcr,-(%sp)
fmovel #0,%fpcr
fmovex LOCAL_EX(%a0),%fp0 |move result to fp0
fmovel (%sp)+,%fpcr
rts
|end
|
| arch/m68k/fpsp040/ssinh.S
|
|
| ssinh.sa 3.1 12/10/90
|
| The entry point sSinh computes the hyperbolic sine of
| an input argument; sSinhd does the same except for denormalized
| input.
|
| Input: Double-extended number X in location pointed to
| by address register a0.
|
| Output: The value sinh(X) returned in floating-point register Fp0.
|
| Accuracy and Monotonicity: The returned result is within 3 ulps in
| 64 significant bits, i.e. within 0.5001 ulp to 53 bits if the
| result is subsequently rounded to double precision. The
| result is provably monotonic in double precision.
|
| Speed: The program sSINH takes approximately 280 cycles.
|
| Algorithm:
|
| SINH
| 1. If |X| > 16380 log2, go to 3.
|
| 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formulae
| y = |X|, sgn = sign(X), and z = expm1(Y),
| sinh(X) = sgn*(1/2)*( z + z/(1+z) ).
| Exit.
|
| 3. If |X| > 16480 log2, go to 5.
|
| 4. (16380 log2 < |X| <= 16480 log2)
| sinh(X) = sign(X) * exp(|X|)/2.
| However, invoking exp(|X|) may cause premature overflow.
| Thus, we calculate sinh(X) as follows:
| Y := |X|
| sgn := sign(X)
| sgnFact := sgn * 2**(16380)
| Y' := Y - 16381 log2
| sinh(X) := sgnFact * exp(Y').
| Exit.
|
| 5. (|X| > 16480 log2) sinh(X) must overflow. Return
| sign(X)*Huge*Huge to generate overflow and an infinity with
| the appropriate sign. Huge is the largest finite number in
| extended format. Exit.
|
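| Step 2 expressed in C with the standard expm1() (illustrative only;
| the package computes z with setoxm1 in extended precision):
|
|	#include <math.h>
|	double sinh_small(double x)
|	{
|		double z = expm1(fabs(x));	/* z = exp(|x|) - 1 */
|		return copysign(0.5 * (z + z / (1.0 + z)), x);
|	}
|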
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|SSINH idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
T1: .long 0x40C62D38,0xD3D64634 | ... 16381 LOG2 LEAD
T2: .long 0x3D6F90AE,0xB1E75CC7 | ... 16381 LOG2 TRAIL
|xref t_frcinx
|xref t_ovfl
|xref t_extdnrm
|xref setox
|xref setoxm1
.global ssinhd
ssinhd:
|--SINH(X) = X FOR DENORMALIZED X
bra t_extdnrm
.global ssinh
ssinh:
fmovex (%a0),%fp0 | ...LOAD INPUT
movel (%a0),%d0
movew 4(%a0),%d0
movel %d0,%a1 | save a copy of original (compacted) operand
andl #0x7FFFFFFF,%d0
cmpl #0x400CB167,%d0
bgts SINHBIG
|--THIS IS THE USUAL CASE, |X| < 16380 LOG2
|--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
fabsx %fp0 | ...Y = |X|
moveml %a1/%d1,-(%sp)
fmovemx %fp0-%fp0,(%a0)
clrl %d1
bsr setoxm1 | ...FP0 IS Z = EXPM1(Y)
fmovel #0,%fpcr
moveml (%sp)+,%a1/%d1
fmovex %fp0,%fp1
fadds #0x3F800000,%fp1 | ...1+Z
fmovex %fp0,-(%sp)
fdivx %fp1,%fp0 | ...Z/(1+Z)
movel %a1,%d0
andl #0x80000000,%d0
orl #0x3F000000,%d0
faddx (%sp)+,%fp0
movel %d0,-(%sp)
fmovel %d1,%fpcr
fmuls (%sp)+,%fp0 |last fp inst - possible exceptions set
bra t_frcinx
SINHBIG:
cmpl #0x400CB2B3,%d0
bgt t_ovfl
fabsx %fp0
fsubd T1(%pc),%fp0 | ...(|X|-16381LOG2_LEAD)
movel #0,-(%sp)
movel #0x80000000,-(%sp)
movel %a1,%d0
andl #0x80000000,%d0
orl #0x7FFB0000,%d0
movel %d0,-(%sp) | ...EXTENDED FMT
fsubd T2(%pc),%fp0 | ...|X| - 16381 LOG2, ACCURATE
movel %d1,-(%sp)
clrl %d1
fmovemx %fp0-%fp0,(%a0)
bsr setox
fmovel (%sp)+,%fpcr
fmulx (%sp)+,%fp0 |possible exception
bra t_frcinx
|end
|
| arch/m68k/fpsp040/skeleton.S
|
|
| skeleton.sa 3.2 4/26/91
|
| This file contains code that is system dependent and will
| need to be modified to install the FPSP.
|
| Each entry point for exception 'xxxx' begins with a 'jmp fpsp_xxxx'.
| Put any target system specific handling that must be done immediately
| before the jump instruction. If there is no handling necessary, then
| the 'fpsp_xxxx' handler entry point should be placed in the exception
| table so that the 'jmp' can be eliminated. If the FPSP determines that the
| exception is one that must be reported then there will be a
| return from the package by a 'jmp real_xxxx'. At that point
| the machine state will be identical to the state before
| the FPSP was entered. In particular, whatever condition
| that caused the exception will still be pending when the FPSP
| package returns. Thus, there will be system specific code
| to handle the exception.
|
| If the exception was completely handled by the package, then
| the return will be via a 'jmp fpsp_done'. Unless there is
| OS specific work to be done (such as handling a context switch or
| interrupt) the user program can be resumed via 'rte'.
|
| In the following skeleton code, some typical 'real_xxxx' handling
| code is shown. This code may need to be moved to an appropriate
| place in the target system, or rewritten.
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
|
| Modified for Linux-1.3.x by Jes Sorensen (jds@kom.auc.dk)
|
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
|SKELETON idnt 2,1 | Motorola 040 Floating Point Software Package
|section 15
|
| The following counters are used for standalone testing
|
|section 8
#include "fpsp.h"
|xref b1238_fix
|
| Divide by Zero exception
|
| All dz exceptions are 'real', hence no fpsp_dz entry point.
|
.global dz
.global real_dz
dz:
real_dz:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E1,E_BYTE(%a6)
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Inexact exception
|
| All inexact exceptions are real, but the 'real' handler
| will probably want to clear the pending exception.
| The provided code will clear the E3 exception (if pending),
| otherwise clear the E1 exception. The frestore is not really
| necessary for E1 exceptions.
|
| Code following the 'inex' label is to handle bug #1232. In this
| bug, if an E1 snan, ovfl, or unfl occurred, and the process was
| swapped out before taking the exception, the exception taken on
| return was inex, rather than the correct exception. The snan, ovfl,
| and unfl exception to be taken must not have been enabled. The
| fix is to check for E1, and the existence of one of snan, ovfl,
| or unfl bits set in the fpsr. If any of these are set, branch
| to the appropriate handler for the exception in the fpsr. Note
| that this fix is only for d43b parts, and is skipped if the
| version number is not $40.
|
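| In outline, the workaround below is:
|
|	if (frame version == $40 && E1 set &&
|	    (snan || ovfl || unfl) pending in the FPSR)
|		dispatch to that exception's handler instead of inex
|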
|
.global real_inex
.global inex
inex:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
cmpib #VER_40,(%sp) |test version number
bnes not_fmt40
fmovel %fpsr,-(%sp)
btstb #E1,E_BYTE(%a6) |test for E1 set
beqs not_b1232
btstb #snan_bit,2(%sp) |test for snan
beq inex_ckofl
addl #4,%sp
frestore (%sp)+
unlk %a6
bra snan
inex_ckofl:
btstb #ovfl_bit,2(%sp) |test for ovfl
beq inex_ckufl
addl #4,%sp
frestore (%sp)+
unlk %a6
bra ovfl
inex_ckufl:
btstb #unfl_bit,2(%sp) |test for unfl
beq not_b1232
addl #4,%sp
frestore (%sp)+
unlk %a6
bra unfl
|
| We do not have the bug 1232 case. Clean up the stack and call
| real_inex.
|
not_b1232:
addl #4,%sp
frestore (%sp)+
unlk %a6
real_inex:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
not_fmt40:
bclrb #E3,E_BYTE(%a6) |clear and test E3 flag
beqs inex_cke1
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix.
|
moveml %d0/%d1,USER_DA(%a6)
bfextu CMDREG1B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix |test for bug1238 case
moveml USER_DA(%a6),%d0/%d1
bras inex_done
inex_cke1:
bclrb #E1,E_BYTE(%a6)
inex_done:
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Overflow exception
|
|xref fpsp_ovfl
.global real_ovfl
.global ovfl
ovfl:
jmp fpsp_ovfl
real_ovfl:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E3,E_BYTE(%a6) |clear and test E3 flag
bnes ovfl_done
bclrb #E1,E_BYTE(%a6)
ovfl_done:
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Underflow exception
|
|xref fpsp_unfl
.global real_unfl
.global unfl
unfl:
jmp fpsp_unfl
real_unfl:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E3,E_BYTE(%a6) |clear and test E3 flag
bnes unfl_done
bclrb #E1,E_BYTE(%a6)
unfl_done:
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Signalling NAN exception
|
|xref fpsp_snan
.global real_snan
.global snan
snan:
jmp fpsp_snan
real_snan:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E1,E_BYTE(%a6) |snan is always an E1 exception
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Operand Error exception
|
|xref fpsp_operr
.global real_operr
.global operr
operr:
jmp fpsp_operr
real_operr:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E1,E_BYTE(%a6) |operr is always an E1 exception
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| BSUN exception
|
| This sample handler simply clears the nan bit in the FPSR.
|
|xref fpsp_bsun
.global real_bsun
.global bsun
bsun:
jmp fpsp_bsun
real_bsun:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E1,E_BYTE(%a6) |bsun is always an E1 exception
fmovel %FPSR,-(%sp)
bclrb #nan_bit,(%sp)
fmovel (%sp)+,%FPSR
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| F-line exception
|
| A 'real' F-line exception is one that the FPSP isn't supposed to
| handle. E.g. an instruction with a co-processor ID that is not 1.
|
|
|xref fpsp_fline
.global real_fline
.global fline
fline:
jmp fpsp_fline
real_fline:
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Unsupported data type exception
|
|xref fpsp_unsupp
.global real_unsupp
.global unsupp
unsupp:
jmp fpsp_unsupp
real_unsupp:
link %a6,#-LOCAL_SIZE
fsave -(%sp)
bclrb #E1,E_BYTE(%a6) |unsupp is always an E1 exception
frestore (%sp)+
unlk %a6
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
bral ret_from_exception
|
| Trace exception
|
.global real_trace
real_trace:
|
bral trap
|
| fpsp_fmt_error --- exit point for frame format error
|
| The fpu stack frame does not match the frames existing
| or planned at the time of this writing. The fpsp is
| unable to handle frame sizes not in the following
| version:size pairs:
|
| {4060, 4160} - busy frame
| {4028, 4130} - unimp frame
| {4000, 4100} - idle frame
|
| This entry point simply holds an f-line illegal value.
| Replace this with a call to your kernel panic code or
| code to handle future revisions of the fpu.
|
.global fpsp_fmt_error
fpsp_fmt_error:
.long 0xf27f0000 |f-line illegal
|
| fpsp_done --- FPSP exit point
|
| The exception has been handled by the package and we are ready
| to return to user mode, but there may be OS specific code
| to execute before we do. If there is, do it now.
|
|
.global fpsp_done
fpsp_done:
btst #0x5,%sp@ | supervisor bit set in saved SR?
beq .Lnotkern
rte
.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
| deliver signals, reschedule etc..
jra ret_from_exception
|
| mem_write --- write to user or supervisor address space
|
| Writes to memory while in supervisor mode. copyout accomplishes
| this via a 'moves' instruction. copyout is a UNIX SVR3 (and later) function.
| If you don't have copyout, use the local copy of the function below.
|
| a0 - supervisor source address
| a1 - user destination address
| d0 - number of bytes to write (maximum count is 12)
|
| The supervisor source address is guaranteed to point into the supervisor
| stack. The result is that a UNIX
| process is allowed to sleep as a consequence of a page fault during
| copyout. The probability of a page fault is exceedingly small because
| the 68040 always reads the destination address and thus the page
| faults should have already been handled.
|
| If the EXC_SR shows that the exception was from supervisor space,
| then just do a dumb (and slow) memory move. In a UNIX environment
| there shouldn't be any supervisor mode floating point exceptions.
|
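| The assumed C-level argument order, matching the pushes below and the
| local fallback at the end of this file, is:
|
|	copyout(void *super_src, void *user_dst, int nbytes);
|	copyin(void *user_src, void *super_dst, int nbytes);
|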
.global mem_write
mem_write:
btstb #5,EXC_SR(%a6) |check for supervisor state
beqs user_write
super_write:
moveb (%a0)+,(%a1)+
subql #1,%d0
bnes super_write
rts
user_write:
movel %d1,-(%sp) |preserve d1 just in case
movel %d0,-(%sp)
movel %a1,-(%sp)
movel %a0,-(%sp)
jsr copyout
addw #12,%sp
movel (%sp)+,%d1
rts
|
| mem_read --- read from user or supervisor address space
|
| Reads from memory while in supervisor mode. copyin accomplishes
| this via a 'moves' instruction. copyin is a UNIX SVR3 (and later) function.
| If you don't have copyin, use the local copy of the function below.
|
| The FPSP calls mem_read to read the original F-line instruction in order
| to extract the data register number when the 'Dn' addressing mode is
| used.
|
|Input:
| a0 - user source address
| a1 - supervisor destination address
| d0 - number of bytes to read (maximum count is 12)
|
| Like mem_write, mem_read always reads with a supervisor
| destination address on the supervisor stack. Also like mem_write,
| the EXC_SR is checked and a simple memory copy is done if reading
| from supervisor space is indicated.
|
.global mem_read
mem_read:
btstb #5,EXC_SR(%a6) |check for supervisor state
beqs user_read
super_read:
moveb (%a0)+,(%a1)+
subql #1,%d0
bnes super_read
rts
user_read:
movel %d1,-(%sp) |preserve d1 just in case
movel %d0,-(%sp)
movel %a1,-(%sp)
movel %a0,-(%sp)
jsr copyin
addw #12,%sp
movel (%sp)+,%d1
rts
|
| Use these routines if your kernel doesn't have copyout/copyin equivalents.
| Assumes that D0/D1/A0/A1 are scratch registers. copyout overwrites DFC,
| and copyin overwrites SFC.
|
copyout:
movel 4(%sp),%a0 | source
movel 8(%sp),%a1 | destination
movel 12(%sp),%d0 | count
subl #1,%d0 | dec count by 1 for dbra
movel #1,%d1
| DFC is already set
| movec %d1,%DFC | set dfc for user data space
moreout:
moveb (%a0)+,%d1 | fetch supervisor byte
out_ea:
movesb %d1,(%a1)+ | write user byte
dbf %d0,moreout
rts
copyin:
movel 4(%sp),%a0 | source
movel 8(%sp),%a1 | destination
movel 12(%sp),%d0 | count
subl #1,%d0 | dec count by 1 for dbra
movel #1,%d1
| SFC is already set
| movec %d1,%SFC | set sfc for user space
morein:
in_ea:
movesb (%a0)+,%d1 | fetch user byte
moveb %d1,(%a1)+ | write supervisor byte
dbf %d0,morein
rts
.section .fixup,#alloc,#execinstr
.even
1:
jbra fpsp040_die
.section __ex_table,#alloc
.align 4
.long in_ea,1b
.long out_ea,1b
|end
|
| arch/m68k/fpsp040/srem_mod.S
|
|
| srem_mod.sa 3.1 12/10/90
|
| The entry point sMOD computes the floating point MOD of the
| input values X and Y. The entry point sREM computes the floating
| point (IEEE) REM of the input values X and Y.
|
| INPUT
| -----
| Double-extended value Y is pointed to by address in register
| A0. Double-extended value X is located in -12(A0). The values
| of X and Y are both nonzero and finite; although either or both
| of them can be denormalized. The special cases of zeros, NaNs,
| and infinities are handled elsewhere.
|
| OUTPUT
| ------
| FREM(X,Y) or FMOD(X,Y), depending on entry point.
|
| ALGORITHM
| ---------
|
| Step 1. Save and strip signs of X and Y: signX := sign(X),
| signY := sign(Y), X := |X|, Y := |Y|,
| signQ := signX EOR signY. Record whether MOD or REM
| is requested.
|
| Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0.
| If (L < 0) then
| R := X, go to Step 4.
| else
| R := 2^(-L)X, j := L.
| endif
|
| Step 3. Perform MOD(X,Y)
| 3.1 If R = Y, go to Step 9.
| 3.2 If R > Y, then { R := R - Y, Q := Q + 1}
| 3.3 If j = 0, go to Step 4.
| 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to
| Step 3.1.
|
| Step 4. At this point, R = X - QY = MOD(X,Y). Set
| Last_Subtract := false (used in Step 7 below). If
| MOD is requested, go to Step 6.
|
| Step 5. R = MOD(X,Y), but REM(X,Y) is requested.
| 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to
| Step 6.
| 5.2 If R > Y/2, then { set Last_Subtract := true,
| Q := Q + 1, Y := signY*Y }. Go to Step 6.
| 5.3 This is the tricky case of R = Y/2. If Q is odd,
| then { Q := Q + 1, signX := -signX }.
|
| Step 6. R := signX*R.
|
| Step 7. If Last_Subtract = true, R := R - Y.
|
| Step 8. Return signQ, last 7 bits of Q, and R as required.
|
| Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus,
| X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1),
| R := 0. Return signQ, last 7 bits of Q, and R.
|
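| Steps 2-4 amount to a shift-and-subtract division loop; a rough
| outline (illustrative only, ignoring the extended-precision carries):
|
|	L = expo(X) - expo(Y); Q = 0; R = X scaled by 2^(-L);  /* R < 2Y */
|	for (j = L; j >= 0; j--) {
|		if (R >= Y) { R -= Y; Q += 1; }
|		if (j > 0)  { R *= 2; Q *= 2; }
|	}
|	/* R = MOD(X,Y); REM further subtracts Y when R > Y/2 */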
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
SREM_MOD: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
.set Mod_Flag,L_SCR3
.set SignY,FP_SCR3+4
.set SignX,FP_SCR3+8
.set SignQ,FP_SCR3+12
.set Sc_Flag,FP_SCR4
.set Y,FP_SCR1
.set Y_Hi,Y+4
.set Y_Lo,Y+8
.set R,FP_SCR2
.set R_Hi,R+4
.set R_Lo,R+8
Scale: .long 0x00010000,0x80000000,0x00000000,0x00000000
|xref t_avoid_unsupp
.global smod
smod:
movel #0,Mod_Flag(%a6)
bras Mod_Rem
.global srem
srem:
movel #1,Mod_Flag(%a6)
Mod_Rem:
|..Save sign of X and Y
moveml %d2-%d7,-(%a7) | ...save data registers
movew (%a0),%d3
movew %d3,SignY(%a6)
andil #0x00007FFF,%d3 | ...Y := |Y|
|
movel 4(%a0),%d4
movel 8(%a0),%d5 | ...(D3,D4,D5) is |Y|
tstl %d3
bnes Y_Normal
movel #0x00003FFE,%d3 | ...$3FFD + 1
tstl %d4
bnes HiY_not0
HiY_0:
movel %d5,%d4
clrl %d5
subil #32,%d3
clrl %d6
bfffo %d4{#0:#32},%d6
lsll %d6,%d4
subl %d6,%d3 | ...(D3,D4,D5) is normalized
| ...with bias $7FFD
bras Chk_X
HiY_not0:
clrl %d6
bfffo %d4{#0:#32},%d6
subl %d6,%d3
lsll %d6,%d4
movel %d5,%d7 | ...a copy of D5
lsll %d6,%d5
negl %d6
addil #32,%d6
lsrl %d6,%d7
orl %d7,%d4 | ...(D3,D4,D5) normalized
| ...with bias $7FFD
bras Chk_X
Y_Normal:
addil #0x00003FFE,%d3 | ...(D3,D4,D5) normalized
| ...with bias $7FFD
Chk_X:
movew -12(%a0),%d0
movew %d0,SignX(%a6)
movew SignY(%a6),%d1
eorl %d0,%d1
andil #0x00008000,%d1
movew %d1,SignQ(%a6) | ...sign(Q) obtained
andil #0x00007FFF,%d0
movel -8(%a0),%d1
movel -4(%a0),%d2 | ...(D0,D1,D2) is |X|
tstl %d0
bnes X_Normal
movel #0x00003FFE,%d0
tstl %d1
bnes HiX_not0
HiX_0:
movel %d2,%d1
clrl %d2
subil #32,%d0
clrl %d6
bfffo %d1{#0:#32},%d6
lsll %d6,%d1
subl %d6,%d0 | ...(D0,D1,D2) is normalized
| ...with bias $7FFD
bras Init
HiX_not0:
clrl %d6
bfffo %d1{#0:#32},%d6
subl %d6,%d0
lsll %d6,%d1
movel %d2,%d7 | ...a copy of D2
lsll %d6,%d2
negl %d6
addil #32,%d6
lsrl %d6,%d7
orl %d7,%d1 | ...(D0,D1,D2) normalized
| ...with bias $7FFD
bras Init
X_Normal:
addil #0x00003FFE,%d0 | ...(D0,D1,D2) normalized
| ...with bias $7FFD
Init:
|
movel %d3,L_SCR1(%a6) | ...save biased expo(Y)
movel %d0,L_SCR2(%a6) |save d0
subl %d3,%d0 | ...L := expo(X)-expo(Y)
| Move.L D0,L ...D0 is j
clrl %d6 | ...D6 := carry <- 0
clrl %d3 | ...D3 is Q
moveal #0,%a1 | ...A1 is k; j+k=L, Q=0
|..(Carry,D1,D2) is R
tstl %d0
bges Mod_Loop
|..expo(X) < expo(Y). Thus X = mod(X,Y)
|
movel L_SCR2(%a6),%d0 |restore d0
bra Get_Mod
|..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
Mod_Loop:
tstl %d6 | ...test carry bit
bgts R_GT_Y
|..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
cmpl %d4,%d1 | ...compare hi(R) and hi(Y)
bnes R_NE_Y
cmpl %d5,%d2 | ...compare lo(R) and lo(Y)
bnes R_NE_Y
|..At this point, R = Y
bra Rem_is_0
R_NE_Y:
|..use the borrow of the previous compare
bcss R_LT_Y | ...borrow is set iff R < Y
R_GT_Y:
|..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
|..and Y < (D1,D2) < 2Y. Either way, perform R - Y
subl %d5,%d2 | ...lo(R) - lo(Y)
subxl %d4,%d1 | ...hi(R) - hi(Y)
clrl %d6 | ...clear carry
addql #1,%d3 | ...Q := Q + 1
R_LT_Y:
|..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
tstl %d0 | ...see if j = 0.
beqs PostLoop
addl %d3,%d3 | ...Q := 2Q
addl %d2,%d2 | ...lo(R) = 2lo(R)
roxll #1,%d1 | ...hi(R) = 2hi(R) + carry
scs %d6 | ...set Carry if 2(R) overflows
addql #1,%a1 | ...k := k+1
subql #1,%d0 | ...j := j - 1
|..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
bras Mod_Loop
PostLoop:
|..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
|..normalize R.
movel L_SCR1(%a6),%d0 | ...new biased expo of R
tstl %d1
bnes HiR_not0
HiR_0:
movel %d2,%d1
clrl %d2
subil #32,%d0
clrl %d6
bfffo %d1{#0:#32},%d6
lsll %d6,%d1
subl %d6,%d0 | ...(D0,D1,D2) is normalized
| ...with bias $7FFD
bras Get_Mod
HiR_not0:
clrl %d6
bfffo %d1{#0:#32},%d6
bmis Get_Mod | ...already normalized
subl %d6,%d0
lsll %d6,%d1
movel %d2,%d7 | ...a copy of D2
lsll %d6,%d2
negl %d6
addil #32,%d6
lsrl %d6,%d7
orl %d7,%d1 | ...(D0,D1,D2) normalized
|
Get_Mod:
cmpil #0x000041FE,%d0
bges No_Scale
Do_Scale:
movew %d0,R(%a6)
clrw R+2(%a6)
movel %d1,R_Hi(%a6)
movel %d2,R_Lo(%a6)
movel L_SCR1(%a6),%d6
movew %d6,Y(%a6)
clrw Y+2(%a6)
movel %d4,Y_Hi(%a6)
movel %d5,Y_Lo(%a6)
fmovex R(%a6),%fp0 | ...no exception
movel #1,Sc_Flag(%a6)
bras ModOrRem
No_Scale:
movel %d1,R_Hi(%a6)
movel %d2,R_Lo(%a6)
subil #0x3FFE,%d0
movew %d0,R(%a6)
clrw R+2(%a6)
movel L_SCR1(%a6),%d6
subil #0x3FFE,%d6
movel %d6,L_SCR1(%a6)
fmovex R(%a6),%fp0
movew %d6,Y(%a6)
movel %d4,Y_Hi(%a6)
movel %d5,Y_Lo(%a6)
movel #0,Sc_Flag(%a6)
|
ModOrRem:
movel Mod_Flag(%a6),%d6
beqs Fix_Sign
movel L_SCR1(%a6),%d6 | ...new biased expo(Y)
subql #1,%d6 | ...biased expo(Y/2)
cmpl %d6,%d0
blts Fix_Sign
bgts Last_Sub
cmpl %d4,%d1
bnes Not_EQ
cmpl %d5,%d2
bnes Not_EQ
bra Tie_Case
Not_EQ:
bcss Fix_Sign
Last_Sub:
|
fsubx Y(%a6),%fp0 | ...no exceptions
addql #1,%d3 | ...Q := Q + 1
|
Fix_Sign:
|..Get sign of X
movew SignX(%a6),%d6
bges Get_Q
fnegx %fp0
|..Get Q
|
Get_Q:
clrl %d6
movew SignQ(%a6),%d6 | ...D6 is sign(Q)
movel #8,%d7
lsrl %d7,%d6
andil #0x0000007F,%d3 | ...7 bits of Q
orl %d6,%d3 | ...sign and bits of Q
swap %d3
fmovel %fpsr,%d6
andil #0xFF00FFFF,%d6
orl %d3,%d6
fmovel %d6,%fpsr | ...put Q in fpsr
|
Restore:
moveml (%a7)+,%d2-%d7
fmovel USER_FPCR(%a6),%fpcr
movel Sc_Flag(%a6),%d0
beqs Finish
fmulx Scale(%pc),%fp0 | ...may cause underflow
bra t_avoid_unsupp |check for denorm as a
| ;result of the scaling
Finish:
fmovex %fp0,%fp0 |capture exceptions & round
rts
Rem_is_0:
|..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
addql #1,%d3
cmpil #8,%d0 | ...D0 is j
bges Q_Big
lsll %d0,%d3
bras Set_R_0
Q_Big:
clrl %d3
Set_R_0:
fmoves #0x00000000,%fp0
movel #0,Sc_Flag(%a6)
bra Fix_Sign
Tie_Case:
|..Check parity of Q
movel %d3,%d6
andil #0x00000001,%d6
tstl %d6
beq Fix_Sign | ...Q is even
|..Q is odd, Q := Q + 1, signX := -signX
addql #1,%d3
movew SignX(%a6),%d6
eoril #0x00008000,%d6
movew %d6,SignX(%a6)
bra Fix_Sign
|end
|
| arch/m68k/fpsp040/x_unsupp.S
|
|
| x_unsupp.sa 3.3 7/1/91
|
| fpsp_unsupp --- FPSP handler for unsupported data type exception
|
| Trap vector #55 (See table 8-1 Mc68030 User's manual).
| Invoked when the user program encounters a data format (packed) that
| the hardware does not support, or a data type (denormalized or
| unnormalized numbers) that it cannot operate on directly.
| Normalizes denorms and unnorms, unpacks packed numbers, then stores
| them back into the machine to let the 040 finish the operation.
|
| Unsupp calls two routines:
| 1. get_op - gets the operand(s)
| 2. res_func - restore the function back into the 040 or
| if fmove.p fpm,<ea> then pack source (fpm)
| and store in users memory <ea>.
|
| Input: Long fsave stack frame
|
|
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_UNSUPP: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref get_op
|xref res_func
|xref gen_except
|xref fpsp_fmt_error
.global fpsp_unsupp
fpsp_unsupp:
|
link %a6,#-LOCAL_SIZE
fsave -(%a7)
moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
fmovemx %fp0-%fp3,USER_FP0(%a6)
fmoveml %fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
moveb (%a7),VER_TMP(%a6) |save version number
moveb (%a7),%d0 |test for valid version num
andib #0xf0,%d0 |test for $4x
cmpib #VER_4,%d0 |must be $4x or exit
bnel fpsp_fmt_error
fmovel #0,%FPSR |clear all user status bits
fmovel #0,%FPCR |clear all user control bits
|
| The following lines are used to ensure that the FPSR
| exception byte and condition codes are clear before proceeding,
| except in the case of fmove, which leaves the cc's intact.
|
unsupp_con:
movel USER_FPSR(%a6),%d1
btst #5,CMDREG1B(%a6) |looking for fmove out
bne fmove_con
andl #0xFF00FF,%d1 |clear all but aexcs and qbyte
bras end_fix
fmove_con:
andl #0x0FFF40FF,%d1 |clear all but cc's, snan bit, aexcs, and qbyte
end_fix:
movel %d1,USER_FPSR(%a6)
st UFLG_TMP(%a6) |set flag for unsupp data
bsrl get_op |everything okay, go get operand(s)
bsrl res_func |fix up stack frame so can restore it
clrl -(%a7)
moveb VER_TMP(%a6),(%a7) |move idle fmt word to top of stack
bral gen_except
|
|end
|
| arch/m68k/fpsp040/x_unfl.S
|
|
| x_unfl.sa 3.4 7/1/91
|
| fpsp_unfl --- FPSP handler for underflow exception
|
| Trap disabled results
| For 881/2 compatibility, sw must denormalize the intermediate
| result, then store the result. Denormalization is accomplished
| by taking the intermediate result (which is always normalized) and
| shifting the mantissa right while incrementing the exponent until
| it is equal to the denormalized exponent for the destination
| format. After denormalization, the result is rounded to the
| destination format.
|
| Trap enabled results
| All trap disabled code applies. In addition the exceptional
| operand needs to be made available to the user with a bias of $6000
| added to the exponent.
|
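| A rough sketch of the denormalization step (illustrative only; the
| denorm and round routines below do this on the extended mantissa
| with guard, round and sticky bits):
|
|	while (exp < dest_denorm_exp) {
|		sticky |= mant & 1;	/* remember everything shifted out */
|		mant >>= 1;
|		exp++;
|	}
|	/* then round mant to the destination precision and mode */
|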
| Copyright (C) Motorola, Inc. 1990
| All Rights Reserved
|
| For details on the license for this file, please see the
| file, README, in this same directory.
X_UNFL: |idnt 2,1 | Motorola 040 Floating Point Software Package
|section 8
#include "fpsp.h"
|xref denorm
|xref round
|xref store
|xref g_rndpr
|xref g_opcls
|xref g_dfmtou
|xref real_unfl
|xref real_inex
|xref fpsp_done
|xref b1238_fix
.global fpsp_unfl
fpsp_unfl:
link %a6,#-LOCAL_SIZE
fsave -(%a7)
moveml %d0-%d1/%a0-%a1,USER_DA(%a6)
fmovemx %fp0-%fp3,USER_FP0(%a6)
fmoveml %fpcr/%fpsr/%fpiar,USER_FPCR(%a6)
|
bsrl unf_res |denormalize, round & store interm op
|
| If underflow exceptions are not enabled, check for inexact
| exception
|
btstb #unfl_bit,FPCR_ENABLE(%a6)
beqs ck_inex
btstb #E3,E_BYTE(%a6)
beqs no_e3_1
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix.
|
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix |test for bug1238 case
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
no_e3_1:
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_unfl
|
| It is possible to have either inex2 or inex1 exceptions with the
| unfl. If the inex enable bit is set in the FPCR, and either
| inex2 or inex1 occurred, we must clean up and branch to the
| real inex handler.
|
ck_inex:
moveb FPCR_ENABLE(%a6),%d0
andb FPSR_EXCEPT(%a6),%d0
andib #0x3,%d0
beqs unfl_done
|
| Inexact enabled and reported, and we must take an inexact exception
|
take_inex:
btstb #E3,E_BYTE(%a6)
beqs no_e3_2
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix.
|
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix |test for bug1238 case
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
no_e3_2:
moveb #INEX_VEC,EXC_VEC+1(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral real_inex
unfl_done:
bclrb #E3,E_BYTE(%a6)
beqs e1_set |if set then branch
|
| Clear dirty bit on dest register in the frame before branching
| to b1238_fix.
|
bfextu CMDREG3B(%a6){#6:#3},%d0 |get dest reg no
bclrb %d0,FPR_DIRTY_BITS(%a6) |clr dest dirty bit
bsrl b1238_fix |test for bug1238 case
movel USER_FPSR(%a6),FPSR_SHADOW(%a6)
orl #sx_mask,E_BYTE(%a6)
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
frestore (%a7)+
unlk %a6
bral fpsp_done
e1_set:
moveml USER_DA(%a6),%d0-%d1/%a0-%a1
fmovemx USER_FP0(%a6),%fp0-%fp3
fmoveml USER_FPCR(%a6),%fpcr/%fpsr/%fpiar
unlk %a6
bral fpsp_done
|
| unf_res --- underflow result calculation
|
unf_res:
bsrl g_rndpr |returns RND_PREC in d0 0=ext,
| ;1=sgl, 2=dbl
| ;we need the RND_PREC in the
| ;upper word for round
movew #0,-(%a7)
movew %d0,-(%a7) |copy RND_PREC to stack
|
|
| If the exception bit set is E3, the exceptional operand from the
| fpu is in WBTEMP; else it is in FPTEMP.
|
btstb #E3,E_BYTE(%a6)
beqs unf_E1
unf_E3:
lea WBTEMP(%a6),%a0 |a0 now points to operand
|
| Test for fsgldiv and fsglmul. If the inst was one of these, then
| force the precision to extended for the denorm routine. Use
| the user's precision for the round routine.
|
movew CMDREG3B(%a6),%d1 |check for fsgldiv or fsglmul
andiw #0x7f,%d1
cmpiw #0x30,%d1 |check for sgldiv
beqs unf_sgl
cmpiw #0x33,%d1 |check for sglmul
bnes unf_cont |if not, use fpcr prec in round
unf_sgl:
clrl %d0
movew #0x1,(%a7) |override g_rndpr precision
| ;force single
bras unf_cont
unf_E1:
lea FPTEMP(%a6),%a0 |a0 now points to operand
unf_cont:
bclrb #sign_bit,LOCAL_EX(%a0) |clear sign bit
sne LOCAL_SGN(%a0) |store sign
bsrl denorm |returns denorm, a0 points to it
|
| WARNING:
| ;d0 has guard,round sticky bit
| ;make sure that it is not corrupted
| ;before it reaches the round subroutine
| ;also ensure that a0 isn't corrupted
|
| Set up d1 for round subroutine d1 contains the PREC/MODE
| information respectively on upper/lower register halves.
|
bfextu FPCR_MODE(%a6){#2:#2},%d1 |get mode from FPCR
| ;mode in lower d1
addl (%a7)+,%d1 |merge PREC/MODE
|
| WARNING: a0 and d0 are assumed to be intact between the denorm and
| round subroutines. All code between these two subroutines
| must not corrupt a0 and d0.
|
|
| Perform Round
| Input: a0 points to input operand
| d0{31:29} has guard, round, sticky
| d1{01:00} has rounding mode
| d1{17:16} has rounding precision
| Output: a0 points to rounded operand
|
bsrl round |returns rounded denorm at (a0)
|
| Differentiate between store to memory vs. store to register
|
unf_store:
bsrl g_opcls |returns opclass in d0{2:0}
cmpib #0x3,%d0
bnes not_opc011
|
| At this point, a store to memory is pending
|
opc011:
bsrl g_dfmtou
tstb %d0
beqs ext_opc011 |If extended, do not subtract
| ;If destination format is sgl/dbl,
tstb LOCAL_HI(%a0) |If rounded result is normal,don't
| ;subtract
bmis ext_opc011
subqw #1,LOCAL_EX(%a0) |account for denorm bias vs.
| ;normalized bias
| ; normalized denormalized
| ;single $7f $7e
| ;double $3ff $3fe
|
ext_opc011:
bsrl store |stores to memory
bras unf_done |finish up
|
| At this point, a store to a float register is pending
|
not_opc011:
bsrl store |stores to float register
| ;a0 is not corrupted on a store to a
| ;float register.
|
| Set the condition codes according to result
|
tstl LOCAL_HI(%a0) |check upper mantissa
bnes ck_sgn
tstl LOCAL_LO(%a0) |check lower mantissa
bnes ck_sgn
bsetb #z_bit,FPSR_CC(%a6) |set condition codes if zero
ck_sgn:
btstb #sign_bit,LOCAL_EX(%a0) |check the sign bit
beqs unf_done
bsetb #neg_bit,FPSR_CC(%a6)
|
| Finish.
|
unf_done:
btstb #inex2_bit,FPSR_EXCEPT(%a6)
beqs no_aunfl
bsetb #aunfl_bit,FPSR_AEXCEPT(%a6)
no_aunfl:
rts
|end
|
| arch/m68k/ifpsp060/os.S
|
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
|M68000 Hi-Performance Microprocessor Division
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
|and any warranty against infringement with regard to the SOFTWARE
|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
|
|To the maximum extent permitted by applicable law,
|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
|
|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
|so long as this entire notice is retained without alteration in any modified and/or
|redistributed versions, and that such modified versions are clearly identified as such.
|No licenses are granted by implication, estoppel or otherwise under any patents
|or trademarks of Motorola, Inc.
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| os.s
|
| This file contains:
| - example "Call-Out"s required by both the ISP and FPSP.
|
#include <linux/linkage.h>
|################################
| EXAMPLE CALL-OUTS #
| #
| _060_dmem_write() #
| _060_dmem_read() #
| _060_imem_read() #
| _060_dmem_read_byte() #
| _060_dmem_read_word() #
| _060_dmem_read_long() #
| _060_imem_read_word() #
| _060_imem_read_long() #
| _060_dmem_write_byte() #
| _060_dmem_write_word() #
| _060_dmem_write_long() #
| #
| _060_real_trace() #
| _060_real_access() #
|################################
|
| Each IO routine checks to see if the memory write/read is to/from user
| or supervisor application space. The examples below use simple "move"
| instructions for supervisor mode applications and call _copyin()/_copyout()
| for user mode applications.
| When installing the 060SP, the _copyin()/_copyout() equivalents for a
| given operating system should be substituted.
|
| The addresses within the 060SP are guaranteed to be on the stack.
| The result is that Unix processes are allowed to sleep as a consequence
| of a page fault during a _copyout.
|
| Linux/68k: The _060_[id]mem_{read,write}_{byte,word,long} functions
| (i.e. all the known length <= 4) are implemented by single moves
| statements instead of (more expensive) copy{in,out} calls, if
| working in user space
|
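| Each call-out below therefore follows the same pattern (illustrative
| outline only):
|
|	if (supervisor bit set in the saved SR at 0x4(%a6))
|		plain move between the two addresses;
|	else
|		access user space with 'moves' (or copyin/copyout);
|	d1 = 0 on success, nonzero on failure;
|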
| _060_dmem_write():
|
| Writes to data memory while in supervisor mode.
|
| INPUTS:
| a0 - supervisor source address
| a1 - user destination address
| d0 - number of bytes to write
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_write
_060_dmem_write:
subq.l #1,%d0
btst #0x5,0x4(%a6) | check for supervisor state
beqs user_write
super_write:
move.b (%a0)+,(%a1)+ | copy 1 byte
dbra %d0,super_write | quit if --ctr < 0
clr.l %d1 | return success
rts
user_write:
move.b (%a0)+,%d1 | copy 1 byte
copyoutae:
movs.b %d1,(%a1)+
dbra %d0,user_write | quit if --ctr < 0
clr.l %d1 | return success
rts
|
| _060_imem_read(), _060_dmem_read():
|
| Reads from data/instruction memory while in supervisor mode.
|
| INPUTS:
| a0 - user source address
| a1 - supervisor destination address
| d0 - number of bytes to read
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d1 - 0 = success, !0 = failure
|
.global _060_imem_read
.global _060_dmem_read
_060_imem_read:
_060_dmem_read:
subq.l #1,%d0
btst #0x5,0x4(%a6) | check for supervisor state
beqs user_read
super_read:
move.b (%a0)+,(%a1)+ | copy 1 byte
dbra %d0,super_read | quit if --ctr < 0
clr.l %d1 | return success
rts
user_read:
copyinae:
movs.b (%a0)+,%d1
move.b %d1,(%a1)+ | copy 1 byte
dbra %d0,user_read | quit if --ctr < 0
clr.l %d1 | return success
rts
|
| _060_dmem_read_byte():
|
| Read a data byte from user memory.
|
| INPUTS:
| a0 - user source address
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d0 - data byte in d0
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_read_byte
_060_dmem_read_byte:
clr.l %d0 | clear whole longword
clr.l %d1 | assume success
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmrbs | supervisor
dmrbuae:movs.b (%a0),%d0 | fetch user byte
rts
dmrbs: move.b (%a0),%d0 | fetch super byte
rts
|
| _060_dmem_read_word():
|
| Read a data word from user memory.
|
| INPUTS:
| a0 - user source address
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d0 - data word in d0
| d1 - 0 = success, !0 = failure
|
| _060_imem_read_word():
|
| Read an instruction word from user memory.
|
| INPUTS:
| a0 - user source address
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d0 - instruction word in d0
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_read_word
.global _060_imem_read_word
_060_dmem_read_word:
_060_imem_read_word:
clr.l %d1 | assume success
clr.l %d0 | clear whole longword
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmrws | supervisor
dmrwuae:movs.w (%a0), %d0 | fetch user word
rts
dmrws: move.w (%a0), %d0 | fetch super word
rts
|
| _060_dmem_read_long():
|
| Read a data longword from user memory.
|
| INPUTS:
| a0 - user source address
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d0 - data longword in d0
| d1 - 0 = success, !0 = failure
|
| _060_imem_read_long():
|
| Read an instruction longword from user memory.
|
| INPUTS:
| a0 - user source address
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d0 - instruction longword in d0
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_read_long
.global _060_imem_read_long
_060_dmem_read_long:
_060_imem_read_long:
clr.l %d1 | assume success
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmrls | supervisor
dmrluae:movs.l (%a0),%d0 | fetch user longword
rts
dmrls: move.l (%a0),%d0 | fetch super longword
rts
|
| _060_dmem_write_byte():
|
| Write a data byte to user memory.
|
| INPUTS:
| a0 - user destination address
| d0 - data byte in d0
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_write_byte
_060_dmem_write_byte:
clr.l %d1 | assume success
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmwbs | supervisor
dmwbuae:movs.b %d0,(%a0) | store user byte
rts
dmwbs: move.b %d0,(%a0) | store super byte
rts
|
| _060_dmem_write_word():
|
| Write a data word to user memory.
|
| INPUTS:
| a0 - user destination address
| d0 - data word in d0
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_write_word
_060_dmem_write_word:
clr.l %d1 | assume success
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmwws | supervisor
dmwwu:
dmwwuae:movs.w %d0,(%a0) | store user word
bras dmwwr
dmwws: move.w %d0,(%a0) | store super word
dmwwr: clr.l %d1 | return success
rts
|
| _060_dmem_write_long():
|
| Write a data longword to user memory.
|
| INPUTS:
| a0 - user destination address
| d0 - data longword in d0
| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
| OUTPUTS:
| d1 - 0 = success, !0 = failure
|
.global _060_dmem_write_long
_060_dmem_write_long:
clr.l %d1 | assume success
btst #0x5,0x4(%a6) | check for supervisor state
bnes dmwls | supervisor
dmwluae:movs.l %d0,(%a0) | store user longword
rts
dmwls: move.l %d0,(%a0) | store super longword
rts
#if 0
|###############################################
|
| Use these routines if your kernel doesn't have _copyout/_copyin equivalents.
| Assumes that D0/D1/A0/A1 are scratch registers. The _copyin/_copyout
| below assume that the SFC/DFC have been set previously.
|
| Linux/68k: These are basically non-inlined versions of
| memcpy_{to,from}fs, but without long-transfer optimization
| Note: Assumed that SFC/DFC are pointing correctly to user data
| space... Should be right, or are there any exceptions?
|
| int _copyout(supervisor_addr, user_addr, nbytes)
|
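| A hypothetical caller (illustration only, assuming %a0/%a1 already hold
| the supervisor source and user destination; arguments pushed right to
| left as for a C call):
|
|	move.l	#4,-(%sp)		| nbytes
|	move.l	%a1,-(%sp)		| user destination
|	move.l	%a0,-(%sp)		| supervisor source
|	jsr	_copyout
|	lea	12(%sp),%sp		| pop the three arguments
|	tst.l	%d0			| 0 = success, !0 = failure
|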
.global _copyout
_copyout:
move.l 4(%sp),%a0 | source
move.l 8(%sp),%a1 | destination
move.l 12(%sp),%d0 | count
subq.l #1,%d0
moreout:
move.b (%a0)+,%d1 | fetch supervisor byte
copyoutae:
movs.b %d1,(%a1)+ | store user byte
dbra %d0,moreout | are we through yet?
moveq #0,%d0 | return success
rts
|
| int _copyin(user_addr, supervisor_addr, nbytes)
|
.global _copyin
_copyin:
move.l 4(%sp),%a0 | source
move.l 8(%sp),%a1 | destination
move.l 12(%sp),%d0 | count
subq.l #1,%d0
morein:
copyinae:
movs.b (%a0)+,%d1 | fetch user byte
move.b %d1,(%a1)+ | write supervisor byte
dbra %d0,morein | are we through yet?
moveq #0,%d0 | return success
rts
#endif
|###########################################################################
|
| _060_real_trace():
|
| This is the exit point for the 060FPSP when an instruction is being traced
| and there are no other higher priority exceptions pending for this instruction
| or they have already been processed.
|
| The sample code below simply executes an "rte".
|
.global _060_real_trace
_060_real_trace:
bral trap
|
| _060_real_access():
|
| This is the exit point for the 060FPSP when an access error exception
| is encountered. The routine below should point to the operating system
| handler for access error exceptions. The exception stack frame is an
| 8-word access error frame.
|
| The sample routine below simply executes an "rte" instruction which
| is most likely the incorrect thing to do and could put the system
| into an infinite loop.
|
.global _060_real_access
_060_real_access:
bral buserr
| Exception handling for movs access to illegal memory
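| (Each __ex_table entry below pairs the address of a "movs" that may
| fault on a user access with the fixup at 1:, which simply returns -1
| in %d1 so the caller reports failure instead of taking an unhandled
| access error.)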
.section .fixup,#alloc,#execinstr
.even
1: moveq #-1,%d1
rts
.section __ex_table,#alloc
.align 4
.long dmrbuae,1b
.long dmrwuae,1b
.long dmrluae,1b
.long dmwbuae,1b
.long dmwwuae,1b
.long dmwluae,1b
.long copyoutae,1b
.long copyinae,1b
.text
|
|
arch/m68k/ifpsp060/iskeleton.S
|
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
|M68000 Hi-Performance Microprocessor Division
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
|and any warranty against infringement with regard to the SOFTWARE
|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
|
|To the maximum extent permitted by applicable law,
|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
|
|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
|so long as this entire notice is retained without alteration in any modified and/or
|redistributed versions, and that such modified versions are clearly identified as such.
|No licenses are granted by implication, estoppel or otherwise under any patents
|or trademarks of Motorola, Inc.
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| iskeleton.s
|
| This file contains:
| (1) example "Call-out"s
| (2) example package entry code
| (3) example "Call-out" table
|
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
|################################
| (1) EXAMPLE CALL-OUTS #
| #
| _060_isp_done() #
| _060_real_chk() #
| _060_real_divbyzero() #
| #
| _060_real_cas() #
| _060_real_cas2() #
| _060_real_lock_page() #
| _060_real_unlock_page() #
|################################
|
| _060_isp_done():
|
| This is an example main exit point for the Unimplemented Integer
| Instruction exception handler. For a normal exit, the
| _isp_unimp() branches to here so that the operating system
| can do any clean-up desired. The stack frame is the
| Unimplemented Integer Instruction stack frame with
| the PC pointing to the instruction following the instruction
| just emulated.
| To simply continue execution at the next instruction, just
| do an "rte".
|
| Linux/68k: If returning to user space, check whether rescheduling or signal delivery is needed.
.global _060_isp_done
_060_isp_done:
btst #0x5,%sp@ | supervisor bit set in saved SR?
beq .Lnotkern
rte
.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
| deliver signals, reschedule etc..
jra ret_from_exception
|
| _060_real_chk():
|
| This is an alternate exit point for the Unimplemented Integer
| Instruction exception handler. If the instruction was a "chk2"
| and the operand was out of bounds, then _isp_unimp() creates
| a CHK exception stack frame from the Unimplemented Integer Instruction
| stack frame and branches to this routine.
|
| Linux/68k: commented out test for tracing
.global _060_real_chk
_060_real_chk:
| tst.b (%sp) | is tracing enabled?
| bpls real_chk_end | no
|
| CHK FRAME TRACE FRAME
| ***************** *****************
| * Current PC * * Current PC *
| ***************** *****************
| * 0x2 * 0x018 * * 0x2 * 0x024 *
| ***************** *****************
| * Next * * Next *
| * PC * * PC *
| ***************** *****************
| * SR * * SR *
| ***************** *****************
|
| move.b #0x24,0x7(%sp) | set trace vecno
| bral _060_real_trace
real_chk_end:
bral trap | jump to trap handler
|
| _060_real_divbyzero:
|
| This is an alternate exit point for the Unimplemented Integer
| Instruction exception handler isp_unimp(). If the instruction is a 64-bit
| integer divide where the source operand is a zero, then the _isp_unimp()
| creates a Divide-by-zero exception stack frame from the Unimplemented
| Integer Instruction stack frame and branches to this routine.
|
| Remember that a trace exception may be pending. The code below performs
| no action associated with the divide-by-zero exception. If tracing is
| enabled, then it creates a Trace exception stack frame from the
| divide-by-zero exception stack frame and branches to the _real_trace()
| entry point.
|
| Linux/68k: commented out test for tracing
.global _060_real_divbyzero
_060_real_divbyzero:
| tst.b (%sp) | is tracing enabled?
| bpls real_divbyzero_end | no
|
| DIVBYZERO FRAME TRACE FRAME
| ***************** *****************
| * Current PC * * Current PC *
| ***************** *****************
| * 0x2 * 0x014 * * 0x2 * 0x024 *
| ***************** *****************
| * Next * * Next *
| * PC * * PC *
| ***************** *****************
| * SR * * SR *
| ***************** *****************
|
| move.b #0x24,0x7(%sp) | set trace vecno
| bral _060_real_trace
real_divbyzero_end:
bral trap | jump to trap handler
|##########################
|
| _060_real_cas():
|
| Entry point for the selected cas emulation code implementation.
| If the implementation provided by the 68060ISP is sufficient,
| then this routine simply re-enters the package through _isp_cas.
|
.global _060_real_cas
_060_real_cas:
bral _I_CALL_TOP+0x80+0x08
|
| _060_real_cas2():
|
| Entry point for the selected cas2 emulation code implementation.
| If the implementation provided by the 68060ISP is sufficient,
| then this routine simply re-enters the package through _isp_cas2.
|
.global _060_real_cas2
_060_real_cas2:
bral _I_CALL_TOP+0x80+0x10
|
| _060_real_lock_page():
|
| Entry point for the operating system's routine to "lock" a page
| from being paged out. This routine is needed by the cas/cas2
| algorithms so that no page faults occur within the "core" code
| region. Note: the routine must lock two pages if the operand
| spans two pages.
| NOTE: THE ROUTINE SHOULD RETURN AN FSLW VALUE IN D0 ON FAILURE
| SO THAT THE 060SP CAN CREATE A PROPER ACCESS ERROR FRAME.
| Arguments:
| a0 = operand address
| d0 = 'xxxxxxff -> supervisor; 'xxxxxx00 -> user
| d1 = 'xxxxxxff -> longword; 'xxxxxx00 -> word
| Expected outputs:
| d0 = 0 -> success; non-zero -> failure
|
| Linux/m68k: Make sure the page is properly paged in, so we use
| plpaw and handle any exception here. The kernel must not be
| preempted until _060_unlock_page(), so that the page stays mapped.
|
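| (Editorial note: "plpaw" below is the 68060 "load physical address,
| write intent" probe; it forces the page containing the operand to be
| resident and writable. If the probe faults, the __ex_table entries
| further down route execution to fixup code that returns a non-zero
| FSLW-style value in %d0, which is what the NOTE above requires.)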
.global _060_real_lock_page
_060_real_lock_page:
move.l %d2,-(%sp)
| load sfc/dfc
tst.b %d0
jne 1f
moveq #1,%d0
jra 2f
1: moveq #5,%d0
2: movec.l %dfc,%d2
movec.l %d0,%dfc
movec.l %d0,%sfc
clr.l %d0
| prefetch address
.chip 68060
move.l %a0,%a1
1: plpaw (%a1)
addq.w #1,%a0
tst.b %d1
jeq 2f
addq.w #2,%a0
2: plpaw (%a0)
3: .chip 68k
| restore sfc/dfc
movec.l %d2,%dfc
movec.l %d2,%sfc
move.l (%sp)+,%d2
rts
.section __ex_table,"a"
.align 4
.long 1b,11f
.long 2b,21f
.previous
.section .fixup,"ax"
.even
11: move.l #0x020003c0,%d0
or.l %d2,%d0
swap %d0
jra 3b
21: move.l #0x02000bc0,%d0
or.l %d2,%d0
swap %d0
jra 3b
.previous
|
| _060_real_unlock_page():
|
| Entry point for the operating system's routine to "unlock" a
| page that has been "locked" previously with _real_lock_page.
| Note: the routine must unlock two pages if the operand spans
| two pages.
| Arguments:
| a0 = operand address
| d0 = 'xxxxxxff -> supervisor; 'xxxxxx00 -> user
| d1 = 'xxxxxxff -> longword; 'xxxxxx00 -> word
|
| Linux/m68k: perhaps reenable preemption here...
.global _060_real_unlock_page
_060_real_unlock_page:
clr.l %d0
rts
|###########################################################################
|#################################
| (2) EXAMPLE PACKAGE ENTRY CODE #
|#################################
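| (Editorial sketch of the documented layout: each entry point below
| branches into the 060 ISP image that is appended after the call-out
| table; the 0x80 skips the 128-byte table itself and the additional
| 0x00/0x08/0x10/... offsets select the individual package handlers,
| which sit at 8-byte intervals at the top of the image.)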
.global _060_isp_unimp
_060_isp_unimp:
bral _I_CALL_TOP+0x80+0x00
.global _060_isp_cas
_060_isp_cas:
bral _I_CALL_TOP+0x80+0x08
.global _060_isp_cas2
_060_isp_cas2:
bral _I_CALL_TOP+0x80+0x10
.global _060_isp_cas_finish
_060_isp_cas_finish:
bra.l _I_CALL_TOP+0x80+0x18
.global _060_isp_cas2_finish
_060_isp_cas2_finish:
bral _I_CALL_TOP+0x80+0x20
.global _060_isp_cas_inrange
_060_isp_cas_inrange:
bral _I_CALL_TOP+0x80+0x28
.global _060_isp_cas_terminate
_060_isp_cas_terminate:
bral _I_CALL_TOP+0x80+0x30
.global _060_isp_cas_restart
_060_isp_cas_restart:
bral _I_CALL_TOP+0x80+0x38
|###########################################################################
|###############################
| (3) EXAMPLE CALL-OUT SECTION #
|###############################
| The size of this section MUST be 128 bytes!!!
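| (Editorial check: 32 longword entries * 4 bytes = 128 bytes. Each
| entry holds the offset of a call-out relative to _I_CALL_TOP, so the
| package locates a call-out as "address of _I_CALL_TOP + stored offset".)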
_I_CALL_TOP:
.long _060_real_chk - _I_CALL_TOP
.long _060_real_divbyzero - _I_CALL_TOP
.long _060_real_trace - _I_CALL_TOP
.long _060_real_access - _I_CALL_TOP
.long _060_isp_done - _I_CALL_TOP
.long _060_real_cas - _I_CALL_TOP
.long _060_real_cas2 - _I_CALL_TOP
.long _060_real_lock_page - _I_CALL_TOP
.long _060_real_unlock_page - _I_CALL_TOP
.long 0x00000000, 0x00000000, 0x00000000, 0x00000000
.long 0x00000000, 0x00000000, 0x00000000
.long _060_imem_read - _I_CALL_TOP
.long _060_dmem_read - _I_CALL_TOP
.long _060_dmem_write - _I_CALL_TOP
.long _060_imem_read_word - _I_CALL_TOP
.long _060_imem_read_long - _I_CALL_TOP
.long _060_dmem_read_byte - _I_CALL_TOP
.long _060_dmem_read_word - _I_CALL_TOP
.long _060_dmem_read_long - _I_CALL_TOP
.long _060_dmem_write_byte - _I_CALL_TOP
.long _060_dmem_write_word - _I_CALL_TOP
.long _060_dmem_write_long - _I_CALL_TOP
.long 0x00000000
.long 0x00000000, 0x00000000, 0x00000000, 0x00000000
|###########################################################################
| 060 INTEGER KERNEL PACKAGE MUST GO HERE!!!
#include "isp.sa"
|
|
arch/m68k/ifpsp060/fskeleton.S
|
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
|M68000 Hi-Performance Microprocessor Division
|M68060 Software Package
|Production Release P1.00 -- October 10, 1994
|
|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
|
|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
|To the maximum extent permitted by applicable law,
|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
|and any warranty against infringement with regard to the SOFTWARE
|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
|
|To the maximum extent permitted by applicable law,
|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
|
|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
|so long as this entire notice is retained without alteration in any modified and/or
|redistributed versions, and that such modified versions are clearly identified as such.
|No licenses are granted by implication, estoppel or otherwise under any patents
|or trademarks of Motorola, Inc.
|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| fskeleton.s
|
| This file contains:
| (1) example "Call-out"s
| (2) example package entry code
| (3) example "Call-out" table
|
#include <linux/linkage.h>
|################################
| (1) EXAMPLE CALL-OUTS #
| #
| _060_fpsp_done() #
| _060_real_ovfl() #
| _060_real_unfl() #
| _060_real_operr() #
| _060_real_snan() #
| _060_real_dz() #
| _060_real_inex() #
| _060_real_bsun() #
| _060_real_fline() #
| _060_real_fpu_disabled() #
| _060_real_trap() #
|################################
|
| _060_fpsp_done():
|
| This is the main exit point for the 68060 Floating-Point
| Software Package. For a normal exit, all 060FPSP routines call this
| routine. The operating system can do system dependent clean-up or
| simply execute an "rte" as with the sample code below.
|
.global _060_fpsp_done
_060_fpsp_done:
bral _060_isp_done | do the same as isp_done
|
| _060_real_ovfl():
|
| This is the exit point for the 060FPSP when an enabled overflow exception
| is present. The routine below should point to the operating system handler
| for enabled overflow conditions. The exception stack frame is an overflow
| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_ovfl
_060_real_ovfl:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_unfl():
|
| This is the exit point for the 060FPSP when an enabled underflow exception
| is present. The routine below should point to the operating system handler
| for enabled underflow conditions. The exception stack frame is an underflow
| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_unfl
_060_real_unfl:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_operr():
|
| This is the exit point for the 060FPSP when an enabled operand error exception
| is present. The routine below should point to the operating system handler
| for enabled operand error exceptions. The exception stack frame is an operand error
| stack frame. The FP state frame holds the source operand of the faulting
| instruction.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_operr
_060_real_operr:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_snan():
|
| This is the exit point for the 060FPSP when an enabled signalling NaN exception
| is present. The routine below should point to the operating system handler
| for enabled signalling NaN exceptions. The exception stack frame is a signalling NaN
| stack frame. The FP state frame holds the source operand of the faulting
| instruction.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_snan
_060_real_snan:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_dz():
|
| This is the exit point for the 060FPSP when an enabled divide-by-zero exception
| is present. The routine below should point to the operating system handler
| for enabled divide-by-zero exceptions. The exception stack frame is a divide-by-zero
| stack frame. The FP state frame holds the source operand of the faulting
| instruction.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_dz
_060_real_dz:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_inex():
|
| This is the exit point for the 060FPSP when an enabled inexact exception
| is present. The routine below should point to the operating system handler
| for enabled inexact exceptions. The exception stack frame is an inexact
| stack frame. The FP state frame holds the source operand of the faulting
| instruction.
|
| The sample routine below simply clears the exception status bit and
| does an "rte".
|
.global _060_real_inex
_060_real_inex:
fsave -(%sp)
move.w #0x6000,0x2(%sp)
frestore (%sp)+
bral trap | jump to trap handler
|
| _060_real_bsun():
|
| This is the exit point for the 060FPSP when an enabled bsun exception
| is present. The routine below should point to the operating system handler
| for enabled bsun exceptions. The exception stack frame is a bsun
| stack frame.
|
| The sample routine below clears the exception status bit, clears the NaN
| bit in the FPSR, and does an "rte". The instruction that caused the
| bsun will now be re-executed but with the NaN FPSR bit cleared.
|
.global _060_real_bsun
_060_real_bsun:
| fsave -(%sp)
fmove.l %fpsr,-(%sp)
andi.b #0xfe,(%sp)
fmove.l (%sp)+,%fpsr
bral trap | jump to trap handler
|
| _060_real_fline():
|
| This is the exit point for the 060FPSP when an F-Line Illegal exception is
| encountered. Three different types of exceptions can enter the F-Line exception
| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
| _fpsp_fline() distinguishes between the three and acts appropriately. F-Line
| Illegals branch here.
|
.global _060_real_fline
_060_real_fline:
bral trap | jump to trap handler
|
| _060_real_fpu_disabled():
|
| This is the exit point for the 060FPSP when an FPU disabled exception is
| encountered. Three different types of exceptions can enter the F-Line exception
| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
| _fpsp_fline() distinguishes between the three and acts appropriately. FPU disabled
| exceptions branch here.
|
| The sample code below enables the FPU, sets the PC field in the exception stack
| frame to the PC of the instruction causing the exception, and does an "rte".
| The execution of the instruction then proceeds with an enabled floating-point
| unit.
|
.global _060_real_fpu_disabled
_060_real_fpu_disabled:
move.l %d0,-(%sp) | enable the fpu
.long 0x4E7A0808 |movec pcr,%d0
bclr #0x1,%d0
.long 0x4E7B0808 |movec %d0,pcr
move.l (%sp)+,%d0
move.l 0xc(%sp),0x2(%sp) | set "Current PC"
rte
|
| _060_real_trap():
|
| This is the exit point for the 060FPSP when an emulated "ftrapcc" instruction
| discovers that the trap condition is true and it should branch to the operating
| system handler for the trap exception vector number 7.
|
| The sample code below simply executes an "rte".
|
.global _060_real_trap
_060_real_trap:
bral trap | jump to trap handler
|############################################################################
|#################################
| (2) EXAMPLE PACKAGE ENTRY CODE #
|#################################
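| (Editorial note: as with the integer package above, these stubs branch
| to fixed 8-byte-spaced entry points just past the 128-byte call-out
| table at the head of the appended FPSP image.)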
.global _060_fpsp_snan
_060_fpsp_snan:
bra.l _FP_CALL_TOP+0x80+0x00
.global _060_fpsp_operr
_060_fpsp_operr:
bra.l _FP_CALL_TOP+0x80+0x08
.global _060_fpsp_ovfl
_060_fpsp_ovfl:
bra.l _FP_CALL_TOP+0x80+0x10
.global _060_fpsp_unfl
_060_fpsp_unfl:
bra.l _FP_CALL_TOP+0x80+0x18
.global _060_fpsp_dz
_060_fpsp_dz:
bra.l _FP_CALL_TOP+0x80+0x20
.global _060_fpsp_inex
_060_fpsp_inex:
bra.l _FP_CALL_TOP+0x80+0x28
.global _060_fpsp_fline
_060_fpsp_fline:
bra.l _FP_CALL_TOP+0x80+0x30
.global _060_fpsp_unsupp
_060_fpsp_unsupp:
bra.l _FP_CALL_TOP+0x80+0x38
.global _060_fpsp_effadd
_060_fpsp_effadd:
bra.l _FP_CALL_TOP+0x80+0x40
|############################################################################
|###############################
| (3) EXAMPLE CALL-OUT SECTION #
|###############################
| The size of this section MUST be 128 bytes!!!
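| (Editorial check: as above, 32 longword entries * 4 bytes = 128 bytes,
| each holding a call-out's offset relative to _FP_CALL_TOP.)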
_FP_CALL_TOP:
.long _060_real_bsun - _FP_CALL_TOP
.long _060_real_snan - _FP_CALL_TOP
.long _060_real_operr - _FP_CALL_TOP
.long _060_real_ovfl - _FP_CALL_TOP
.long _060_real_unfl - _FP_CALL_TOP
.long _060_real_dz - _FP_CALL_TOP
.long _060_real_inex - _FP_CALL_TOP
.long _060_real_fline - _FP_CALL_TOP
.long _060_real_fpu_disabled - _FP_CALL_TOP
.long _060_real_trap - _FP_CALL_TOP
.long _060_real_trace - _FP_CALL_TOP
.long _060_real_access - _FP_CALL_TOP
.long _060_fpsp_done - _FP_CALL_TOP
.long 0x00000000, 0x00000000, 0x00000000
.long _060_imem_read - _FP_CALL_TOP
.long _060_dmem_read - _FP_CALL_TOP
.long _060_dmem_write - _FP_CALL_TOP
.long _060_imem_read_word - _FP_CALL_TOP
.long _060_imem_read_long - _FP_CALL_TOP
.long _060_dmem_read_byte - _FP_CALL_TOP
.long _060_dmem_read_word - _FP_CALL_TOP
.long _060_dmem_read_long - _FP_CALL_TOP
.long _060_dmem_write_byte - _FP_CALL_TOP
.long _060_dmem_write_word - _FP_CALL_TOP
.long _060_dmem_write_long - _FP_CALL_TOP
.long 0x00000000
.long 0x00000000, 0x00000000, 0x00000000, 0x00000000
|############################################################################
| 060 FPSP KERNEL PACKAGE NEEDS TO GO HERE!!!
#include "fpsp.sa"
|
|
arch/m68k/math-emu/fp_util.S
|
/*
* fp_util.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fp_emu.h"
/*
* Here are lots of conversion and normalization functions mainly
* used by fp_scan.S
* Note that these functions are optimized for "normal" numbers;
* these are handled first and exit as fast as possible, which is
* especially important for fp_normalize_ext/fp_conv_ext2ext, as
* they are called very often.
* The register usage is optimized for fp_scan.S and for whichever
* register happens to be unused at that point, so be careful if you
* want to change something here. %d0 and %d1 are always usable,
* sometimes %d2 (or only its lower half); most functions have to
* return %a0 unmodified, so that the caller can reuse it immediately.
*/
.globl fp_ill, fp_end
| exits from fp_scan:
| illegal instruction
fp_ill:
printf ,"fp_illegal\n"
rts
| completed instruction
fp_end:
tst.l (TASK_MM-8,%a2)
jmi 1f
tst.l (TASK_MM-4,%a2)
jmi 1f
tst.l (TASK_MM,%a2)
jpl 2f
1: printf ,"oops:%p,%p,%p\n",3,%a2@(TASK_MM-8),%a2@(TASK_MM-4),%a2@(TASK_MM)
2: clr.l %d0
rts
.globl fp_conv_long2ext, fp_conv_single2ext
.globl fp_conv_double2ext, fp_conv_ext2ext
.globl fp_normalize_ext, fp_normalize_double
.globl fp_normalize_single, fp_normalize_single_fast
.globl fp_conv_ext2double, fp_conv_ext2single
.globl fp_conv_ext2long, fp_conv_ext2short
.globl fp_conv_ext2byte
.globl fp_finalrounding_single, fp_finalrounding_single_fast
.globl fp_finalrounding_double
.globl fp_finalrounding, fp_finaltest, fp_final
/*
* First several conversion functions from a source operand
* into the extended format. Note, that only fp_conv_ext2ext
* normalizes the number and is always called after the other
* conversion functions, which only move the information into
* fp_ext structure.
*/
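| For reference (a sketch of the struct fp_ext layout assumed from
| fp_emu.h, offsets relative to the pointer passed in %a0):
|	+0	lowmant - extra rounding byte (used with CONFIG_M68KFPU_EMU_EXTRAPREC)
|	+1	sign    - 0 = positive, 1 = negative
|	+2	exp     - 16-bit exponent, bias 0x3fff
|	+4	mant    - 64-bit mantissa with explicit leading bit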
| fp_conv_long2ext:
|
| args: %d0 = source (32-bit long)
| %a0 = destination (ptr to struct fp_ext)
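| A worked example (illustrative): converting %d0 = 5 stores sign 0,
| exp 0x3fff+31 = 0x401e and mantissa 0x00000005 00000000; only a later
| fp_normalize_ext/fp_conv_ext2ext shifts this up to the normalized
| form 0xa0000000 00000000 with exp 0x4001 (i.e. 1.25 * 2^2).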
fp_conv_long2ext:
printf PCONV,"l2e: %p -> %p(",2,%d0,%a0
clr.l %d1 | sign defaults to zero
tst.l %d0
jeq fp_l2e_zero | is source zero?
jpl 1f | positive?
moveq #1,%d1
neg.l %d0
1: swap %d1
move.w #0x3fff+31,%d1
move.l %d1,(%a0)+ | set sign / exp
move.l %d0,(%a0)+ | set mantissa
clr.l (%a0)
subq.l #8,%a0 | restore %a0
printx PCONV,%a0@
printf PCONV,")\n"
rts
| source is zero
fp_l2e_zero:
clr.l (%a0)+
clr.l (%a0)+
clr.l (%a0)
subq.l #8,%a0
printx PCONV,%a0@
printf PCONV,")\n"
rts
| fp_conv_single2ext
| args: %d0 = source (single-precision fp value)
| %a0 = dest (struct fp_ext *)
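| A worked example (illustrative): the single value 0x40490fdb (the
| nearest single to pi) has exponent field 0x80, which is re-biased to
| 0x80 + 0x3f80 = 0x4000; the stored result is sign 0, exp 0x4000,
| mantissa 0xc90fdb00 00000000 (explicit bit set, low lword cleared).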
fp_conv_single2ext:
printf PCONV,"s2e: %p -> %p(",2,%d0,%a0
move.l %d0,%d1
lsl.l #8,%d0 | shift mantissa
lsr.l #8,%d1 | exponent / sign
lsr.l #7,%d1
lsr.w #8,%d1
jeq fp_s2e_small | zero / denormal?
cmp.w #0xff,%d1 | NaN / Inf?
jeq fp_s2e_large
bset #31,%d0 | set explicit bit
add.w #0x3fff-0x7f,%d1 | re-bias the exponent.
9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp
move.l %d0,(%a0)+ | high lword of fp_ext.mant
clr.l (%a0) | low lword = 0
subq.l #8,%a0
printx PCONV,%a0@
printf PCONV,")\n"
rts
| zeros and denormalized
fp_s2e_small:
| exponent is zero, so explicit bit is already zero too
tst.l %d0
jeq 9b
move.w #0x4000-0x7f,%d1
jra 9b
| infinities and NAN
fp_s2e_large:
bclr #31,%d0 | clear explicit bit
move.w #0x7fff,%d1
jra 9b
fp_conv_double2ext:
#ifdef FPU_EMU_DEBUG
getuser.l %a1@(0),%d0,fp_err_ua2,%a1
getuser.l %a1@(4),%d1,fp_err_ua2,%a1
printf PCONV,"d2e: %p%p -> %p(",3,%d0,%d1,%a0
#endif
getuser.l (%a1)+,%d0,fp_err_ua2,%a1
move.l %d0,%d1
lsl.l #8,%d0 | shift high mantissa
lsl.l #3,%d0
lsr.l #8,%d1 | exponent / sign
lsr.l #7,%d1
lsr.w #5,%d1
jeq fp_d2e_small | zero / denormal?
cmp.w #0x7ff,%d1 | NaN / Inf?
jeq fp_d2e_large
bset #31,%d0 | set explicit bit
add.w #0x3fff-0x3ff,%d1 | re-bias the exponent.
9: move.l %d1,(%a0)+ | fp_ext.sign, fp_ext.exp
move.l %d0,(%a0)+
getuser.l (%a1)+,%d0,fp_err_ua2,%a1
move.l %d0,%d1
lsl.l #8,%d0
lsl.l #3,%d0
move.l %d0,(%a0)
moveq #21,%d0
lsr.l %d0,%d1
or.l %d1,-(%a0)
subq.l #4,%a0
printx PCONV,%a0@
printf PCONV,")\n"
rts
| zeros and denormalized
fp_d2e_small:
| exponent is zero, so explicit bit is already zero too
tst.l %d0
jeq 9b
move.w #0x4000-0x3ff,%d1
jra 9b
| infinities and NAN
fp_d2e_large:
bclr #31,%d0 | clear explicit bit
move.w #0x7fff,%d1
jra 9b
| fp_conv_ext2ext:
| originally used to get longdouble from userspace, now it's
| called before arithmetic operations to make sure the number
| is normalized [maybe rename it?].
| args: %a0 = dest (struct fp_ext *)
| returns 0 in %d0 for a NaN, otherwise 1
fp_conv_ext2ext:
printf PCONV,"e2e: %p(",1,%a0
printx PCONV,%a0@
printf PCONV,"), "
move.l (%a0)+,%d0
cmp.w #0x7fff,%d0 | Inf / NaN?
jeq fp_e2e_large
move.l (%a0),%d0
jpl fp_e2e_small | zero / denorm?
| The high bit is set, so normalization is irrelevant.
fp_e2e_checkround:
subq.l #4,%a0
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
move.b (%a0),%d0
jne fp_e2e_round
#endif
printf PCONV,"%p(",1,%a0
printx PCONV,%a0@
printf PCONV,")\n"
moveq #1,%d0
rts
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
fp_e2e_round:
fp_set_sr FPSR_EXC_INEX2
clr.b (%a0)
move.w (FPD_RND,FPDATA),%d2
jne fp_e2e_roundother | %d2 == 0, round to nearest
tst.b %d0 | test guard bit
jpl 9f | zero is closer
btst #0,(11,%a0) | test lsb bit
jne fp_e2e_doroundup | round to infinity
lsl.b #1,%d0 | check low bits
jeq 9f | round to zero
fp_e2e_doroundup:
addq.l #1,(8,%a0)
jcc 9f
addq.l #1,(4,%a0)
jcc 9f
move.w #0x8000,(4,%a0)
addq.w #1,(2,%a0)
9: printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
fp_e2e_roundother:
subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 1f | %d2 > 2, round to +infinity
tst.b (1,%a0) | to -inf
jne fp_e2e_doroundup | negative, round to infinity
jra 9b | positive, round to zero
1: tst.b (1,%a0) | to +inf
jeq fp_e2e_doroundup | positive, round to infinity
jra 9b | negative, round to zero
#endif
| zeros and subnormals:
| try to normalize these anyway.
fp_e2e_small:
jne fp_e2e_small1 | high lword zero?
move.l (4,%a0),%d0
jne fp_e2e_small2
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
clr.l %d0
move.b (-4,%a0),%d0
jne fp_e2e_small3
#endif
| Genuine zero.
clr.w -(%a0)
subq.l #2,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
moveq #1,%d0
rts
| definitely subnormal, need to shift all 64 bits
fp_e2e_small1:
bfffo %d0{#0,#32},%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
1: move.w %d2,(%a0)+
move.w %d1,%d2
jeq fp_e2e_checkround
| fancy 64-bit double-shift begins here
lsl.l %d2,%d0
move.l %d0,(%a0)+
move.l (%a0),%d0
move.l %d0,%d1
lsl.l %d2,%d0
move.l %d0,(%a0)
neg.w %d2
and.w #0x1f,%d2
lsr.l %d2,%d1
or.l %d1,-(%a0)
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
fp_e2e_extra1:
clr.l %d0
move.b (-4,%a0),%d0
neg.w %d2
add.w #24,%d2
jcc 1f
clr.b (-4,%a0)
lsl.l %d2,%d0
or.l %d0,(4,%a0)
jra fp_e2e_checkround
1: addq.w #8,%d2
lsl.l %d2,%d0
move.b %d0,(-4,%a0)
lsr.l #8,%d0
or.l %d0,(4,%a0)
#endif
jra fp_e2e_checkround
| pathologically small subnormal
fp_e2e_small2:
bfffo %d0{#0,#32},%d1
add.w #32,%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Beyond pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
1: move.w %d2,(%a0)+
ext.l %d1
jeq fp_e2e_checkround
clr.l (4,%a0)
sub.w #32,%d2
jcs 1f
lsl.l %d1,%d0 | lower lword needs only to be shifted
move.l %d0,(%a0) | into the higher lword
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
clr.l %d0
move.b (-4,%a0),%d0
clr.b (-4,%a0)
neg.w %d1
add.w #32,%d1
bfins %d0,(%a0){%d1,#8}
#endif
jra fp_e2e_checkround
1: neg.w %d1 | lower lword is split between
bfins %d0,(%a0){%d1,#32} | higher and lower lword
#ifndef CONFIG_M68KFPU_EMU_EXTRAPREC
jra fp_e2e_checkround
#else
move.w %d1,%d2
jra fp_e2e_extra1
| These are extremely small numbers, that will mostly end up as zero
| anyway, so this is only important for correct rounding.
fp_e2e_small3:
bfffo %d0{#24,#8},%d1
add.w #40,%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
1: move.w %d2,(%a0)+
ext.l %d1
jeq fp_e2e_checkround
cmp.w #8,%d1
jcs 2f
1: clr.b (-4,%a0)
sub.w #64,%d1
jcs 1f
add.w #24,%d1
lsl.l %d1,%d0
move.l %d0,(%a0)
jra fp_e2e_checkround
1: neg.w %d1
bfins %d0,(%a0){%d1,#8}
jra fp_e2e_checkround
2: lsl.l %d1,%d0
move.b %d0,(-4,%a0)
lsr.l #8,%d0
move.b %d0,(7,%a0)
jra fp_e2e_checkround
#endif
1: move.l %d0,%d1 | lower lword is split between
lsl.l %d2,%d0 | higher and lower lword
move.l %d0,(%a0)
move.l %d1,%d0
neg.w %d2
add.w #32,%d2
lsr.l %d2,%d0
move.l %d0,-(%a0)
jra fp_e2e_checkround
| Infinities and NaNs
fp_e2e_large:
move.l (%a0)+,%d0
jne 3f
1: tst.l (%a0)
jne 4f
moveq #1,%d0
2: subq.l #8,%a0
printf PCONV,"%p(",1,%a0
printx PCONV,%a0@
printf PCONV,")\n"
rts
| we have maybe a NaN, shift off the highest bit
3: lsl.l #1,%d0
jeq 1b
| we have a NaN, clear the return value
4: clrl %d0
jra 2b
/*
* Normalization functions. Call these on the output of general
* FP operators, and before any conversion into the destination
* formats. fp_normalize_ext has always to be called first, the
* following conversion functions expect an already normalized
* number.
*/
| fp_normalize_ext:
| normalize an extended in extended (unpacked) format, basically
| it does the same as fp_conv_ext2ext, additionally it also does
| the necessary postprocessing checks.
| args: %a0 (struct fp_ext *)
| NOTE: it does _not_ modify %a0/%a1 and the upper word of %d2
fp_normalize_ext:
printf PNORM,"ne: %p(",1,%a0
printx PNORM,%a0@
printf PNORM,"), "
move.l (%a0)+,%d0
cmp.w #0x7fff,%d0 | Inf / NaN?
jeq fp_ne_large
move.l (%a0),%d0
jpl fp_ne_small | zero / denorm?
| The high bit is set, so normalization is irrelevant.
fp_ne_checkround:
subq.l #4,%a0
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
move.b (%a0),%d0
jne fp_ne_round
#endif
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
fp_ne_round:
fp_set_sr FPSR_EXC_INEX2
clr.b (%a0)
move.w (FPD_RND,FPDATA),%d2
jne fp_ne_roundother | %d2 == 0, round to nearest
tst.b %d0 | test guard bit
jpl 9f | zero is closer
btst #0,(11,%a0) | test lsb bit
jne fp_ne_doroundup | round to infinity
lsl.b #1,%d0 | check low bits
jeq 9f | round to zero
fp_ne_doroundup:
addq.l #1,(8,%a0)
jcc 9f
addq.l #1,(4,%a0)
jcc 9f
addq.w #1,(2,%a0)
move.w #0x8000,(4,%a0)
9: printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
fp_ne_roundother:
subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 1f | %d2 > 2, round to +infinity
tst.b (1,%a0) | to -inf
jne fp_ne_doroundup | negative, round to infinity
jra 9b | positive, round to zero
1: tst.b (1,%a0) | to +inf
jeq fp_ne_doroundup | positive, round to infinity
jra 9b | negative, round to zero
#endif
| Zeros and subnormal numbers
| These are probably merely subnormal, rather than "denormalized"
| numbers, so we will try to make them normal again.
fp_ne_small:
jne fp_ne_small1 | high lword zero?
move.l (4,%a0),%d0
jne fp_ne_small2
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
clr.l %d0
move.b (-4,%a0),%d0
jne fp_ne_small3
#endif
| Genuine zero.
clr.w -(%a0)
subq.l #2,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| Subnormal.
fp_ne_small1:
bfffo %d0{#0,#32},%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
fp_set_sr FPSR_EXC_UNFL
1: move.w %d2,(%a0)+
move.w %d1,%d2
jeq fp_ne_checkround
| This is exactly the same 64-bit double shift as seen above.
lsl.l %d2,%d0
move.l %d0,(%a0)+
move.l (%a0),%d0
move.l %d0,%d1
lsl.l %d2,%d0
move.l %d0,(%a0)
neg.w %d2
and.w #0x1f,%d2
lsr.l %d2,%d1
or.l %d1,-(%a0)
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
fp_ne_extra1:
clr.l %d0
move.b (-4,%a0),%d0
neg.w %d2
add.w #24,%d2
jcc 1f
clr.b (-4,%a0)
lsl.l %d2,%d0
or.l %d0,(4,%a0)
jra fp_ne_checkround
1: addq.w #8,%d2
lsl.l %d2,%d0
move.b %d0,(-4,%a0)
lsr.l #8,%d0
or.l %d0,(4,%a0)
#endif
jra fp_ne_checkround
| May or may not be subnormal, if so, only 32 bits to shift.
fp_ne_small2:
bfffo %d0{#0,#32},%d1
add.w #32,%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Beyond pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
fp_set_sr FPSR_EXC_UNFL
1: move.w %d2,(%a0)+
ext.l %d1
jeq fp_ne_checkround
clr.l (4,%a0)
sub.w #32,%d1
jcs 1f
lsl.l %d1,%d0 | lower lword needs only to be shifted
move.l %d0,(%a0) | into the higher lword
#ifdef CONFIG_M68KFPU_EMU_EXTRAPREC
clr.l %d0
move.b (-4,%a0),%d0
clr.b (-4,%a0)
neg.w %d1
add.w #32,%d1
bfins %d0,(%a0){%d1,#8}
#endif
jra fp_ne_checkround
1: neg.w %d1 | lower lword is split between
bfins %d0,(%a0){%d1,#32} | higher and lower lword
#ifndef CONFIG_M68KFPU_EMU_EXTRAPREC
jra fp_ne_checkround
#else
move.w %d1,%d2
jra fp_ne_extra1
| These are extremely small numbers, that will mostly end up as zero
| anyway, so this is only important for correct rounding.
fp_ne_small3:
bfffo %d0{#24,#8},%d1
add.w #40,%d1
move.w -(%a0),%d2
sub.w %d1,%d2
jcc 1f
| Pathologically small, denormalize.
add.w %d2,%d1
clr.w %d2
1: move.w %d2,(%a0)+
ext.l %d1
jeq fp_ne_checkround
cmp.w #8,%d1
jcs 2f
1: clr.b (-4,%a0)
sub.w #64,%d1
jcs 1f
add.w #24,%d1
lsl.l %d1,%d0
move.l %d0,(%a0)
jra fp_ne_checkround
1: neg.w %d1
bfins %d0,(%a0){%d1,#8}
jra fp_ne_checkround
2: lsl.l %d1,%d0
move.b %d0,(-4,%a0)
lsr.l #8,%d0
move.b %d0,(7,%a0)
jra fp_ne_checkround
#endif
| Infinities and NaNs, again, same as above.
fp_ne_large:
move.l (%a0)+,%d0
jne 3f
1: tst.l (%a0)
jne 4f
2: subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| we have maybe a NaN, shift off the highest bit
3: move.l %d0,%d1
lsl.l #1,%d1
jne 4f
clr.l (-4,%a0)
jra 1b
| we have a NaN, test if it is signaling
4: bset #30,%d0
jne 2b
fp_set_sr FPSR_EXC_SNAN
move.l %d0,(-4,%a0)
jra 2b
| these next two do rounding as per the IEEE standard.
| values for the rounding modes appear to be:
| 0: Round to nearest
| 1: Round to zero
| 2: Round to -Infinity
| 3: Round to +Infinity
| both functions expect that fp_normalize_ext was already
| called (and the extended argument is already normalized
| as far as possible); they are used when a different
| rounding precision is selected and before converting
| into single/double
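| A worked halfway case (round to nearest, double precision): if the 11
| bits shifted off in fp_nd_round are exactly 0x400, the guard bit is
| set and everything below it is zero; the X bit then supplies the kept
| lsb, so an odd mantissa is rounded up and an even one is left alone,
| giving the usual IEEE "ties to even" result.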
| fp_normalize_double:
| normalize an extended with double (52-bit) precision
| args: %a0 (struct fp_ext *)
fp_normalize_double:
printf PNORM,"nd: %p(",1,%a0
printx PNORM,%a0@
printf PNORM,"), "
move.l (%a0)+,%d2
tst.w %d2
jeq fp_nd_zero | zero / denormalized
cmp.w #0x7fff,%d2
jeq fp_nd_huge | NaN / infinity.
sub.w #0x4000-0x3ff,%d2 | will the exponent fit?
jcs fp_nd_small | too small.
cmp.w #0x7fe,%d2
jcc fp_nd_large | too big.
addq.l #4,%a0
move.l (%a0),%d0 | low lword of mantissa
| now, round off the low 11 bits.
fp_nd_round:
moveq #21,%d1
lsl.l %d1,%d0 | keep 11 low bits.
jne fp_nd_checkround | Are they non-zero?
| nothing to do here
9: subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| Be careful with the X bit! It contains the lsb
| from the shift above, it is needed for round to nearest.
fp_nd_checkround:
fp_set_sr FPSR_EXC_INEX2 | INEX2 bit
and.w #0xf800,(2,%a0) | clear bits 0-10
move.w (FPD_RND,FPDATA),%d2 | rounding mode
jne 2f | %d2 == 0, round to nearest
tst.l %d0 | test guard bit
jpl 9b | zero is closer
| here we test the X bit by adding it to %d2
clr.w %d2 | first set z bit, addx only clears it
addx.w %d2,%d2 | test lsb bit
| IEEE754-specified "round to even" behaviour. If the guard
| bit is set, then the number is odd, so rounding works like
| in grade-school arithmetic (i.e. 1.5 rounds to 2.0)
| Otherwise, an equal distance rounds towards zero, so as not
| to produce an odd number. This is strange, but it is what
| the standard says.
jne fp_nd_doroundup | round to infinity
lsl.l #1,%d0 | check low bits
jeq 9b | round to zero
fp_nd_doroundup:
| round (the mantissa, that is) towards infinity
add.l #0x800,(%a0)
jcc 9b | no overflow, good.
addq.l #1,-(%a0) | extend to high lword
jcc 1f | no overflow, good.
| Yow! we have managed to overflow the mantissa. Since this
| only happens when %d1 was 0xfffff800, it is now zero, so
| reset the high bit, and increment the exponent.
move.w #0x8000,(%a0)
addq.w #1,-(%a0)
cmp.w #0x43ff,(%a0)+ | exponent now overflown?
jeq fp_nd_large | yes, so make it infinity.
1: subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
2: subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 3f | %d2 > 2, round to +infinity
| Round to +Inf or -Inf. High word of %d2 contains the
| sign of the number, by the way.
swap %d2 | to -inf
tst.b %d2
jne fp_nd_doroundup | negative, round to infinity
jra 9b | positive, round to zero
3: swap %d2 | to +inf
tst.b %d2
jeq fp_nd_doroundup | positive, round to infinity
jra 9b | negative, round to zero
| Exponent underflow. Try to make a denormal, and set it to
| the smallest possible fraction if this fails.
fp_nd_small:
fp_set_sr FPSR_EXC_UNFL | set UNFL bit
move.w #0x3c01,(-2,%a0) | 2**-1022
neg.w %d2 | degree of underflow
cmp.w #32,%d2 | single or double shift?
jcc 1f
| Again, another 64-bit double shift.
move.l (%a0),%d0
move.l %d0,%d1
lsr.l %d2,%d0
move.l %d0,(%a0)+
move.l (%a0),%d0
lsr.l %d2,%d0
neg.w %d2
add.w #32,%d2
lsl.l %d2,%d1
or.l %d1,%d0
move.l (%a0),%d1
move.l %d0,(%a0)
| Check to see if we shifted off any significant bits
lsl.l %d2,%d1
jeq fp_nd_round | Nope, round.
bset #0,%d0 | Yes, so set the "sticky bit".
jra fp_nd_round | Now, round.
| Another 64-bit single shift and store
1: sub.w #32,%d2
cmp.w #32,%d2 | Do we really need to shift?
jcc 2f | No, the number is too small.
move.l (%a0),%d0
clr.l (%a0)+
move.l %d0,%d1
lsr.l %d2,%d0
neg.w %d2
add.w #32,%d2
| Again, check to see if we shifted off any significant bits.
tst.l (%a0)
jeq 1f
bset #0,%d0 | Sticky bit.
1: move.l %d0,(%a0)
lsl.l %d2,%d1
jeq fp_nd_round
bset #0,%d0
jra fp_nd_round
| Sorry, the number is just too small.
2: clr.l (%a0)+
clr.l (%a0)
moveq #1,%d0 | Smallest possible fraction,
jra fp_nd_round | round as desired.
| zero and denormalized
fp_nd_zero:
tst.l (%a0)+
jne 1f
tst.l (%a0)
jne 1f
subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts | zero. nothing to do.
| These are not merely subnormal numbers, but true denormals,
| i.e. pathologically small (exponent is 2**-16383) numbers.
| It is clearly impossible for even a normal extended number
| with that exponent to fit into double precision, so just
| write these ones off as "too darn small".
1: fp_set_sr FPSR_EXC_UNFL | Set UNFL bit
clr.l (%a0)
clr.l -(%a0)
move.w #0x3c01,-(%a0) | i.e. 2**-1022
addq.l #6,%a0
moveq #1,%d0
jra fp_nd_round | round.
| Exponent overflow. Just call it infinity.
fp_nd_large:
move.w #0x7ff,%d0
and.w (6,%a0),%d0
jeq 1f
fp_set_sr FPSR_EXC_INEX2
1: fp_set_sr FPSR_EXC_OVFL
move.w (FPD_RND,FPDATA),%d2
jne 3f | %d2 = 0 round to nearest
1: move.w #0x7fff,(-2,%a0)
clr.l (%a0)+
clr.l (%a0)
2: subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
3: subq.w #2,%d2
jcs 5f | %d2 < 2, round to zero
jhi 4f | %d2 > 2, round to +infinity
tst.b (-3,%a0) | to -inf
jne 1b
jra 5f
4: tst.b (-3,%a0) | to +inf
jeq 1b
5: move.w #0x43fe,(-2,%a0)
moveq #-1,%d0
move.l %d0,(%a0)+
move.w #0xf800,%d0
move.l %d0,(%a0)
jra 2b
| Infinities or NaNs
fp_nd_huge:
subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| fp_normalize_single:
| normalize an extended with single (23-bit) precision
| args: %a0 (struct fp_ext *)
fp_normalize_single:
printf PNORM,"ns: %p(",1,%a0
printx PNORM,%a0@
printf PNORM,") "
addq.l #2,%a0
move.w (%a0)+,%d2
jeq fp_ns_zero | zero / denormalized
cmp.w #0x7fff,%d2
jeq fp_ns_huge | NaN / infinity.
sub.w #0x4000-0x7f,%d2 | will the exponent fit?
jcs fp_ns_small | too small.
cmp.w #0xfe,%d2
jcc fp_ns_large | too big.
move.l (%a0)+,%d0 | get high lword of mantissa
fp_ns_round:
tst.l (%a0) | check the low lword
jeq 1f
| Set a sticky bit if it is non-zero. This should only
| affect the rounding in what would otherwise be equal-
| distance situations, which is what we want it to do.
bset #0,%d0
1: clr.l (%a0) | zap it from memory.
| now, round off the low 8 bits of the hi lword.
tst.b %d0 | 8 low bits.
jne fp_ns_checkround | Are they non-zero?
| nothing to do here
subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
fp_ns_checkround:
fp_set_sr FPSR_EXC_INEX2 | INEX2 bit
clr.b -(%a0) | clear low byte of high lword
subq.l #3,%a0
move.w (FPD_RND,FPDATA),%d2 | rounding mode
jne 2f | %d2 == 0, round to nearest
tst.b %d0 | test guard bit
jpl 9f | zero is closer
btst #8,%d0 | test lsb bit
| round to even behaviour, see above.
jne fp_ns_doroundup | round to infinity
lsl.b #1,%d0 | check low bits
jeq 9f | round to zero
fp_ns_doroundup:
| round (the mantissa, that is) towards infinity
add.l #0x100,(%a0)
jcc 9f | no overflow, good.
| Overflow. This means that the %d1 was 0xffffff00, so it
| is now zero. We will set the mantissa to reflect this, and
| increment the exponent (checking for overflow there too)
move.w #0x8000,(%a0)
addq.w #1,-(%a0)
cmp.w #0x407f,(%a0)+ | exponent now overflown?
jeq fp_ns_large | yes, so make it infinity.
9: subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| check nondefault rounding modes
2: subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 3f | %d2 > 2, round to +infinity
tst.b (-3,%a0) | to -inf
jne fp_ns_doroundup | negative, round to infinity
jra 9b | positive, round to zero
3: tst.b (-3,%a0) | to +inf
jeq fp_ns_doroundup | positive, round to infinity
jra 9b | negative, round to zero
| Exponent underflow. Try to make a denormal, and set it to
| the smallest possible fraction if this fails.
fp_ns_small:
fp_set_sr FPSR_EXC_UNFL | set UNFL bit
move.w #0x3f81,(-2,%a0) | 2**-126
neg.w %d2 | degree of underflow
cmp.w #32,%d2 | single or double shift?
jcc 2f
| a 32-bit shift.
move.l (%a0),%d0
move.l %d0,%d1
lsr.l %d2,%d0
move.l %d0,(%a0)+
| Check to see if we shifted off any significant bits.
neg.w %d2
add.w #32,%d2
lsl.l %d2,%d1
jeq 1f
bset #0,%d0 | Sticky bit.
| Check the lower lword
1: tst.l (%a0)
jeq fp_ns_round
clr (%a0)
bset #0,%d0 | Sticky bit.
jra fp_ns_round
| Sorry, the number is just too small.
2: clr.l (%a0)+
clr.l (%a0)
moveq #1,%d0 | Smallest possible fraction,
jra fp_ns_round | round as desired.
| Exponent overflow. Just call it infinity.
fp_ns_large:
tst.b (3,%a0)
jeq 1f
fp_set_sr FPSR_EXC_INEX2
1: fp_set_sr FPSR_EXC_OVFL
move.w (FPD_RND,FPDATA),%d2
jne 3f | %d2 = 0 round to nearest
1: move.w #0x7fff,(-2,%a0)
clr.l (%a0)+
clr.l (%a0)
2: subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
3: subq.w #2,%d2
jcs 5f | %d2 < 2, round to zero
jhi 4f | %d2 > 2, round to +infinity
tst.b (-3,%a0) | to -inf
jne 1b
jra 5f
4: tst.b (-3,%a0) | to +inf
jeq 1b
5: move.w #0x407e,(-2,%a0)
move.l #0xffffff00,(%a0)+
clr.l (%a0)
jra 2b
| zero and denormalized
fp_ns_zero:
tst.l (%a0)+
jne 1f
tst.l (%a0)
jne 1f
subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts | zero. nothing to do.
| These are not merely subnormal numbers, but true denormals,
| i.e. pathologically small (exponent is 2**-16383) numbers.
| It is clearly impossible for even a normal extended number
| with that exponent to fit into single precision, so just
| write these ones off as "too darn small".
1: fp_set_sr FPSR_EXC_UNFL | Set UNFL bit
clr.l (%a0)
clr.l -(%a0)
move.w #0x3f81,-(%a0) | i.e. 2**-126
addq.l #6,%a0
moveq #1,%d0
jra fp_ns_round | round.
| Infinities or NaNs
fp_ns_huge:
subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| fp_normalize_single_fast:
| normalize an extended with single (23-bit) precision
| this is only used by fsgldiv/fsglmul, where the
| operand is not completely normalized.
| args: %a0 (struct fp_ext *)
fp_normalize_single_fast:
printf PNORM,"nsf: %p(",1,%a0
printx PNORM,%a0@
printf PNORM,") "
addq.l #2,%a0
move.w (%a0)+,%d2
cmp.w #0x7fff,%d2
jeq fp_nsf_huge | NaN / infinity.
move.l (%a0)+,%d0 | get high lword of mantissa
fp_nsf_round:
tst.l (%a0) | check the low lword
jeq 1f
| Set a sticky bit if it is non-zero. This should only
| affect the rounding in what would otherwise be equal-
| distance situations, which is what we want it to do.
bset #0,%d0
1: clr.l (%a0) | zap it from memory.
| now, round off the low 8 bits of the hi lword.
tst.b %d0 | 8 low bits.
jne fp_nsf_checkround | Are they non-zero?
| nothing to do here
subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
fp_nsf_checkround:
fp_set_sr FPSR_EXC_INEX2 | INEX2 bit
clr.b -(%a0) | clear low byte of high lword
subq.l #3,%a0
move.w (FPD_RND,FPDATA),%d2 | rounding mode
jne 2f | %d2 == 0, round to nearest
tst.b %d0 | test guard bit
jpl 9f | zero is closer
btst #8,%d0 | test lsb bit
| round to even behaviour, see above.
jne fp_nsf_doroundup | round to infinity
lsl.b #1,%d0 | check low bits
jeq 9f | round to zero
fp_nsf_doroundup:
| round (the mantissa, that is) towards infinity
add.l #0x100,(%a0)
jcc 9f | no overflow, good.
| Overflow. This means that the %d1 was 0xffffff00, so it
| is now zero. We will set the mantissa to reflect this, and
| increment the exponent (checking for overflow there too)
move.w #0x8000,(%a0)
addq.w #1,-(%a0)
cmp.w #0x407f,(%a0)+ | exponent now overflown?
jeq fp_nsf_large | yes, so make it infinity.
9: subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| check nondefault rounding modes
2: subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 3f | %d2 > 2, round to +infinity
tst.b (-3,%a0) | to -inf
jne fp_nsf_doroundup | negative, round to infinity
jra 9b | positive, round to zero
3: tst.b (-3,%a0) | to +inf
jeq fp_nsf_doroundup | positive, round to infinity
jra 9b | negative, round to zero
| Exponent overflow. Just call it infinity.
fp_nsf_large:
tst.b (3,%a0)
jeq 1f
fp_set_sr FPSR_EXC_INEX2
1: fp_set_sr FPSR_EXC_OVFL
move.w (FPD_RND,FPDATA),%d2
jne 3f | %d2 = 0 round to nearest
1: move.w #0x7fff,(-2,%a0)
clr.l (%a0)+
clr.l (%a0)
2: subq.l #8,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
3: subq.w #2,%d2
jcs 5f | %d2 < 2, round to zero
jhi 4f | %d2 > 2, round to +infinity
tst.b (-3,%a0) | to -inf
jne 1b
jra 5f
4: tst.b (-3,%a0) | to +inf
jeq 1b
5: move.w #0x407e,(-2,%a0)
move.l #0xffffff00,(%a0)+
clr.l (%a0)
jra 2b
| Infinities or NaNs
fp_nsf_huge:
subq.l #4,%a0
printf PNORM,"%p(",1,%a0
printx PNORM,%a0@
printf PNORM,")\n"
rts
| conv_ext2int (macro):
| Generates a subroutine that converts an extended value to an
| integer of a given size, again, with the appropriate type of
| rounding.
| Macro arguments:
| s: size, as given in an assembly instruction.
| b: number of bits in that size.
| Subroutine arguments:
| %a0: source (struct fp_ext *)
| Returns the integer in %d0 (like it should)
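| A worked example (illustrative): converting +2.5 (exp 0x4000,
| mantissa 0xa0000000 00000000) to a 32-bit integer with round to
| nearest shifts out a guard bit of 1 with nothing below it; the kept
| lsb of 2 is 0, so the result stays 2 (ties to even) and INEX2 is set.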
.macro conv_ext2int s,b
.set inf,(1<<(\b-1))-1 | i.e. MAXINT
printf PCONV,"e2i%d: %p(",2,#\b,%a0
printx PCONV,%a0@
printf PCONV,") "
addq.l #2,%a0
move.w (%a0)+,%d2 | exponent
jeq fp_e2i_zero\b | zero / denorm (== 0, here)
cmp.w #0x7fff,%d2
jeq fp_e2i_huge\b | Inf / NaN
sub.w #0x3ffe,%d2
jcs fp_e2i_small\b
cmp.w #\b,%d2
jhi fp_e2i_large\b
move.l (%a0),%d0
move.l %d0,%d1
lsl.l %d2,%d1
jne fp_e2i_round\b
tst.l (4,%a0)
jne fp_e2i_round\b
neg.w %d2
add.w #32,%d2
lsr.l %d2,%d0
9: tst.w (-4,%a0)
jne 1f
tst.\s %d0
jmi fp_e2i_large\b
printf PCONV,"-> %p\n",1,%d0
rts
1: neg.\s %d0
jeq 1f
jpl fp_e2i_large\b
1: printf PCONV,"-> %p\n",1,%d0
rts
fp_e2i_round\b:
fp_set_sr FPSR_EXC_INEX2 | INEX2 bit
neg.w %d2
add.w #32,%d2
.if \b>16
jeq 5f
.endif
lsr.l %d2,%d0
move.w (FPD_RND,FPDATA),%d2 | rounding mode
jne 2f | %d2 == 0, round to nearest
tst.l %d1 | test guard bit
jpl 9b | zero is closer
btst %d2,%d0 | test lsb bit (%d2 still 0)
jne fp_e2i_doroundup\b
lsl.l #1,%d1 | check low bits
jne fp_e2i_doroundup\b
tst.l (4,%a0)
jeq 9b
fp_e2i_doroundup\b:
addq.l #1,%d0
jra 9b
| check nondefault rounding modes
2: subq.w #2,%d2
jcs 9b | %d2 < 2, round to zero
jhi 3f | %d2 > 2, round to +infinity
tst.w (-4,%a0) | to -inf
jne fp_e2i_doroundup\b | negative, round to infinity
jra 9b | positive, round to zero
3: tst.w (-4,%a0) | to +inf
jeq fp_e2i_doroundup\b | positive, round to infinity
jra 9b | negative, round to zero
| we only want -2**127 to get correctly rounded here,
| since the guard bit is in the lower lword.
| everything else ends up as overflow anyway.
.if \b>16
5: move.w (FPD_RND,FPDATA),%d2 | rounding mode
jne 2b | %d2 == 0, round to nearest
move.l (4,%a0),%d1 | test guard bit
jpl 9b | zero is closer
lsl.l #1,%d1 | check low bits
jne fp_e2i_doroundup\b
jra 9b
.endif
fp_e2i_zero\b:
clr.l %d0
tst.l (%a0)+
jne 1f
tst.l (%a0)
jeq 3f
1: subq.l #4,%a0
fp_clr_sr FPSR_EXC_UNFL | fp_normalize_ext has set this bit
fp_e2i_small\b:
fp_set_sr FPSR_EXC_INEX2
clr.l %d0
move.w (FPD_RND,FPDATA),%d2 | rounding mode
subq.w #2,%d2
jcs 3f | %d2 < 2, round to nearest/zero
jhi 2f | %d2 > 2, round to +infinity
tst.w (-4,%a0) | to -inf
jeq 3f
subq.\s #1,%d0
jra 3f
2: tst.w (-4,%a0) | to +inf
jne 3f
addq.\s #1,%d0
3: printf PCONV,"-> %p\n",1,%d0
rts
fp_e2i_large\b:
fp_set_sr FPSR_EXC_OPERR
move.\s #inf,%d0
tst.w (-4,%a0)
jeq 1f
addq.\s #1,%d0
1: printf PCONV,"-> %p\n",1,%d0
rts
fp_e2i_huge\b:
move.\s (%a0),%d0
tst.l (%a0)
jne 1f
tst.l (%a0)
jeq fp_e2i_large\b
| fp_normalize_ext has set this bit already
| and made the number nonsignaling
1: fp_tst_sr FPSR_EXC_SNAN
jne 1f
fp_set_sr FPSR_EXC_OPERR
1: printf PCONV,"-> %p\n",1,%d0
rts
.endm
fp_conv_ext2long:
conv_ext2int l,32
fp_conv_ext2short:
conv_ext2int w,16
fp_conv_ext2byte:
conv_ext2int b,8
fp_conv_ext2double:
jsr fp_normalize_double
printf PCONV,"e2d: %p(",1,%a0
printx PCONV,%a0@
printf PCONV,"), "
move.l (%a0)+,%d2
cmp.w #0x7fff,%d2
jne 1f
move.w #0x7ff,%d2
move.l (%a0)+,%d0
jra 2f
1: sub.w #0x3fff-0x3ff,%d2
move.l (%a0)+,%d0
jmi 2f
clr.w %d2
2: lsl.w #5,%d2
lsl.l #7,%d2
lsl.l #8,%d2
move.l %d0,%d1
lsl.l #1,%d0
lsr.l #4,%d0
lsr.l #8,%d0
or.l %d2,%d0
putuser.l %d0,(%a1)+,fp_err_ua2,%a1
moveq #21,%d0
lsl.l %d0,%d1
move.l (%a0),%d0
lsr.l #4,%d0
lsr.l #7,%d0
or.l %d1,%d0
putuser.l %d0,(%a1),fp_err_ua2,%a1
#ifdef FPU_EMU_DEBUG
getuser.l %a1@(-4),%d0,fp_err_ua2,%a1
getuser.l %a1@(0),%d1,fp_err_ua2,%a1
printf PCONV,"%p(%08x%08x)\n",3,%a1,%d0,%d1
#endif
rts
fp_conv_ext2single:
jsr fp_normalize_single
printf PCONV,"e2s: %p(",1,%a0
printx PCONV,%a0@
printf PCONV,"), "
move.l (%a0)+,%d1
cmp.w #0x7fff,%d1
jne 1f
move.w #0xff,%d1
move.l (%a0)+,%d0
jra 2f
1: sub.w #0x3fff-0x7f,%d1
move.l (%a0)+,%d0
jmi 2f
clr.w %d1
2: lsl.w #8,%d1
lsl.l #7,%d1
lsl.l #8,%d1
bclr #31,%d0
lsr.l #8,%d0
or.l %d1,%d0
printf PCONV,"%08x\n",1,%d0
rts
| special return addresses for instr that
| encode the rounding precision in the opcode
| (e.g. fsmove,fdmove)
fp_finalrounding_single:
addq.l #8,%sp
jsr fp_normalize_ext
jsr fp_normalize_single
jra fp_finaltest
fp_finalrounding_single_fast:
addq.l #8,%sp
jsr fp_normalize_ext
jsr fp_normalize_single_fast
jra fp_finaltest
fp_finalrounding_double:
addq.l #8,%sp
jsr fp_normalize_ext
jsr fp_normalize_double
jra fp_finaltest
| fp_finaltest:
| set the emulated status register based on the outcome of an
| emulated instruction.
fp_finalrounding:
addq.l #8,%sp
| printf ,"f: %p\n",1,%a0
jsr fp_normalize_ext
move.w (FPD_PREC,FPDATA),%d0
subq.w #1,%d0
jcs fp_finaltest
jne 1f
jsr fp_normalize_single
jra 2f
1: jsr fp_normalize_double
2:| printf ,"f: %p\n",1,%a0
fp_finaltest:
| First, we do some of the obvious tests for the exception
| status byte and condition code bytes of fp_sr here, so that
| they do not have to be handled individually by every
| emulated instruction.
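| condition codes derived from the extended result:
|   N   - sign byte nonzero
|   Z   - exponent != 0x7fff and both mantissa lwords zero
|   NAN - exponent == 0x7fff and the mantissa (ignoring the explicit
|         integer bit) nonzero
|   INF - exponent == 0x7fff and the mantissa zero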
clr.l %d0
addq.l #1,%a0
tst.b (%a0)+ | sign
jeq 1f
bset #FPSR_CC_NEG-24,%d0 | N bit
1: cmp.w #0x7fff,(%a0)+ | exponent
jeq 2f
| test for zero
moveq #FPSR_CC_Z-24,%d1
tst.l (%a0)+
jne 9f
tst.l (%a0)
jne 9f
jra 8f
| infinity and NaN
2: moveq #FPSR_CC_NAN-24,%d1
move.l (%a0)+,%d2
lsl.l #1,%d2 | ignore high bit
jne 8f
tst.l (%a0)
jne 8f
moveq #FPSR_CC_INF-24,%d1
8: bset %d1,%d0
9: move.b %d0,(FPD_FPSR+0,FPDATA) | set condition test result
| move instructions enter here
| Here, we test things in the exception status byte, and set
| other things in the accrued exception byte accordingly.
| Emulated instructions can set various things in the former,
| as defined in fp_emu.h.
fp_final:
move.l (FPD_FPSR,FPDATA),%d0
#if 0
btst #FPSR_EXC_SNAN,%d0 | EXC_SNAN
jne 1f
btst #FPSR_EXC_OPERR,%d0 | EXC_OPERR
jeq 2f
1: bset #FPSR_AEXC_IOP,%d0 | set IOP bit
2: btst #FPSR_EXC_OVFL,%d0 | EXC_OVFL
jeq 1f
bset #FPSR_AEXC_OVFL,%d0 | set OVFL bit
1: btst #FPSR_EXC_UNFL,%d0 | EXC_UNFL
jeq 1f
btst #FPSR_EXC_INEX2,%d0 | EXC_INEX2
jeq 1f
bset #FPSR_AEXC_UNFL,%d0 | set UNFL bit
1: btst #FPSR_EXC_DZ,%d0 | EXC_DZ
jeq 1f
bset #FPSR_AEXC_DZ,%d0 | set DZ bit
1: btst #FPSR_EXC_OVFL,%d0 | EXC_OVFL
jne 1f
btst #FPSR_EXC_INEX2,%d0 | EXC_INEX2
jne 1f
btst #FPSR_EXC_INEX1,%d0 | EXC_INEX1
jeq 2f
1: bset #FPSR_AEXC_INEX,%d0 | set INEX bit
2: move.l %d0,(FPD_FPSR,FPDATA)
#else
| same as above, greatly optimized, but untested (yet)
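| (aexc.IOP = SNAN|OPERR, aexc.OVFL = OVFL, aexc.UNFL = UNFL&INEX2,
|  aexc.DZ = DZ, aexc.INEX = INEX1|INEX2|OVFL, as in the #if 0 code)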
move.l %d0,%d2
lsr.l #5,%d0
move.l %d0,%d1
lsr.l #4,%d1
or.l %d0,%d1
and.b #0x08,%d1
move.l %d2,%d0
lsr.l #6,%d0
or.l %d1,%d0
move.l %d2,%d1
lsr.l #4,%d1
or.b #0xdf,%d1
and.b %d1,%d0
move.l %d2,%d1
lsr.l #7,%d1
and.b #0x80,%d1
or.b %d1,%d0
and.b #0xf8,%d0
or.b %d0,%d2
move.l %d2,(FPD_FPSR,FPDATA)
#endif
move.b (FPD_FPSR+2,FPDATA),%d0
and.b (FPD_FPCR+2,FPDATA),%d0
jeq 1f
printf ,"send signal!!!\n"
1: jra fp_end
| arch/m68k/math-emu/fp_cond.S
/*
* fp_cond.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fp_emu.h"
#include "fp_decode.h"
.globl fp_fscc, fp_fbccw, fp_fbccl
#ifdef FPU_EMU_DEBUG
fp_fnop:
printf PDECODE,"fnop\n"
jra fp_end
#else
#define fp_fnop fp_end
#endif
fp_fbccw:
tst.w %d2
jeq fp_fnop
printf PDECODE,"fbccw "
fp_get_pc %a0
lea (-2,%a0,%d2.w),%a0
jra 1f
fp_fbccl:
printf PDECODE,"fbccl "
fp_get_pc %a0
move.l %d2,%d0
swap %d0
fp_get_instr_word %d0,fp_err_ua1
lea (-2,%a0,%d0.l),%a0
1: printf PDECODE,"%x",1,%a0
move.l %d2,%d0
swap %d0
jsr fp_compute_cond
tst.l %d0
jeq 1f
fp_put_pc %a0,1
1: printf PDECODE,"\n"
jra fp_end
fp_fdbcc:
printf PDECODE,"fdbcc "
fp_get_pc %a1 | calculate new pc
fp_get_instr_word %d0,fp_err_ua1
add.w %d0,%a1
fp_decode_addr_reg
printf PDECODE,"d%d,%x\n",2,%d0,%a1
swap %d1 | test condition in %d1
tst.w %d1
jne 2f
move.l %d0,%d1
jsr fp_get_data_reg
subq.w #1,%d0
jcs 1f
fp_put_pc %a1,1
1: jsr fp_put_data_reg
2: jra fp_end
| set flags for decode macros for fs<cc>
do_fscc=1
do_no_pc_mode=1
fp_fscc:
printf PDECODE,"fscc "
move.l %d2,%d0
jsr fp_compute_cond
move.w %d0,%d1
swap %d1
| decode addressing mode
fp_decode_addr_mode
.long fp_data, fp_fdbcc
.long fp_indirect, fp_postinc
.long fp_predecr, fp_disp16
.long fp_extmode0, fp_extmode1
| addressing mode: data register direct
fp_data:
fp_mode_data_direct
move.w %d0,%d1 | save register nr
jsr fp_get_data_reg
swap %d1
move.b %d1,%d0
swap %d1
jsr fp_put_data_reg
printf PDECODE,"\n"
jra fp_end
fp_indirect:
fp_mode_addr_indirect
jra fp_do_scc
fp_postinc:
fp_mode_addr_indirect_postinc
jra fp_do_scc
fp_predecr:
fp_mode_addr_indirect_predec
jra fp_do_scc
fp_disp16:
fp_mode_addr_indirect_disp16
jra fp_do_scc
fp_extmode0:
fp_mode_addr_indirect_extmode0
jra fp_do_scc
fp_extmode1:
bfextu %d2{#13,#3},%d0
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fp_absolute_short, fp_absolute_long
.long fp_ill, fp_ill | NOTE: jump here to ftrap.x
.long fp_ill, fp_ill
.long fp_ill, fp_ill
fp_absolute_short:
fp_mode_abs_short
jra fp_do_scc
fp_absolute_long:
fp_mode_abs_long
| jra fp_do_scc
fp_do_scc:
swap %d1
putuser.b %d1,(%a0),fp_err_ua1,%a0
printf PDECODE,"\n"
jra fp_end
#define tst_NAN btst #24,%d1
#define tst_Z btst #26,%d1
#define tst_N btst #27,%d1
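| predicates 0x10-0x1f are the IEEE nonaware conditions: if the NAN
| condition code is set they first raise BSUN (bit 15) and accrue
| IOP (bit 7); the result is then computed from the low four bits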
fp_compute_cond:
move.l (FPD_FPSR,FPDATA),%d1
btst #4,%d0
jeq 1f
tst_NAN
jeq 1f
bset #15,%d1
bset #7,%d1
move.l %d1,(FPD_FPSR,FPDATA)
1: and.w #0xf,%d0
jmp ([0f:w,%pc,%d0.w*4])
.align 4
0:
.long fp_f , fp_eq , fp_ogt, fp_oge
.long fp_olt, fp_ole, fp_ogl, fp_or
.long fp_un , fp_ueq, fp_ugt, fp_uge
.long fp_ult, fp_ule, fp_ne , fp_t
fp_f:
moveq #0,%d0
rts
fp_eq:
moveq #0,%d0
tst_Z
jeq 1f
moveq #-1,%d0
1: rts
fp_ogt:
moveq #0,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
tst_N
jne 1f
moveq #-1,%d0
1: rts
fp_oge:
moveq #-1,%d0
tst_Z
jne 2f
tst_NAN
jne 1f
tst_N
jeq 2f
1: moveq #0,%d0
2: rts
fp_olt:
moveq #0,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
tst_N
jeq 1f
moveq #-1,%d0
1: rts
fp_ole:
moveq #-1,%d0
tst_Z
jne 2f
tst_NAN
jne 1f
tst_N
jne 2f
1: moveq #0,%d0
2: rts
fp_ogl:
moveq #0,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
moveq #-1,%d0
1: rts
fp_or:
moveq #0,%d0
tst_NAN
jne 1f
moveq #-1,%d0
1: rts
fp_un:
moveq #0,%d0
tst_NAN
jeq 1f
moveq #-1,%d0
rts
fp_ueq:
moveq #-1,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
moveq #0,%d0
1: rts
fp_ugt:
moveq #-1,%d0
tst_NAN
jne 2f
tst_N
jne 1f
tst_Z
jeq 2f
1: moveq #0,%d0
2: rts
fp_uge:
moveq #-1,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
tst_N
jeq 1f
moveq #0,%d0
1: rts
fp_ult:
moveq #-1,%d0
tst_NAN
jne 2f
tst_Z
jne 1f
tst_N
jne 2f
1: moveq #0,%d0
2: rts
fp_ule:
moveq #-1,%d0
tst_NAN
jne 1f
tst_Z
jne 1f
tst_N
jne 1f
moveq #0,%d0
1: rts
fp_ne:
moveq #0,%d0
tst_Z
jne 1f
moveq #-1,%d0
1: rts
fp_t:
moveq #-1,%d0
rts
| arch/m68k/math-emu/fp_entry.S
/*
* fp_emu.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/linkage.h>
#include <asm/entry.h>
#include "fp_emu.h"
.globl fpu_emu
.globl fp_debugprint
.globl fp_err_ua1,fp_err_ua2
.text
fpu_emu:
SAVE_ALL_INT
GET_CURRENT(%d0)
#if defined(CPU_M68020_OR_M68030) && defined(CPU_M68040_OR_M68060)
tst.l m68k_is040or060
jeq 1f
#endif
#if defined(CPU_M68040_OR_M68060)
move.l (FPS_PC2,%sp),(FPS_PC,%sp)
#endif
1:
| emulate the instruction
jsr fp_scan
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
jeq 1f
#endif
btst #7,(FPS_SR,%sp)
jne fp_sendtrace060
#endif
1:
| emulation successful?
tst.l %d0
jeq ret_from_exception
| send some signal to program here
jra ret_from_exception
| we jump here after an access error while trying to access
| user space; we correct the stack pointer and send a SIGSEGV to
| the user process
fp_err_ua2:
addq.l #4,%sp
fp_err_ua1:
addq.l #4,%sp
move.l %a0,-(%sp)
pea LSEGV_MAPERR
pea LSIGSEGV
jsr fpemu_signal
add.w #12,%sp
jra ret_from_exception
#if defined(CONFIG_M68060)
| send a trace signal if we are debugged
| it does not really belong here, but...
fp_sendtrace060:
move.l (FPS_PC,%sp),-(%sp)
pea LTRAP_TRACE
pea LSIGTRAP
jsr fpemu_signal
add.w #12,%sp
jra ret_from_exception
#endif
.globl fp_get_data_reg, fp_put_data_reg
.globl fp_get_addr_reg, fp_put_addr_reg
| Entry points to get/put a register. Some registers can be accessed
| directly, others live on the stack. Since we read/write the stack
| directly here, these functions may only be called from within
| instruction decoding; otherwise the stack pointer is incorrect
| and the stack gets corrupted.
fp_get_data_reg:
jmp ([0f:w,%pc,%d0.w*4])
.align 4
0:
.long fp_get_d0, fp_get_d1
.long fp_get_d2, fp_get_d3
.long fp_get_d4, fp_get_d5
.long fp_get_d6, fp_get_d7
fp_get_d0:
move.l (PT_OFF_D0+8,%sp),%d0
printf PREGISTER,"{d0->%08x}",1,%d0
rts
fp_get_d1:
move.l (PT_OFF_D1+8,%sp),%d0
printf PREGISTER,"{d1->%08x}",1,%d0
rts
fp_get_d2:
move.l (PT_OFF_D2+8,%sp),%d0
printf PREGISTER,"{d2->%08x}",1,%d0
rts
fp_get_d3:
move.l %d3,%d0
printf PREGISTER,"{d3->%08x}",1,%d0
rts
fp_get_d4:
move.l %d4,%d0
printf PREGISTER,"{d4->%08x}",1,%d0
rts
fp_get_d5:
move.l %d5,%d0
printf PREGISTER,"{d5->%08x}",1,%d0
rts
fp_get_d6:
move.l %d6,%d0
printf PREGISTER,"{d6->%08x}",1,%d0
rts
fp_get_d7:
move.l %d7,%d0
printf PREGISTER,"{d7->%08x}",1,%d0
rts
fp_put_data_reg:
jmp ([0f:w,%pc,%d1.w*4])
.align 4
0:
.long fp_put_d0, fp_put_d1
.long fp_put_d2, fp_put_d3
.long fp_put_d4, fp_put_d5
.long fp_put_d6, fp_put_d7
fp_put_d0:
printf PREGISTER,"{d0<-%08x}",1,%d0
move.l %d0,(PT_OFF_D0+8,%sp)
rts
fp_put_d1:
printf PREGISTER,"{d1<-%08x}",1,%d0
move.l %d0,(PT_OFF_D1+8,%sp)
rts
fp_put_d2:
printf PREGISTER,"{d2<-%08x}",1,%d0
move.l %d0,(PT_OFF_D2+8,%sp)
rts
fp_put_d3:
printf PREGISTER,"{d3<-%08x}",1,%d0
| move.l %d0,%d3
move.l %d0,(PT_OFF_D3+8,%sp)
rts
fp_put_d4:
printf PREGISTER,"{d4<-%08x}",1,%d0
| move.l %d0,%d4
move.l %d0,(PT_OFF_D4+8,%sp)
rts
fp_put_d5:
printf PREGISTER,"{d5<-%08x}",1,%d0
| move.l %d0,%d5
move.l %d0,(PT_OFF_D5+8,%sp)
rts
fp_put_d6:
printf PREGISTER,"{d6<-%08x}",1,%d0
move.l %d0,%d6
rts
fp_put_d7:
printf PREGISTER,"{d7<-%08x}",1,%d0
move.l %d0,%d7
rts
fp_get_addr_reg:
jmp ([0f:w,%pc,%d0.w*4])
.align 4
0:
.long fp_get_a0, fp_get_a1
.long fp_get_a2, fp_get_a3
.long fp_get_a4, fp_get_a5
.long fp_get_a6, fp_get_a7
fp_get_a0:
move.l (PT_OFF_A0+8,%sp),%a0
printf PREGISTER,"{a0->%08x}",1,%a0
rts
fp_get_a1:
move.l (PT_OFF_A1+8,%sp),%a0
printf PREGISTER,"{a1->%08x}",1,%a0
rts
fp_get_a2:
move.l (PT_OFF_A2+8,%sp),%a0
printf PREGISTER,"{a2->%08x}",1,%a0
rts
fp_get_a3:
move.l %a3,%a0
printf PREGISTER,"{a3->%08x}",1,%a0
rts
fp_get_a4:
move.l %a4,%a0
printf PREGISTER,"{a4->%08x}",1,%a0
rts
fp_get_a5:
move.l %a5,%a0
printf PREGISTER,"{a5->%08x}",1,%a0
rts
fp_get_a6:
move.l %a6,%a0
printf PREGISTER,"{a6->%08x}",1,%a0
rts
fp_get_a7:
move.l %usp,%a0
printf PREGISTER,"{a7->%08x}",1,%a0
rts
fp_put_addr_reg:
jmp ([0f:w,%pc,%d0.w*4])
.align 4
0:
.long fp_put_a0, fp_put_a1
.long fp_put_a2, fp_put_a3
.long fp_put_a4, fp_put_a5
.long fp_put_a6, fp_put_a7
fp_put_a0:
printf PREGISTER,"{a0<-%08x}",1,%a0
move.l %a0,(PT_OFF_A0+8,%sp)
rts
fp_put_a1:
printf PREGISTER,"{a1<-%08x}",1,%a0
move.l %a0,(PT_OFF_A1+8,%sp)
rts
fp_put_a2:
printf PREGISTER,"{a2<-%08x}",1,%a0
move.l %a0,(PT_OFF_A2+8,%sp)
rts
fp_put_a3:
printf PREGISTER,"{a3<-%08x}",1,%a0
move.l %a0,%a3
rts
fp_put_a4:
printf PREGISTER,"{a4<-%08x}",1,%a0
move.l %a0,%a4
rts
fp_put_a5:
printf PREGISTER,"{a5<-%08x}",1,%a0
move.l %a0,%a5
rts
fp_put_a6:
printf PREGISTER,"{a6<-%08x}",1,%a0
move.l %a0,%a6
rts
fp_put_a7:
printf PREGISTER,"{a7<-%08x}",1,%a0
move.l %a0,%usp
rts
.data
.align 4
fp_debugprint:
| .long PMDECODE
.long PMINSTR+PMDECODE+PMCONV+PMNORM
| .long PMCONV+PMNORM+PMINSTR
| .long 0
| arch/m68k/math-emu/fp_move.S
/*
* fp_move.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fp_emu.h"
#include "fp_decode.h"
do_no_pc_mode=1
.globl fp_fmove_fp2mem
fp_fmove_fp2mem:
clr.b (2+FPD_FPSR,FPDATA)
fp_decode_dest_format
move.w %d0,%d1 | store data size twice in %d1
swap %d1 | one can be trashed below
move.w %d0,%d1
#ifdef FPU_EMU_DEBUG
lea 0f,%a0
clr.l %d0
move.b (%a0,%d1.w),%d0
printf PDECODE,"fmove.%c ",1,%d0
fp_decode_src_reg
printf PDECODE,"fp%d,",1,%d0
.data
0: .byte 'l','s','x','p','w','d','b','p'
.previous
#endif
| encode addressing mode for dest
fp_decode_addr_mode
.long fp_data, fp_ill
.long fp_indirect, fp_postinc
.long fp_predecr, fp_disp16
.long fp_extmode0, fp_extmode1
| addressing mode: data register direct
fp_data:
fp_mode_data_direct
move.w %d0,%d1
fp_decode_src_reg
fp_get_fp_reg
lea (FPD_TEMPFP1,FPDATA),%a1
move.l (%a0)+,(%a1)+
move.l (%a0)+,(%a1)+
move.l (%a0),(%a1)
lea (-8,%a1),%a0
swap %d1
move.l %d1,%d2
printf PDECODE,"\n"
jmp ([0f:w,%pc,%d1.w*4])
.align 4
0:
.long fp_data_long, fp_data_single
.long fp_ill, fp_ill
.long fp_data_word, fp_ill
.long fp_data_byte, fp_ill
fp_data_byte:
jsr fp_normalize_ext
jsr fp_conv_ext2byte
move.l %d0,%d1
swap %d2
move.w %d2,%d0
jsr fp_get_data_reg
move.b %d1,%d0
move.w %d2,%d1
jsr fp_put_data_reg
jra fp_final
fp_data_word:
jsr fp_normalize_ext
jsr fp_conv_ext2short
move.l %d0,%d1
swap %d2
move.w %d2,%d0
jsr fp_get_data_reg
move.w %d1,%d0
move.l %d2,%d1
jsr fp_put_data_reg
jra fp_final
fp_data_long:
jsr fp_normalize_ext
jsr fp_conv_ext2long
swap %d2
move.w %d2,%d1
jsr fp_put_data_reg
jra fp_final
fp_data_single:
jsr fp_normalize_ext
jsr fp_conv_ext2single
swap %d2
move.w %d2,%d1
jsr fp_put_data_reg
jra fp_final
| addressing mode: address register indirect
fp_indirect:
fp_mode_addr_indirect
jra fp_putdest
| addressing mode: address register indirect with postincrement
fp_postinc:
fp_mode_addr_indirect_postinc
jra fp_putdest
| addressing mode: address register indirect with predecrement
fp_predecr:
fp_mode_addr_indirect_predec
jra fp_putdest
| addressing mode: address register indirect with 16bit displacement
fp_disp16:
fp_mode_addr_indirect_disp16
jra fp_putdest
fp_extmode0:
fp_mode_addr_indirect_extmode0
jra fp_putdest
fp_extmode1:
fp_decode_addr_reg
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fp_abs_short, fp_abs_long
.long fp_ill, fp_ill
.long fp_ill, fp_ill
.long fp_ill, fp_ill
fp_abs_short:
fp_mode_abs_short
jra fp_putdest
fp_abs_long:
fp_mode_abs_long
jra fp_putdest
fp_putdest:
move.l %a0,%a1
fp_decode_src_reg
move.l %d1,%d2 | save size
fp_get_fp_reg
printf PDECODE,"\n"
addq.l #8,%a0
move.l (%a0),-(%sp)
move.l -(%a0),-(%sp)
move.l -(%a0),-(%sp)
move.l %sp,%a0
jsr fp_normalize_ext
swap %d2
jmp ([0f:w,%pc,%d2.w*4])
.align 4
0:
.long fp_format_long, fp_format_single
.long fp_format_extended, fp_format_packed
.long fp_format_word, fp_format_double
.long fp_format_byte, fp_format_packed
fp_format_long:
jsr fp_conv_ext2long
putuser.l %d0,(%a1),fp_err_ua1,%a1
jra fp_finish_move
fp_format_single:
jsr fp_conv_ext2single
putuser.l %d0,(%a1),fp_err_ua1,%a1
jra fp_finish_move
fp_format_extended:
move.l (%a0)+,%d0
lsl.w #1,%d0
lsl.l #7,%d0
lsl.l #8,%d0
putuser.l %d0,(%a1)+,fp_err_ua1,%a1
move.l (%a0)+,%d0
putuser.l %d0,(%a1)+,fp_err_ua1,%a1
move.l (%a0),%d0
putuser.l %d0,(%a1),fp_err_ua1,%a1
jra fp_finish_move
fp_format_packed:
/* not supported yet */
lea (12,%sp),%sp
jra fp_ill
fp_format_word:
jsr fp_conv_ext2short
putuser.w %d0,(%a1),fp_err_ua1,%a1
jra fp_finish_move
fp_format_double:
jsr fp_conv_ext2double
jra fp_finish_move
fp_format_byte:
jsr fp_conv_ext2byte
putuser.b %d0,(%a1),fp_err_ua1,%a1
| jra fp_finish_move
fp_finish_move:
lea (12,%sp),%sp
jra fp_final
| arch/m68k/math-emu/fp_movem.S
/*
* fp_movem.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fp_emu.h"
#include "fp_decode.h"
| set flags for decode macros for fmovem
do_fmovem=1
.globl fp_fmovem_fp, fp_fmovem_cr
| %d1 contains the mask and count of the register list
| for other register usage see fp_decode.h
fp_fmovem_fp:
printf PDECODE,"fmovem.x "
| get register list and count them
btst #11,%d2
jne 1f
bfextu %d2{#24,#8},%d0 | static register list
jra 2f
1: bfextu %d2{#25,#3},%d0 | dynamic register list
jsr fp_get_data_reg
2: move.l %d0,%d1
swap %d1
jra 2f
1: addq.w #1,%d1 | count the # of registers in
2: lsr.b #1,%d0 | register list and keep it in %d1
jcs 1b
jne 2b
printf PDECODE,"#%08x",1,%d1
#ifdef FPU_EMU_DEBUG
btst #12,%d2
jne 1f
printf PDECODE,"-" | decremental move
jra 2f
1: printf PDECODE,"+" | incremental move
2: btst #13,%d2
jeq 1f
printf PDECODE,"->" | fpu -> cpu
jra 2f
1: printf PDECODE,"<-" | fpu <- cpu
2:
#endif
| decode address mode
fp_decode_addr_mode
.long fp_ill, fp_ill
.long fpr_indirect, fpr_postinc
.long fpr_predecr, fpr_disp16
.long fpr_extmode0, fpr_extmode1
| addressing mode: address register indirect
fpr_indirect:
fp_mode_addr_indirect
jra fpr_do_movem
| addressing mode: address register indirect with postincrement
fpr_postinc:
fp_mode_addr_indirect_postinc
jra fpr_do_movem
fpr_predecr:
fp_mode_addr_indirect_predec
jra fpr_do_movem
| addressing mode: address register/program counter indirect
| with 16-bit displacement
fpr_disp16:
fp_mode_addr_indirect_disp16
jra fpr_do_movem
fpr_extmode0:
fp_mode_addr_indirect_extmode0
jra fpr_do_movem
fpr_extmode1:
fp_decode_addr_reg
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fpr_absolute_short, fpr_absolute_long
.long fpr_disp16, fpr_extmode0
.long fp_ill, fp_ill
.long fp_ill, fp_ill
fpr_absolute_short:
fp_mode_abs_short
jra fpr_do_movem
fpr_absolute_long:
fp_mode_abs_long
| jra fpr_do_movem
fpr_do_movem:
swap %d1 | get fpu register list
lea (FPD_FPREG,FPDATA),%a1
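| %d0 is the step between registers: +12 for an incremental move;
| for a decremental move point %a1 at the last register (fp7) and
| negate the step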
moveq #12,%d0
btst #12,%d2
jne 1f
lea (-12,%a1,%d0*8),%a1
neg.l %d0
1: btst #13,%d2
jne 4f
| move register from memory into fpu
jra 3f
1: printf PMOVEM,"(%p>%p)",2,%a0,%a1
getuser.l (%a0)+,%d2,fp_err_ua1,%a0
lsr.l #8,%d2
lsr.l #7,%d2
lsr.w #1,%d2
move.l %d2,(%a1)+
getuser.l (%a0)+,%d2,fp_err_ua1,%a0
move.l %d2,(%a1)+
getuser.l (%a0),%d2,fp_err_ua1,%a0
move.l %d2,(%a1)
subq.l #8,%a0
subq.l #8,%a1
add.l %d0,%a0
2: add.l %d0,%a1
3: lsl.b #1,%d1
jcs 1b
jne 2b
jra 5f
| move register from fpu into memory
1: printf PMOVEM,"(%p>%p)",2,%a1,%a0
move.l (%a1)+,%d2
lsl.w #1,%d2
lsl.l #7,%d2
lsl.l #8,%d2
putuser.l %d2,(%a0)+,fp_err_ua1,%a0
move.l (%a1)+,%d2
putuser.l %d2,(%a0)+,fp_err_ua1,%a0
move.l (%a1),%d2
putuser.l %d2,(%a0),fp_err_ua1,%a0
subq.l #8,%a1
subq.l #8,%a0
add.l %d0,%a0
2: add.l %d0,%a1
4: lsl.b #1,%d1
jcs 1b
jne 2b
5:
printf PDECODE,"\n"
#if 0
lea (FPD_FPREG,FPDATA),%a0
printf PMOVEM,"fp:"
printx PMOVEM,%a0@(0)
printx PMOVEM,%a0@(12)
printf PMOVEM,"\n "
printx PMOVEM,%a0@(24)
printx PMOVEM,%a0@(36)
printf PMOVEM,"\n "
printx PMOVEM,%a0@(48)
printx PMOVEM,%a0@(60)
printf PMOVEM,"\n "
printx PMOVEM,%a0@(72)
printx PMOVEM,%a0@(84)
printf PMOVEM,"\n"
#endif
jra fp_end
| set flags for decode macros for fmovem control register
do_fmovem=1
do_fmovem_cr=1
fp_fmovem_cr:
printf PDECODE,"fmovem.cr "
| get register list and count them
bfextu %d2{#19,#3},%d0
move.l %d0,%d1
swap %d1
jra 2f
1: addq.w #1,%d1
2: lsr.l #1,%d0
jcs 1b
jne 2b
printf PDECODE,"#%08x",1,%d1
#ifdef FPU_EMU_DEBUG
btst #13,%d2
jeq 1f
printf PDECODE,"->" | fpu -> cpu
jra 2f
1: printf PDECODE,"<-" | fpu <- cpu
2:
#endif
| decode address mode
fp_decode_addr_mode
.long fpc_data, fpc_addr
.long fpc_indirect, fpc_postinc
.long fpc_predecr, fpc_disp16
.long fpc_extmode0, fpc_extmode1
fpc_data:
fp_mode_data_direct
move.w %d0,%d1
bfffo %d2{#19,#3},%d0
sub.w #19,%d0
lea (FPD_FPCR,FPDATA,%d0.w*4),%a1
btst #13,%d2
jne 1f
move.w %d1,%d0
jsr fp_get_data_reg
move.l %d0,(%a1)
jra fpc_movem_fin
1: move.l (%a1),%d0
jsr fp_put_data_reg
jra fpc_movem_fin
fpc_addr:
fp_decode_addr_reg
printf PDECODE,"a%d",1,%d0
btst #13,%d2
jne 1f
jsr fp_get_addr_reg
move.l %a0,(FPD_FPIAR,FPDATA)
jra fpc_movem_fin
1: move.l (FPD_FPIAR,FPDATA),%a0
jsr fp_put_addr_reg
jra fpc_movem_fin
fpc_indirect:
fp_mode_addr_indirect
jra fpc_do_movem
fpc_postinc:
fp_mode_addr_indirect_postinc
jra fpc_do_movem
fpc_predecr:
fp_mode_addr_indirect_predec
jra fpc_do_movem
fpc_disp16:
fp_mode_addr_indirect_disp16
jra fpc_do_movem
fpc_extmode0:
fp_mode_addr_indirect_extmode0
jra fpc_do_movem
fpc_extmode1:
fp_decode_addr_reg
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fpc_absolute_short, fpc_absolute_long
.long fpc_disp16, fpc_extmode0
.long fpc_immediate, fp_ill
.long fp_ill, fp_ill
fpc_absolute_short:
fp_mode_abs_short
jra fpc_do_movem
fpc_absolute_long:
fp_mode_abs_long
jra fpc_do_movem
fpc_immediate:
fp_get_pc %a0
lea (%a0,%d1.w*4),%a1
fp_put_pc %a1
printf PDECODE,"#imm"
| jra fpc_do_movem
#if 0
swap %d1
lsl.l #5,%d1
lea (FPD_FPCR,FPDATA),%a0
jra 3f
1: move.l %d0,(%a0)
2: addq.l #4,%a0
3: lsl.b #1,%d1
jcs 1b
jne 2b
jra fpc_movem_fin
#endif
fpc_do_movem:
swap %d1 | get fpu register list
lsl.l #5,%d1
lea (FPD_FPCR,FPDATA),%a1
1: btst #13,%d2
jne 4f
| move register from memory into fpu
jra 3f
1: printf PMOVEM,"(%p>%p)",2,%a0,%a1
getuser.l (%a0)+,%d0,fp_err_ua1,%a0
move.l %d0,(%a1)
2: addq.l #4,%a1
3: lsl.b #1,%d1
jcs 1b
jne 2b
jra fpc_movem_fin
| move register from fpu into memory
1: printf PMOVEM,"(%p>%p)",2,%a1,%a0
move.l (%a1),%d0
putuser.l %d0,(%a0)+,fp_err_ua1,%a0
2: addq.l #4,%a1
4: lsl.b #1,%d1
jcs 1b
jne 2b
fpc_movem_fin:
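| mask off reserved control/status bits, then cache the FPCR
| rounding mode (bits 5-4) and rounding precision (bits 7-6) in
| FPD_RND and FPD_PREC for quick access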
and.l #0x0000fff0,(FPD_FPCR,FPDATA)
and.l #0x0ffffff8,(FPD_FPSR,FPDATA)
move.l (FPD_FPCR,FPDATA),%d0
lsr.l #4,%d0
moveq #3,%d1
and.l %d0,%d1
move.w %d1,(FPD_RND,FPDATA)
lsr.l #2,%d0
moveq #3,%d1
and.l %d0,%d1
move.w %d1,(FPD_PREC,FPDATA)
printf PDECODE,"\n"
#if 0
printf PMOVEM,"fpcr : %08x\n",1,FPDATA@(FPD_FPCR)
printf PMOVEM,"fpsr : %08x\n",1,FPDATA@(FPD_FPSR)
printf PMOVEM,"fpiar: %08x\n",1,FPDATA@(FPD_FPIAR)
clr.l %d0
move.w (FPD_PREC,FPDATA),%d0
printf PMOVEM,"prec : %04x\n",1,%d0
move.w (FPD_RND,FPDATA),%d0
printf PMOVEM,"rnd : %04x\n",1,%d0
#endif
jra fp_end
| arch/m68k/math-emu/fp_scan.S
/*
* fp_scan.S
*
* Copyright Roman Zippel, 1997. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fp_emu.h"
#include "fp_decode.h"
.globl fp_scan, fp_datasize
.data
| %d2 - first two instr words
| %d1 - operand size
/* operand formats are:
Long = 0, i.e. fmove.l
Single, i.e. fmove.s
Extended, i.e. fmove.x
Packed-BCD, i.e. fmove.p
Word, i.e. fmove.w
Double, i.e. fmove.d
*/
.text
| On entry:
| FPDATA - base of emulated FPU registers
fp_scan:
| normal fpu instruction? (this excludes fsave/frestore)
fp_get_pc %a0
printf PDECODE,"%08x: ",1,%a0
getuser.b (%a0),%d0,fp_err_ua1,%a0
#if 1
cmp.b #0xf2,%d0 | cpid = 1
#else
cmp.b #0xfc,%d0 | cpid = 6
#endif
jne fp_nonstd
| first two instruction words are kept in %d2
getuser.l (%a0)+,%d2,fp_err_ua1,%a0
fp_put_pc %a0
fp_decode_cond: | separate conditional instr
fp_decode_cond_instr_type
.long fp_decode_move, fp_fscc
.long fp_fbccw, fp_fbccl
fp_decode_move: | separate move instr
fp_decode_move_instr_type
.long fp_fgen_fp, fp_ill
.long fp_fgen_ea, fp_fmove_fp2mem
.long fp_fmovem_cr, fp_fmovem_cr
.long fp_fmovem_fp, fp_fmovem_fp
| now all arithmetic instr and a few move instr are left
fp_fgen_fp: | source is a fpu register
clr.b (FPD_FPSR+2,FPDATA) | clear the exception byte
fp_decode_sourcespec
printf PDECODE,"f<op>.x fp%d",1,%d0
fp_get_fp_reg
lea (FPD_TEMPFP1,FPDATA),%a1 | copy src into a temp location
move.l (%a0)+,(%a1)+
move.l (%a0)+,(%a1)+
move.l (%a0),(%a1)
lea (-8,%a1),%a0
jra fp_getdest
fp_fgen_ea: | source is <ea>
clr.b (FPD_FPSR+2,FPDATA) | clear the exception byte
| sort out fmovecr, keep data size in %d1
fp_decode_sourcespec
cmp.w #7,%d0
jeq fp_fmovecr
move.w %d0,%d1 | store data size twice in %d1
swap %d1 | one can be trashed below
move.w %d0,%d1
#ifdef FPU_EMU_DEBUG
lea 0f,%a0
clr.l %d0
move.b (%a0,%d1.w),%d0
printf PDECODE,"f<op>.%c ",1,%d0
.data
0: .byte 'l','s','x','p','w','d','b',0
.previous
#endif
/*
fp_getsource, fp_getdest
basically, we end up with a pointer to the source operand in
%a1, and a pointer to the destination operand in %a0. both
are, of course, 96-bit extended floating point numbers.
*/
fp_getsource:
| decode addressing mode for source
fp_decode_addr_mode
.long fp_data, fp_ill
.long fp_indirect, fp_postinc
.long fp_predecr, fp_disp16
.long fp_extmode0, fp_extmode1
| addressing mode: data register direct
fp_data:
fp_mode_data_direct
jsr fp_get_data_reg
lea (FPD_TEMPFP1,FPDATA),%a0
jmp ([0f:w,%pc,%d1.w*4])
.align 4
0:
.long fp_data_long, fp_data_single
.long fp_ill, fp_ill
.long fp_data_word, fp_ill
.long fp_data_byte, fp_ill
| data types that fit in an integer data register
fp_data_byte:
extb.l %d0
jra fp_data_long
fp_data_word:
ext.l %d0
fp_data_long:
jsr fp_conv_long2ext
jra fp_getdest
fp_data_single:
jsr fp_conv_single2ext
jra fp_getdest
| addressing mode: address register indirect
fp_indirect:
fp_mode_addr_indirect
jra fp_fetchsource
| addressing mode: address register indirect with postincrement
fp_postinc:
fp_mode_addr_indirect_postinc
jra fp_fetchsource
| addressing mode: address register indirect with predecrement
fp_predecr:
fp_mode_addr_indirect_predec
jra fp_fetchsource
| addressing mode: address register/program counter indirect
| with 16-bit displacement
fp_disp16:
fp_mode_addr_indirect_disp16
jra fp_fetchsource
| all other indirect addressing modes will finally end up here
fp_extmode0:
fp_mode_addr_indirect_extmode0
jra fp_fetchsource
| all pc relative addressing modes and immediate/absolute modes end up here
| the first ones are sent to fp_extmode0 or fp_disp16
| and only the latter are handled here
fp_extmode1:
fp_decode_addr_reg
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fp_abs_short, fp_abs_long
.long fp_disp16, fp_extmode0
.long fp_immediate, fp_ill
.long fp_ill, fp_ill
| addressing mode: absolute short
fp_abs_short:
fp_mode_abs_short
jra fp_fetchsource
| addressing mode: absolute long
fp_abs_long:
fp_mode_abs_long
jra fp_fetchsource
| addressing mode: immediate data
fp_immediate:
printf PDECODE,"#"
fp_get_pc %a0
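| look up the operand size in bytes and round it up to a multiple of
| two, since immediates always occupy whole 16-bit instruction words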
move.w (fp_datasize,%d1.w*2),%d0
addq.w #1,%d0
and.w #-2,%d0
#ifdef FPU_EMU_DEBUG
movem.l %d0/%d1,-(%sp)
movel %a0,%a1
clr.l %d1
jra 2f
1: getuser.b (%a1)+,%d1,fp_err_ua1,%a1
printf PDECODE,"%02x",1,%d1
2: dbra %d0,1b
movem.l (%sp)+,%d0/%d1
#endif
lea (%a0,%d0.w),%a1
fp_put_pc %a1
| jra fp_fetchsource
fp_fetchsource:
move.l %a0,%a1
swap %d1
lea (FPD_TEMPFP1,FPDATA),%a0
jmp ([0f:w,%pc,%d1.w*4])
.align 4
0: .long fp_long, fp_single
.long fp_ext, fp_pack
.long fp_word, fp_double
.long fp_byte, fp_ill
fp_long:
getuser.l (%a1),%d0,fp_err_ua1,%a1
jsr fp_conv_long2ext
jra fp_getdest
fp_single:
getuser.l (%a1),%d0,fp_err_ua1,%a1
jsr fp_conv_single2ext
jra fp_getdest
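| in-memory extended format: sign and 15-bit exponent sit in the
| upper word of the first lword; the shifts below spread them into
| the emulator's internal layout (one sign byte, one exponent word)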
fp_ext:
getuser.l (%a1)+,%d0,fp_err_ua1,%a1
lsr.l #8,%d0
lsr.l #7,%d0
lsr.w #1,%d0
move.l %d0,(%a0)+
getuser.l (%a1)+,%d0,fp_err_ua1,%a1
move.l %d0,(%a0)+
getuser.l (%a1),%d0,fp_err_ua1,%a1
move.l %d0,(%a0)
subq.l #8,%a0
jra fp_getdest
fp_pack:
/* not supported yet */
jra fp_ill
fp_word:
getuser.w (%a1),%d0,fp_err_ua1,%a1
ext.l %d0
jsr fp_conv_long2ext
jra fp_getdest
fp_double:
jsr fp_conv_double2ext
jra fp_getdest
fp_byte:
getuser.b (%a1),%d0,fp_err_ua1,%a1
extb.l %d0
jsr fp_conv_long2ext
| jra fp_getdest
fp_getdest:
move.l %a0,%a1
bfextu %d2{#22,#3},%d0
printf PDECODE,",fp%d\n",1,%d0
fp_get_fp_reg
movem.l %a0/%a1,-(%sp)
pea fp_finalrounding
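| dispatch on the 7-bit opmode field of the command word; each
| routine returns (rts) through the fp_finalrounding address pushed
| above (or a precision-specific variant it installs in its place)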
bfextu %d2{#25,#7},%d0
jmp ([0f:w,%pc,%d0*4])
.align 4
0:
.long fp_fmove_mem2fp, fp_fint, fp_fsinh, fp_fintrz
.long fp_fsqrt, fp_ill, fp_flognp1, fp_ill
.long fp_fetoxm1, fp_ftanh, fp_fatan, fp_ill
.long fp_fasin, fp_fatanh, fp_fsin, fp_ftan
.long fp_fetox, fp_ftwotox, fp_ftentox, fp_ill
.long fp_flogn, fp_flog10, fp_flog2, fp_ill
.long fp_fabs, fp_fcosh, fp_fneg, fp_ill
.long fp_facos, fp_fcos, fp_fgetexp, fp_fgetman
.long fp_fdiv, fp_fmod, fp_fadd, fp_fmul
.long fpa_fsgldiv, fp_frem, fp_fscale, fpa_fsglmul
.long fp_fsub, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_fsincos0, fp_fsincos1, fp_fsincos2, fp_fsincos3
.long fp_fsincos4, fp_fsincos5, fp_fsincos6, fp_fsincos7
.long fp_fcmp, fp_ill, fp_ftst, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_fsmove, fp_fssqrt, fp_ill, fp_ill
.long fp_fdmove, fp_fdsqrt, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_fsabs, fp_ill, fp_fsneg, fp_ill
.long fp_fdabs, fp_ill, fp_fdneg, fp_ill
.long fp_fsdiv, fp_ill, fp_fsadd, fp_fsmul
.long fp_fddiv, fp_ill, fp_fdadd, fp_fdmul
.long fp_fssub, fp_ill, fp_ill, fp_ill
.long fp_fdsub, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
.long fp_ill, fp_ill, fp_ill, fp_ill
| Instructions follow
| Move an (emulated) ROM constant
fp_fmovecr:
bfextu %d2{#27,#5},%d0
printf PINSTR,"fp_fmovecr #%d",1,%d0
move.l %d0,%d1
add.l %d0,%d0
add.l %d1,%d0
lea (fp_constants,%d0*4),%a0
move.l #0x801cc0ff,%d0
addq.l #1,%d1
lsl.l %d1,%d0
jcc 1f
fp_set_sr FPSR_EXC_INEX2 | INEX2 exception
1: moveq #-128,%d0 | continue with fmove
and.l %d0,%d2
jra fp_getdest
.data
.align 4
fp_constants:
.long 0x00004000,0xc90fdaa2,0x2168c235 | pi
.extend 0,0,0,0,0,0,0,0,0,0
.long 0x00003ffd,0x9a209a84,0xfbcff798 | log10(2)
.long 0x00004000,0xadf85458,0xa2bb4a9a | e
.long 0x00003fff,0xb8aa3b29,0x5c17f0bc | log2(e)
.long 0x00003ffd,0xde5bd8a9,0x37287195 | log10(e)
.long 0x00000000,0x00000000,0x00000000 | 0.0
.long 0x00003ffe,0xb17217f7,0xd1cf79ac | ln(2)
.long 0x00004000,0x935d8ddd,0xaaa8ac17 | ln(10)
| read this as "1.0 * 2^0" - note the high bit in the mantissa
.long 0x00003fff,0x80000000,0x00000000 | 10^0
.long 0x00004002,0xa0000000,0x00000000 | 10^1
.long 0x00004005,0xc8000000,0x00000000 | 10^2
.long 0x0000400c,0x9c400000,0x00000000 | 10^4
.long 0x00004019,0xbebc2000,0x00000000 | 10^8
.long 0x00004034,0x8e1bc9bf,0x04000000 | 10^16
.long 0x00004069,0x9dc5ada8,0x2b70b59e | 10^32
.long 0x000040d3,0xc2781f49,0xffcfa6d5 | 10^64
.long 0x000041a8,0x93ba47c9,0x80e98ce0 | 10^128
.long 0x00004351,0xaa7eebfb,0x9df9de8e | 10^256
.long 0x000046a3,0xe319a0ae,0xa60e91c7 | 10^512
.long 0x00004d48,0xc9767586,0x81750c17 | 10^1024
.long 0x00005a92,0x9e8b3b5d,0xc53d5de5 | 10^2048
.long 0x00007525,0xc4605202,0x8a20979b | 10^4096
.previous
fp_fmove_mem2fp:
printf PINSTR,"fmove %p,%p\n",2,%a0,%a1
move.l (%a1)+,(%a0)+
move.l (%a1)+,(%a0)+
move.l (%a1),(%a0)
subq.l #8,%a0
rts
fpa_fsglmul:
move.l #fp_finalrounding_single_fast,(%sp)
jra fp_fsglmul
fpa_fsgldiv:
move.l #fp_finalrounding_single_fast,(%sp)
jra fp_fsgldiv
.macro fp_dosingleprec instr
printf PINSTR,"single "
move.l #fp_finalrounding_single,(%sp)
jra \instr
.endm
.macro fp_dodoubleprec instr
printf PINSTR,"double "
move.l #fp_finalrounding_double,(%sp)
jra \instr
.endm
fp_fsmove:
fp_dosingleprec fp_fmove_mem2fp
fp_fssqrt:
fp_dosingleprec fp_fsqrt
fp_fdmove:
fp_dodoubleprec fp_fmove_mem2fp
fp_fdsqrt:
fp_dodoubleprec fp_fsqrt
fp_fsabs:
fp_dosingleprec fp_fabs
fp_fsneg:
fp_dosingleprec fp_fneg
fp_fdabs:
fp_dodoubleprec fp_fabs
fp_fdneg:
fp_dodoubleprec fp_fneg
fp_fsdiv:
fp_dosingleprec fp_fdiv
fp_fsadd:
fp_dosingleprec fp_fadd
fp_fsmul:
fp_dosingleprec fp_fmul
fp_fddiv:
fp_dodoubleprec fp_fdiv
fp_fdadd:
fp_dodoubleprec fp_fadd
fp_fdmul:
fp_dodoubleprec fp_fmul
fp_fssub:
fp_dosingleprec fp_fsub
fp_fdsub:
fp_dodoubleprec fp_fsub
fp_nonstd:
fp_get_pc %a0
getuser.l (%a0),%d0,fp_err_ua1,%a0
printf ,"nonstd ((%08x)=%08x)\n",2,%a0,%d0
moveq #-1,%d0
rts
.data
.align 4
| data sizes corresponding to the operand formats
fp_datasize:
.word 4, 4, 12, 12, 2, 8, 1, 0
| arch/m68k/68000/head.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
* head.S - Common startup code for 68000 core based CPUs
*
* 2012.10.21, Luis Alves <ljalvs@gmail.com>, Single head.S file for all
* 68000 core based CPUs. Based on the sources from:
* Coldfire by Greg Ungerer <gerg@snapgear.com>
* 68328 by D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
* Kenneth Albanowski <kjahds@kjahds.com>,
* The Silver Hammer Group, Ltd.
*
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/*****************************************************************************
* UCSIMM and UCDIMM use CONFIG_MEMORY_RESERVE to reserve some RAM
*****************************************************************************/
#ifdef CONFIG_MEMORY_RESERVE
#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)-(CONFIG_MEMORY_RESERVE*0x100000)
#else
#define RAMEND (CONFIG_RAMBASE+CONFIG_RAMSIZE)
#endif
/*****************************************************************************/
.global _start
.global _rambase
.global _ramvec
.global _ramstart
.global _ramend
#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
.global bootlogo_bits
#endif
/* If DEBUG_HEAD_CODE is defined, the serial port in the 68x328 is initialized */
/* #define DEBUG_HEAD_CODE */
#undef DEBUG_HEAD_CODE
.data
/*****************************************************************************
* RAM setup pointers. Used by the kernel to determine RAM location and size.
*****************************************************************************/
_rambase:
.long 0
_ramvec:
.long 0
_ramstart:
.long 0
_ramend:
.long 0
__HEAD
/*****************************************************************************
* Entry point, where all begins!
*****************************************************************************/
_start:
/* Pilot needs this specific signature at the start of ROM */
#ifdef CONFIG_PILOT
.byte 0x4e, 0xfa, 0x00, 0x0a /* bra opcode (jmp 10 bytes) */
.byte 'b', 'o', 'o', 't'
.word 10000
nop
moveq #0, %d0
movew %d0, 0xfffff618 /* Watchdog off */
movel #0x00011f07, 0xfffff114 /* CS A1 Mask */
#endif /* CONFIG_PILOT */
movew #0x2700, %sr /* disable all interrupts */
/*****************************************************************************
* Setup PLL and wait for it to settle (in 68x328 cpu's).
* Also, if enabled, init serial port.
*****************************************************************************/
#if defined(CONFIG_M68328) || \
defined(CONFIG_M68EZ328) || \
defined(CONFIG_M68VZ328)
/* Serial port setup. Should only be needed if debugging this startup code. */
#ifdef DEBUG_HEAD_CODE
movew #0x0800, 0xfffff906 /* Ignore CTS */
movew #0x010b, 0xfffff902 /* BAUD to 9600 */
movew #0xe100, 0xfffff900 /* enable */
#endif /* DEBUG_HEAD */
#ifdef CONFIG_PILOT
movew #0x2410, 0xfffff200 /* PLLCR */
#else
movew #0x2400, 0xfffff200 /* PLLCR */
#endif
movew #0x0123, 0xfffff202 /* PLLFSR */
moveq #0, %d0
movew #16384, %d0 /* PLL settle wait loop */
_pll_settle:
subw #1, %d0
bne _pll_settle
#endif /* CONFIG_M68x328 */
/*****************************************************************************
* If running the kernel from ROM, some specific initialization has to be done.
* (Assuming that everything is already init'ed when running from RAM)
*****************************************************************************/
#ifdef CONFIG_ROMKERNEL
/*****************************************************************************
* Init chip registers (uCsimm specific)
*****************************************************************************/
#ifdef CONFIG_UCSIMM
moveb #0x00, 0xfffffb0b /* Watchdog off */
moveb #0x10, 0xfffff000 /* SCR */
moveb #0x00, 0xfffff40b /* enable chip select */
moveb #0x00, 0xfffff423 /* enable /DWE */
moveb #0x08, 0xfffffd0d /* disable hardmap */
moveb #0x07, 0xfffffd0e /* level 7 interrupt clear */
movew #0x8600, 0xfffff100 /* FLASH at 0x10c00000 */
movew #0x018b, 0xfffff110 /* 2Meg, enable, 0ws */
movew #0x8f00, 0xfffffc00 /* DRAM configuration */
movew #0x9667, 0xfffffc02 /* DRAM control */
movew #0x0000, 0xfffff106 /* DRAM at 0x00000000 */
movew #0x068f, 0xfffff116 /* 8Meg, enable, 0ws */
moveb #0x40, 0xfffff300 /* IVR */
movel #0x007FFFFF, %d0 /* IMR */
movel %d0, 0xfffff304
moveb 0xfffff42b, %d0
andb #0xe0, %d0
moveb %d0, 0xfffff42b
#endif
/*****************************************************************************
* Init LCD controller.
* (Assuming that LCD controller is already init'ed when running from RAM)
*****************************************************************************/
#ifdef CONFIG_INIT_LCD
#ifdef CONFIG_PILOT
moveb #0, 0xfffffA27 /* LCKCON */
movel #_start, 0xfffffA00 /* LSSA */
moveb #0xa, 0xfffffA05 /* LVPW */
movew #0x9f, 0xFFFFFa08 /* LXMAX */
movew #0x9f, 0xFFFFFa0a /* LYMAX */
moveb #9, 0xfffffa29 /* LBAR */
moveb #0, 0xfffffa25 /* LPXCD */
moveb #0x04, 0xFFFFFa20 /* LPICF */
moveb #0x58, 0xfffffA27 /* LCKCON */
moveb #0x85, 0xfffff429 /* PFDATA */
moveb #0xd8, 0xfffffA27 /* LCKCON */
moveb #0xc5, 0xfffff429 /* PFDATA */
moveb #0xd5, 0xfffff429 /* PFDATA */
movel #bootlogo_bits, 0xFFFFFA00 /* LSSA */
moveb #10, 0xFFFFFA05 /* LVPW */
movew #160, 0xFFFFFA08 /* LXMAX */
movew #160, 0xFFFFFA0A /* LYMAX */
#else /* CONFIG_PILOT */
movel #bootlogo_bits, 0xfffffA00 /* LSSA */
moveb #0x28, 0xfffffA05 /* LVPW */
movew #0x280, 0xFFFFFa08 /* LXMAX */
movew #0x1df, 0xFFFFFa0a /* LYMAX */
moveb #0, 0xfffffa29 /* LBAR */
moveb #0, 0xfffffa25 /* LPXCD */
moveb #0x08, 0xFFFFFa20 /* LPICF */
moveb #0x01, 0xFFFFFA21 /* -ve pol */
moveb #0x81, 0xfffffA27 /* LCKCON */
movew #0xff00, 0xfffff412 /* LCD pins */
#endif /* CONFIG_PILOT */
#endif /* CONFIG_INIT_LCD */
/*****************************************************************************
* Kernel is running from FLASH/ROM (XIP)
* Copy init text & data to RAM
*****************************************************************************/
moveal #_etext, %a0
moveal #_sdata, %a1
moveal #__bss_start, %a2
_copy_initmem:
movel %a0@+, %a1@+
cmpal %a1, %a2
bhi _copy_initmem
#endif /* CONFIG_ROMKERNEL */
/*****************************************************************************
* Setup basic memory information for kernel
*****************************************************************************/
movel #CONFIG_VECTORBASE,_ramvec /* set vector base location */
movel #CONFIG_RAMBASE,_rambase /* set the base of RAM */
movel #RAMEND, _ramend /* set end ram addr */
lea __bss_stop,%a1
movel %a1,_ramstart
/*****************************************************************************
* If the kernel is in RAM, move romfs to right above bss and
* adjust _ramstart to where romfs ends.
*
* (Do this only if CONFIG_MTD_UCLINUX is true)
*****************************************************************************/
#if defined(CONFIG_ROMFS_FS) && defined(CONFIG_RAMKERNEL) && \
defined(CONFIG_MTD_UCLINUX)
lea __bss_start, %a0 /* get start of bss */
lea __bss_stop, %a1 /* set up destination */
movel %a0, %a2 /* copy of bss start */
movel 8(%a0), %d0 /* get size of ROMFS */
addql #8, %d0 /* allow for rounding */
andl #0xfffffffc, %d0 /* whole words */
addl %d0, %a0 /* copy from end */
addl %d0, %a1 /* copy from end */
movel %a1, _ramstart /* set start of ram */
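/* copy backwards (highest longword first) so the move stays safe
   even if the source and destination regions overlap */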
_copy_romfs:
movel -(%a0), -(%a1) /* copy dword */
cmpl %a0, %a2 /* check if at end */
bne _copy_romfs
#endif /* CONFIG_ROMFS_FS && CONFIG_RAMKERNEL && CONFIG_MTD_UCLINUX */
/*****************************************************************************
* Clear bss region
*****************************************************************************/
lea __bss_start, %a0 /* get start of bss */
lea __bss_stop, %a1 /* get end of bss */
_clear_bss:
movel #0, (%a0)+ /* clear each word */
cmpl %a0, %a1 /* check if at end */
bne _clear_bss
/*****************************************************************************
* Load the current task pointer and stack.
*****************************************************************************/
lea init_thread_union,%a0
lea THREAD_SIZE(%a0),%sp
jsr start_kernel /* start Linux kernel */
_exit:
jmp _exit /* should never get here */
| arch/m68k/68000/entry.S
/*
* entry.S -- non-mmu 68000 interrupt and exception entry points
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*/
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
.text
.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
.globl inthandler2
.globl inthandler3
.globl inthandler4
.globl inthandler5
.globl inthandler6
.globl inthandler7
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_exception
do_trace:
movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d1
movel #-ENOSYS,%d0
cmpl #NR_syscalls,%d1
jcc 1f
lsl #2,%d1
lea sys_call_table, %a0
jbsr %a0@(%d1)
1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
jra ret_from_exception
ENTRY(system_call)
SAVE_ALL_SYS
/* save top of frame*/
pea %sp@
jbsr set_esp0
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d0
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
movel %d1,%a2
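/* the flags longword is big-endian: pick the byte that holds
   TIF_SYSCALL_TRACE ((31-TIF)/8) and test bit TIF%8 within it */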
btst #(TIF_SYSCALL_TRACE%8),%a2@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
jne do_trace
cmpl #NR_syscalls,%d0
jcc badsys
lsl #2,%d0
lea sys_call_table,%a0
movel %a0@(%d0), %a0
jbsr %a0@
movel %d0,%sp@(PT_OFF_D0) /* save the return value*/
ret_from_exception:
btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/
jeq Luser_return /* no, returning to user: check resched, signals*/
Lkernel_return:
RESTORE_ALL
Luser_return:
/* only allow interrupts when we are really the last one on the*/
/* kernel stack, otherwise stack overflow can occur during*/
/* heavy interrupt load*/
andw #ALLOWINT,%sr
movel %sp,%d1 /* get thread_info pointer */
andl #-THREAD_SIZE,%d1
movel %d1,%a2
1:
move %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
jne Lwork_to_do
RESTORE_ALL
Lwork_to_do:
movel %a2@(TINFO_FLAGS),%d1 /* thread_info->flags */
btst #TIF_NEED_RESCHED,%d1
jne reschedule
Lsignal_return:
subql #4,%sp /* dummy return address*/
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
bsrw do_notify_resume
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jra 1b
/*
* This is the main interrupt handler, responsible for calling process_int()
*/
inthandler1:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #65,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler2:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #66,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler3:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #67,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler4:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #68,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler5:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #69,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler6:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #70,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler7:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel #71,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
inthandler:
SAVE_ALL_INT
movew %sp@(PT_OFF_FORMATVEC), %d0
and #0x3ff, %d0
movel %sp,%sp@-
movel %d0,%sp@- /* put vector # on stack*/
jbsr process_int /* process the IRQ*/
3: addql #8,%sp /* pop parameters off stack*/
bra ret_from_exception
/*
* Handler for uninitialized and spurious interrupts.
*/
ENTRY(bad_interrupt)
addql #1,irq_err_count
rte
/*
* Beware - when entering resume, prev (the current task) is
* in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
ENTRY(resume)
movel %a0,%d1 /* save prev thread in d1 */
movew %sr,%a0@(TASK_THREAD+THREAD_SR) /* save sr */
SAVE_SWITCH_STACK
movel %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack */
movel %usp,%a3 /* save usp */
movel %a3,%a0@(TASK_THREAD+THREAD_USP)
movel %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore user stack */
movel %a3,%usp
movel %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
RESTORE_SWITCH_STACK
movew %a1@(TASK_THREAD+THREAD_SR),%sr /* restore thread status reg */
rts
| arch/m68k/lib/umodsi3.S
/* libgcc1 routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
.text
.proc
.globl SYM (__umodsi3)
SYM (__umodsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__udivsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
EXPORT_SYMBOL(__umodsi3)
| arch/m68k/lib/udivsi3.S
/* libgcc1 routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
.text
.proc
.globl SYM (__udivsi3)
SYM (__udivsi3):
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
cmpl IMM (0x10000), d1 /* divisor >= 2 ^ 16 ? */
jcc L3 /* then try next algorithm */
movel d0, d2
clrw d2
swap d2
divu d1, d2 /* high quotient in lower word */
movew d2, d0 /* save high quotient */
swap d0
movew sp@(10), d2 /* get low dividend + high rest */
divu d1, d2 /* low quotient */
movew d2, d0
jra L6
L3: movel d1, d2 /* use d2 as divisor backup */
L4: lsrl IMM (1), d1 /* shift divisor */
lsrl IMM (1), d0 /* shift dividend */
cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
jcc L4
divu d1, d0 /* now we have 16 bit divisor */
andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
/* Multiply the 16 bit tentative quotient with the 32 bit divisor. Because of
the operand ranges, this might give a 33 bit product. If this product is
greater than the dividend, the tentative quotient was too large. */
movel d2, d1
mulu d0, d1 /* low part, 32 bits */
swap d2
mulu d0, d2 /* high part, at most 17 bits */
swap d2 /* align high part with low part */
tstw d2 /* high part 17 bits? */
jne L5 /* if 17 bits, quotient was too large */
addl d2, d1 /* add parts */
jcs L5 /* if sum is 33 bits, quotient was too large */
cmpl sp@(8), d1 /* compare the sum with the dividend */
jls L6 /* if sum > dividend, quotient was too large */
L5: subql IMM (1), d0 /* adjust quotient */
L6: movel sp@+, d2
rts
#else /* __mcf5200__ || __mcoldfire__ */
/* Coldfire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
link a6,IMM (-12)
moveml d2-d4,sp@
movel a6@(8),d0
movel a6@(12),d1
clrl d2 | clear p
moveq IMM (31),d4
L1: addl d0,d0 | shift reg pair (p,a) one bit left
addxl d2,d2
movl d2,d3 | subtract b from p, store in tmp.
subl d1,d3
jcs L2 | if no carry,
bset IMM (0),d0 | set the low order bit of a to 1,
movl d3,d2 | and store tmp in p.
L2: subql IMM (1),d4
jcc L1
moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
#endif /* __mcf5200__ || __mcoldfire__ */
EXPORT_SYMBOL(__udivsi3)
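/* Illustrative reference sketch (not part of this file): a C model of the
   68000 path above, assuming 32-bit unsigned arithmetic and a non-zero
   divisor.  Small divisors use two chained 16-bit divides; large divisors
   are shifted down to 16 bits and the tentative quotient is corrected by
   at most one, as argued in the comments above:

       typedef unsigned int u32;

       u32 udiv32_ref(u32 dividend, u32 divisor)
       {
               if (divisor < 0x10000) {
                       u32 hi   = dividend >> 16;
                       u32 q_hi = hi / divisor;                  // high quotient word
                       u32 rest = ((hi % divisor) << 16) | (dividend & 0xffff);
                       return (q_hi << 16) | (rest / divisor);   // low quotient word
               } else {
                       u32 d = divisor, n = dividend, q;
                       while (d >= 0x10000) {                    // shift both operands
                               d >>= 1;
                               n >>= 1;
                       }
                       q = n / d;                                // tentative quotient
                       if ((unsigned long long)q * divisor > dividend)
                               q--;                              // at most 1 too large
                       return q;
               }
       }
*/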
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,419
|
arch/m68k/lib/divsi3.S
|
/* libgcc1 routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
.text
.proc
.globl SYM (__divsi3)
SYM (__divsi3):
movel d2, sp@-
moveq IMM (1), d2 /* sign of result stored in d2 (=1 or =-1) */
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
#endif
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
negb d2
#else
negl d2
#endif
L2: movel d1, sp@-
movel d0, sp@-
jbsr SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
addql IMM (8), sp
tstb d2
jpl L3
negl d0
L3: movel sp@+, d2
rts
EXPORT_SYMBOL(__divsi3)
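/* Illustrative reference sketch (not part of this file): a C model of the
   sign handling above.  Both operands are made positive, divided unsigned
   (the plain division below stands in for __udivsi3), and the result is
   negated when exactly one operand was negative:

       typedef int s32;
       typedef unsigned int u32;

       s32 sdiv32_ref(s32 a, s32 b)
       {
               int negative = 0;                      // sign of the result
               u32 ua = a, ub = b, uq;

               if (b < 0) { ub = -(u32)b; negative = !negative; }
               if (a < 0) { ua = -(u32)a; negative = !negative; }
               uq = ua / ub;                          // unsigned divide
               return negative ? -(s32)uq : (s32)uq;  // INT_MIN wraps, as in the asm
       }
*/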
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,253
|
arch/m68k/lib/modsi3.S
|
/* libgcc1 routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
.text
.proc
.globl SYM (__modsi3)
SYM (__modsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__divsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
movel d1, sp@-
movel d0, sp@-
jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#endif
movel sp@(4), d1 /* d1 = dividend */
subl d0, d1 /* d1 = a - (a/b)*b */
movel d1, d0
rts
EXPORT_SYMBOL(__modsi3)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,122
|
arch/m68k/lib/mulsi3.S
|
/* libgcc1 routines for 68000 w/o floating-point hardware.
Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file with other programs, and to distribute
those programs without any restriction coming from the use of this
file. (The General Public License restrictions do apply in other
respects; for example, they cover modification of the file, and
distribution when not linked into another program.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details. */
/* As a special exception, if you link this library with files
compiled with GCC to produce an executable, this does not cause
the resulting executable to be covered by the GNU General Public License.
This exception does not however invalidate any other reasons why
the executable file might be covered by the GNU General Public License. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
Some of this code comes from MINIX, via the folks at ericsson.
D. V. Henkel-Wallace (gumby@cygnus.com) Fete Bastille, 1992
*/
#include <asm/export.h>
/* These are predefined by new versions of GNU cpp. */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif
#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif
#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif
/* ANSI concatenation macros. */
#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b
/* Use the right prefix for global labels. */
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
/* Use the right prefix for immediate values. */
#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
.text
.proc
.globl SYM (__mulsi3)
SYM (__mulsi3):
movew sp@(4), d0 /* x0 -> d0 */
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
#if !(defined(__mcf5200__) || defined(__mcoldfire__))
addw d1, d0
#else
addl d1, d0
#endif
swap d0
clrw d0
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(10), d1 /* x1*y1 */
addl d1, d0
rts
EXPORT_SYMBOL(__mulsi3)
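/* Illustrative reference sketch (not part of this file): a C model of the
   partial-product scheme above, assuming 32-bit unsigned arithmetic.
   x0/y0 are the high 16-bit halves and x1/y1 the low halves, matching the
   register comments; x0*y0 is never needed because only the low 32 bits
   of the product survive:

       typedef unsigned int u32;

       u32 mul32_lo_ref(u32 x, u32 y)
       {
               u32 x0 = x >> 16, x1 = x & 0xffff;
               u32 y0 = y >> 16, y1 = y & 0xffff;

               return ((x0 * y1 + x1 * y0) << 16) + x1 * y1;   // low 32 bits of x*y
       }
*/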
|
AirFortressIlikara/LS2K0300-linux-4.19
| 12,033
|
arch/m68k/mac/mac_penguin.S
|
/* SPDX-License-Identifier: GPL-2.0 */
.byte \
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0xF0,0x00,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xF0,0xFF,0xFF,0x0F,0xF0,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0x00,0xFF,0xFF,0x0F,0xFF,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xF0,0x0F,0xFF,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xFF,0x00,0x0F,0x0F,0xFF,0xF0,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0xFF,0x00,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x0F,0xF0,0x00,0x00,0xFF,0xF0,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x0F,0xF0,0xFF,0xFF,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0xFF,0xF0,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x0F,0xFF,0x00,0xFF,0xF0,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x0F,0xFF,0xFF,0xFF,0x00,0x00,0xF0,0x00,0x00,\
0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0xF0,0x00,0x00,\
0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0x00,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,\
0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,\
0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
0x0F,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0x0F,0xF0,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xF0,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,\
0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,\
0x0F,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0xFF,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,\
0x0F,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0xFF,0x00,0x00,0x00,0x00,0x00,0xFF,0x00,0x00,0x00,0x00,\
0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xF0,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x00,0x00,0x00,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xF0,0x00,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0x00,0x0F,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,\
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xF0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0F,0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00
|
AirFortressIlikara/LS2K0300-linux-4.19
| 30,683
|
arch/m68k/ifpsp060/src/ilsp.S
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
and any warranty against infringement with regard to the SOFTWARE
(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
To the maximum extent permitted by applicable law,
IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
so long as this entire notice is retained without alteration in any modified and/or
redistributed versions, and that such modified versions are clearly identified as such.
No licenses are granted by implication, estoppel or otherwise under any patents
or trademarks of Motorola, Inc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# litop.s:
# This file is appended to the top of the 060ILSP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located here.
#
bra.l _060LSP__idivs64_
short 0x0000
bra.l _060LSP__idivu64_
short 0x0000
bra.l _060LSP__imuls64_
short 0x0000
bra.l _060LSP__imulu64_
short 0x0000
bra.l _060LSP__cmp2_Ab_
short 0x0000
bra.l _060LSP__cmp2_Aw_
short 0x0000
bra.l _060LSP__cmp2_Al_
short 0x0000
bra.l _060LSP__cmp2_Db_
short 0x0000
bra.l _060LSP__cmp2_Dw_
short 0x0000
bra.l _060LSP__cmp2_Dl_
short 0x0000
# leave room for future possible additions.
align 0x200
#########################################################################
# XDEF **************************************************************** #
# _060LSP__idivu64_(): Emulate 64-bit unsigned div instruction. #
# _060LSP__idivs64_(): Emulate 64-bit signed div instruction. #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 div{s,u}.l #
# 64-bit divide instruction. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# 0x4(sp) = divisor #
# 0x8(sp) = hi(dividend) #
# 0xc(sp) = lo(dividend) #
# 0x10(sp) = pointer to location to place quotient/remainder #
# #
# OUTPUT ************************************************************** #
# 0x10(sp) = points to location of remainder/quotient. #
# remainder is in first longword, quotient is in 2nd. #
# #
# ALGORITHM *********************************************************** #
# If the operands are signed, make them unsigned and save the #
# sign info for later. Separate out special cases like divide-by-zero #
# or 32-bit divides if possible. Else, use a special math algorithm #
# to calculate the result. #
# Restore sign info if signed instruction. Set the condition #
# codes before performing the final "rts". If the divisor was equal to #
# zero, then perform a divide-by-zero using a 16-bit implemented #
# divide instruction. This way, the operating system can record that #
# the event occurred even though it may not point to the correct place. #
# #
#########################################################################
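# Illustrative reference sketch (not part of the package): a C-level view
# of the unsigned entry point and the dispatch described above.  The names
# are hypothetical; the real routine is reached through the branch table
# and takes its arguments on the stack as listed under INPUT, with the
# remainder stored first and the quotient second:
#
#       typedef unsigned int u32;
#
#       struct divu64_result { u32 rem; u32 quot; };
#
#       void idivu64_ref(u32 divisor, u32 hi, u32 lo, struct divu64_result *out)
#       {
#               unsigned long long dividend = ((unsigned long long)hi << 32) | lo;
#
#               if (divisor == 0)
#                       return;         /* real code forces a divide-by-zero trap */
#               if (hi >= divisor)
#                       return;         /* quotient will not fit in 32 bits: overflow */
#               out->quot = (u32)(dividend / divisor);  /* fits, since hi < divisor */
#               out->rem  = (u32)(dividend % divisor);
#       }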
set POSNEG, -1
set NDIVISOR, -2
set NDIVIDEND, -3
set DDSECOND, -4
set DDNORMAL, -8
set DDQUOTIENT, -12
set DIV64_CC, -16
##########
# divs.l #
##########
global _060LSP__idivs64_
_060LSP__idivs64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-16
movm.l &0x3f00,-(%sp) # save d2-d7
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,DIV64_CC(%a6)
st POSNEG(%a6) # signed operation
bra.b ldiv64_cont
##########
# divu.l #
##########
global _060LSP__idivu64_
_060LSP__idivu64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-16
movm.l &0x3f00,-(%sp) # save d2-d7
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,DIV64_CC(%a6)
sf POSNEG(%a6) # unsigned operation
ldiv64_cont:
mov.l 0x8(%a6),%d7 # fetch divisor
beq.w ldiv64eq0 # divisor is = 0!!!
mov.l 0xc(%a6), %d5 # get dividend hi
mov.l 0x10(%a6), %d6 # get dividend lo
# separate signed and unsigned divide
tst.b POSNEG(%a6) # signed or unsigned?
beq.b ldspecialcases # use positive divide
# save the sign of the divisor
# make divisor unsigned if it's negative
tst.l %d7 # chk sign of divisor
slt NDIVISOR(%a6) # save sign of divisor
bpl.b ldsgndividend
neg.l %d7 # complement negative divisor
# save the sign of the dividend
# make dividend unsigned if it's negative
ldsgndividend:
tst.l %d5 # chk sign of hi(dividend)
slt NDIVIDEND(%a6) # save sign of dividend
bpl.b ldspecialcases
mov.w &0x0, %cc # clear 'X' cc bit
negx.l %d6 # complement signed dividend
negx.l %d5
# extract some special cases:
# - is (dividend == 0) ?
# - is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
ldspecialcases:
tst.l %d5 # is (hi(dividend) == 0)
bne.b ldnormaldivide # no, so try it the long way
tst.l %d6 # is (lo(dividend) == 0), too
beq.w lddone # yes, so (dividend == 0)
cmp.l %d7,%d6 # is (divisor <= lo(dividend))
bls.b ld32bitdivide # yes, so use 32 bit divide
exg %d5,%d6 # q = 0, r = dividend
bra.w ldivfinish # can't divide, we're done.
ld32bitdivide:
tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
bra.b ldivfinish
ldnormaldivide:
# last special case:
# - is hi(dividend) >= divisor ? if yes, then overflow
cmp.l %d7,%d5
bls.b lddovf # answer won't fit in 32 bits
# perform the divide algorithm:
bsr.l ldclassical # do int divide
# separate into signed and unsigned finishes.
ldivfinish:
tst.b POSNEG(%a6) # do divs, divu separately
beq.b lddone # divu has no processing!!!
# it was a divs.l, so ccode setting is a little more complicated...
tst.b NDIVIDEND(%a6) # remainder has same sign
beq.b ldcc # as dividend.
neg.l %d5 # sgn(rem) = sgn(dividend)
ldcc:
mov.b NDIVISOR(%a6), %d0
eor.b %d0, NDIVIDEND(%a6) # chk if quotient is negative
beq.b ldqpos # branch to quot positive
# 0x80000000 is the negative number of largest magnitude representable
# in 32 bits. the negative of 0x80000000 is 0x80000000.
cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
bhi.b lddovf
neg.l %d6 # make (-quot) 2's comp
bra.b lddone
ldqpos:
btst &0x1f, %d6 # will (+quot) fit in 32 bits?
bne.b lddovf
lddone:
# if the register numbers are the same, only the quotient gets saved.
# so, if we always save the quotient second, we save ourselves a cmp&beq
andi.w &0x10,DIV64_CC(%a6)
mov.w DIV64_CC(%a6),%cc
tst.l %d6 # may set 'N' ccode bit
# here, the result is in d1 and d0. the current strategy is to save
# the values at the location pointed to by a0.
# use movm here to not disturb the condition codes.
ldexit:
movm.l &0x0060,([0x14,%a6]) # save result
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x00fc # restore d2-d7
unlk %a6
# EPILOGUE END ##########################################################
rts
# the result should be the unchanged dividend
lddovf:
mov.l 0xc(%a6), %d5 # get dividend hi
mov.l 0x10(%a6), %d6 # get dividend lo
andi.w &0x1c,DIV64_CC(%a6)
ori.w &0x02,DIV64_CC(%a6) # set 'V' ccode bit
mov.w DIV64_CC(%a6),%cc
bra.b ldexit
ldiv64eq0:
mov.l 0xc(%a6),([0x14,%a6])
mov.l 0x10(%a6),([0x14,%a6],0x4)
mov.w DIV64_CC(%a6),%cc
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x00fc # restore d2-d7
unlk %a6
# EPILOGUE END ##########################################################
divu.w &0x0,%d0 # force a divbyzero exception
rts
###########################################################################
#########################################################################
# This routine uses the 'classical' Algorithm D from Donald Knuth's #
# Art of Computer Programming, vol II, Seminumerical Algorithms. #
# For this implementation b=2**16, and the target is U1U2U3U4/V1V2, #
# where U,V are words of the quadword dividend and longword divisor, #
# and U1, V1 are the most significant words. #
# #
# The most sig. longword of the 64 bit dividend must be in %d5, least #
# in %d6 and the divisor in %d7. Sign handling is done by the caller, #
# so this routine treats both operands as unsigned. #
# The quotient is returned in %d6, remainder in %d5, unless the #
# v (overflow) bit is set in the saved %ccr. If overflow, the dividend #
# is unchanged. #
#########################################################################
ldclassical:
# if the divisor msw is 0, use a simpler algorithm than the full blown
# one at lddknuth:
cmpi.l %d7, &0xffff
bhi.b lddknuth # go use D. Knuth algorithm
# Since the divisor is only a word (and larger than the mslw of the dividend),
# a simpler algorithm may be used :
# In the general case, four quotient words would be created by
# dividing the divisor word into each dividend word. In this case,
# the first two quotient words must be zero, or overflow would occur.
# Since we already checked this case above, we can treat the most significant
# longword of the dividend as (0) remainder (see Knuth) and merely complete
# the last two divisions to get a quotient longword and word remainder:
clr.l %d1
swap %d5 # same as r*b if previous step rqd
swap %d6 # get u3 to lsw position
mov.w %d6, %d5 # rb + u3
divu.w %d7, %d5
mov.w %d5, %d1 # first quotient word
swap %d6 # get u4
mov.w %d6, %d5 # rb + u4
divu.w %d7, %d5
swap %d1
mov.w %d5, %d1 # 2nd quotient 'digit'
clr.w %d5
swap %d5 # now remainder
mov.l %d1, %d6 # and quotient
rts
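# Illustrative reference sketch (not part of the package): a C model of the
# word-at-a-time path above, assuming 32-bit unsigned arithmetic.  'hi' is
# the most significant longword of the dividend, already known to be
# smaller than the word-sized divisor v, so it serves directly as the
# initial remainder:
#
#       typedef unsigned int u32;
#
#       u32 div_by_word_ref(u32 hi, u32 lo, u32 v, u32 *rem)
#       {
#               u32 r  = hi;                        /* "rb" term, already < v */
#               u32 u3 = lo >> 16, u4 = lo & 0xffff;
#               u32 q3 = (r * 0x10000 + u3) / v;    /* first quotient word */
#
#               r    = (r * 0x10000 + u3) % v;
#               *rem = (r * 0x10000 + u4) % v;      /* final word remainder */
#               return (q3 << 16) | ((r * 0x10000 + u4) / v);
#       }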
lddknuth:
# In this algorithm, the divisor is treated as a 2 digit (word) number
# which is divided into a 3 digit (word) dividend to get one quotient
# digit (word). After subtraction, the dividend is shifted and the
# process repeated. Before beginning, the divisor and dividend are
# 'normalized' so that the process of estimating the quotient digit
# will yield verifiably correct results.
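# Illustrative reference sketch (not part of the package): a C view of the
# normalization step only, assuming 32-bit unsigned arithmetic.  The
# divisor is shifted left until its msb is set and the 64-bit dividend
# (hi:lo) is shifted along with it; because hi < v on entry no dividend
# bits are lost, and the shift count is kept so the remainder can be
# shifted back down afterwards:
#
#       typedef unsigned int u32;
#
#       static int normalize_ref(u32 *v, u32 *hi, u32 *lo)
#       {
#               int shifts = 0;
#
#               while (!(*v & 0x80000000u)) {
#                       *v <<= 1;
#                       *hi = (*hi << 1) | (*lo >> 31);   /* 64-bit left shift */
#                       *lo <<= 1;
#                       shifts++;
#               }
#               return shifts;
#       }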
clr.l DDNORMAL(%a6) # count of shifts for normalization
clr.b DDSECOND(%a6) # clear flag for quotient digits
clr.l %d1 # %d1 will hold trial quotient
lddnchk:
btst &31, %d7 # must we normalize? first word of
bne.b lddnormalized # divisor (V1) must be >= 65536/2
addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
lsl.l &0x1, %d7 # shift the divisor
lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
roxl.l &0x1, %d5 # shift u1,u2
bra.w lddnchk
lddnormalized:
# Now calculate an estimate of the quotient words (msw first, then lsw).
# The comments use subscripts for the first quotient digit determination.
mov.l %d7, %d3 # divisor
mov.l %d5, %d2 # dividend mslw
swap %d2
swap %d3
cmp.w %d2, %d3 # V1 = U1 ?
bne.b lddqcalc1
mov.w &0xffff, %d1 # use max trial quotient word
bra.b lddadj0
lddqcalc1:
mov.l %d5, %d1
divu.w %d3, %d1 # use quotient of mslw/msw
andi.l &0x0000ffff, %d1 # zero any remainder
lddadj0:
# now test the trial quotient and adjust. This step plus the
# normalization assures (according to Knuth) that the trial
# quotient will be at worst 1 too large.
mov.l %d6, -(%sp)
clr.w %d6 # word u3 left
swap %d6 # in lsw position
lddadj1: mov.l %d7, %d3
mov.l %d1, %d2
mulu.w %d7, %d2 # V2q
swap %d3
mulu.w %d1, %d3 # V1q
mov.l %d5, %d4 # U1U2
sub.l %d3, %d4 # U1U2 - V1q
swap %d4
mov.w %d4,%d0
mov.w %d6,%d4 # insert lower word (U3)
tst.w %d0 # is upper word set?
bne.w lddadjd1
# add.l %d6, %d4 # (U1U2 - V1q) + U3
cmp.l %d2, %d4
bls.b lddadjd1 # is V2q > (U1U2-V1q) + U3 ?
subq.l &0x1, %d1 # yes, decrement and recheck
bra.b lddadj1
lddadjd1:
# now test the word by multiplying it by the divisor (V1V2) and comparing
# the 3 digit (word) result with the current dividend words
mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
mov.l %d1, %d6
swap %d6 # shift answer to ms 3 words
mov.l %d7, %d5
bsr.l ldmm2
mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
mov.l %d6, %d3
mov.l (%sp)+, %d5 # restore dividend
mov.l (%sp)+, %d6
sub.l %d3, %d6
subx.l %d2, %d5 # subtract double precision
bcc ldd2nd # no carry, do next quotient digit
subq.l &0x1, %d1 # q is one too large
# need to add back divisor longword to current ms 3 digits of dividend
# - according to Knuth, this is done only 2 out of 65536 times for random
# divisor, dividend selection.
clr.l %d2
mov.l %d7, %d3
swap %d3
clr.w %d3 # %d3 now ls word of divisor
add.l %d3, %d6 # aligned with 3rd word of dividend
addx.l %d2, %d5
mov.l %d7, %d3
clr.w %d3 # %d3 now ms word of divisor
swap %d3 # aligned with 2nd word of dividend
add.l %d3, %d5
ldd2nd:
tst.b DDSECOND(%a6) # both q words done?
bne.b lddremain
# first quotient digit now correct. store digit and shift the
# (subtracted) dividend
mov.w %d1, DDQUOTIENT(%a6)
clr.l %d1
swap %d5
swap %d6
mov.w %d6, %d5
clr.w %d6
st DDSECOND(%a6) # second digit
bra.w lddnormalized
lddremain:
# add 2nd word to quotient, get the remainder.
mov.w %d1, DDQUOTIENT+2(%a6)
# shift down one word/digit to renormalize remainder.
mov.w %d5, %d6
swap %d6
swap %d5
mov.l DDNORMAL(%a6), %d7 # get norm shift count
beq.b lddrn
subq.l &0x1, %d7 # set for loop count
lddnlp:
lsr.l &0x1, %d5 # shift into %d6
roxr.l &0x1, %d6
dbf %d7, lddnlp
lddrn:
mov.l %d6, %d5 # remainder
mov.l DDQUOTIENT(%a6), %d6 # quotient
rts
ldmm2:
# factors for the 32X32->64 multiplication are in %d5 and %d6.
# returns 64 bit result in %d5 (hi) %d6(lo).
# destroys %d2,%d3,%d4.
# multiply hi,lo words of each factor to get 4 intermediate products
mov.l %d6, %d2
mov.l %d6, %d3
mov.l %d5, %d4
swap %d3
swap %d4
mulu.w %d5, %d6 # %d6 <- lsw*lsw
mulu.w %d3, %d5 # %d5 <- msw-dest*lsw-source
mulu.w %d4, %d2 # %d2 <- msw-source*lsw-dest
mulu.w %d4, %d3 # %d3 <- msw*msw
# now use swap and addx to consolidate to two longwords
clr.l %d4
swap %d6
add.w %d5, %d6 # add msw of l*l to lsw of m*l product
addx.w %d4, %d3 # add any carry to m*m product
add.w %d2, %d6 # add in lsw of other m*l product
addx.w %d4, %d3 # add any carry to m*m product
swap %d6 # %d6 is low 32 bits of final product
clr.w %d5
clr.w %d2 # lsw of two mixed products used,
swap %d5 # now use msws of longwords
swap %d2
add.l %d2, %d5
add.l %d3, %d5 # %d5 now ms 32 bits of final product
rts
#########################################################################
# XDEF **************************************************************** #
# _060LSP__imulu64_(): Emulate 64-bit unsigned mul instruction #
# _060LSP__imuls64_(): Emulate 64-bit signed mul instruction. #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 mul{s,u}.l #
# 64-bit multiply instruction. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# 0x4(sp) = multiplier #
# 0x8(sp) = multiplicand #
# 0xc(sp) = pointer to location to place 64-bit result #
# #
# OUTPUT ************************************************************** #
# 0xc(sp) = points to location of 64-bit result #
# #
# ALGORITHM *********************************************************** #
# Perform the multiply in pieces using 16x16->32 unsigned #
# multiplies and "add" instructions. #
# Set the condition codes as appropriate before performing an #
# "rts". #
# #
#########################################################################
set MUL64_CC, -4
global _060LSP__imulu64_
_060LSP__imulu64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.w mulu64_zero # handle zero separately
mov.l 0xc(%a6),%d1 # get multiplicand in d1
beq.w mulu64_zero # handle zero separately
#########################################################################
# 63 32 0 #
# ---------------------------- #
# | hi(mplier) * hi(mplicand)| #
# ---------------------------- #
# ----------------------------- #
# | hi(mplier) * lo(mplicand) | #
# ----------------------------- #
# ----------------------------- #
# | lo(mplier) * hi(mplicand) | #
# ----------------------------- #
# | ----------------------------- #
# --|-- | lo(mplier) * lo(mplicand) | #
# | ----------------------------- #
# ======================================================== #
# -------------------------------------------------------- #
# | hi(result) | lo(result) | #
# -------------------------------------------------------- #
#########################################################################
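# Illustrative reference sketch (not part of the package): a C model of the
# diagram above, assuming 32-bit unsigned arithmetic.  The 64-bit product
# is built from four 16x16->32 partial products, with the two "middle"
# products split across the two result longwords:
#
#       typedef unsigned int u32;
#
#       void mul32x32_ref(u32 mr, u32 md, u32 *hi, u32 *lo)
#       {
#               u32 mr_lo = mr & 0xffff, mr_hi = mr >> 16;
#               u32 md_lo = md & 0xffff, md_hi = md >> 16;
#
#               u32 p1 = mr_lo * md_lo;     /* [1] lo(mr) * lo(md) */
#               u32 p2 = mr_hi * md_lo;     /* [2] hi(mr) * lo(md) */
#               u32 p3 = mr_lo * md_hi;     /* [3] lo(mr) * hi(md) */
#               u32 p4 = mr_hi * md_hi;     /* [4] hi(mr) * hi(md) */
#
#               u32 mid = (p1 >> 16) + (p2 & 0xffff) + (p3 & 0xffff);
#
#               *lo = (p1 & 0xffff) | (mid << 16);
#               *hi = p4 + (p2 >> 16) + (p3 >> 16) + (mid >> 16);
#       }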
mulu64_alg:
# load temp registers with operands
mov.l %d0,%d2 # mr in d2
mov.l %d0,%d3 # mr in d3
mov.l %d1,%d4 # md in d4
swap %d3 # hi(mr) in lo d3
swap %d4 # hi(md) in lo d4
# complete necessary multiplies:
mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
# add lo portions of [2],[3] to hi portion of [1].
# add carries produced from these adds to [4].
# lo([1]) is the final lo 16 bits of the result.
clr.l %d4 # load d4 w/ zero value
swap %d0 # hi([1]) <==> lo([1])
add.w %d1,%d0 # hi([1]) + lo([2])
addx.l %d4,%d3 # [4] + carry
add.w %d2,%d0 # hi([1]) + lo([3])
addx.l %d4,%d3 # [4] + carry
swap %d0 # lo([1]) <==> hi([1])
# lo portions of [2],[3] have been added in to final result.
# now, clear lo, put hi in lo reg, and add to [4]
clr.w %d1 # clear lo([2])
clr.w %d2 # clear hi([3])
swap %d1 # hi([2]) in lo d1
swap %d2 # hi([3]) in lo d2
add.l %d2,%d1 # [4] + hi([2])
add.l %d3,%d1 # [4] + hi([3])
# now, grab the condition codes. only one that can be set is 'N'.
# 'N' CAN be set even though the operation is unsigned, if bit 63 is set.
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4 # keep old 'X' bit
tst.l %d1 # may set 'N' bit
bpl.b mulu64_ddone
ori.b &0x8,%d4 # set 'N' bit
mulu64_ddone:
mov.w %d4,%cc
# here, the result is in d1 and d0. the current strategy is to save
# the values at the location pointed to by a0.
# use movm here to not disturb the condition codes.
mulu64_end:
exg %d1,%d0
movm.l &0x0003,([0x10,%a6]) # save result
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x001c # restore d2-d4
unlk %a6
# EPILOGUE END ##########################################################
rts
# one or both of the operands is zero so the result is also zero.
# save the zero result to the register file and set the 'Z' ccode bit.
mulu64_zero:
clr.l %d0
clr.l %d1
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4
ori.b &0x4,%d4
mov.w %d4,%cc # set 'Z' ccode bit
bra.b mulu64_end
##########
# muls.l #
##########
global _060LSP__imuls64_
_060LSP__imuls64_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3c00,-(%sp) # save d2-d5
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
mov.l 0x8(%a6),%d0 # store multiplier in d0
beq.b mulu64_zero # handle zero separately
mov.l 0xc(%a6),%d1 # get multiplicand in d1
beq.b mulu64_zero # handle zero separately
clr.b %d5 # clear sign tag
tst.l %d0 # is multiplier negative?
bge.b muls64_chk_md_sgn # no
neg.l %d0 # make multiplier positive
ori.b &0x1,%d5 # save multiplier sgn
# the result sign is the exclusive or of the operand sign bits.
muls64_chk_md_sgn:
tst.l %d1 # is multiplicand negative?
bge.b muls64_alg # no
neg.l %d1 # make multiplicand positive
eori.b &0x1,%d5 # calculate correct sign
#########################################################################
# 63 32 0 #
# ---------------------------- #
# | hi(mplier) * hi(mplicand)| #
# ---------------------------- #
# ----------------------------- #
# | hi(mplier) * lo(mplicand) | #
# ----------------------------- #
# ----------------------------- #
# | lo(mplier) * hi(mplicand) | #
# ----------------------------- #
# | ----------------------------- #
# --|-- | lo(mplier) * lo(mplicand) | #
# | ----------------------------- #
# ======================================================== #
# -------------------------------------------------------- #
# | hi(result) | lo(result) | #
# -------------------------------------------------------- #
#########################################################################
muls64_alg:
# load temp registers with operands
mov.l %d0,%d2 # mr in d2
mov.l %d0,%d3 # mr in d3
mov.l %d1,%d4 # md in d4
swap %d3 # hi(mr) in lo d3
swap %d4 # hi(md) in lo d4
# complete necessary multiplies:
mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
# add lo portions of [2],[3] to hi portion of [1].
# add carries produced from these adds to [4].
# lo([1]) is the final lo 16 bits of the result.
clr.l %d4 # load d4 w/ zero value
swap %d0 # hi([1]) <==> lo([1])
add.w %d1,%d0 # hi([1]) + lo([2])
addx.l %d4,%d3 # [4] + carry
add.w %d2,%d0 # hi([1]) + lo([3])
addx.l %d4,%d3 # [4] + carry
swap %d0 # lo([1]) <==> hi([1])
# lo portions of [2],[3] have been added in to final result.
# now, clear lo, put hi in lo reg, and add to [4]
clr.w %d1 # clear lo([2])
clr.w %d2 # clear hi([3])
swap %d1 # hi([2]) in lo d1
swap %d2 # hi([3]) in lo d2
add.l %d2,%d1 # [4] + hi([2])
add.l %d3,%d1 # [4] + hi([3])
tst.b %d5 # should result be signed?
beq.b muls64_done # no
# result should be a signed negative number.
# compute 2's complement of the unsigned number:
# -negate all bits and add 1
muls64_neg:
not.l %d0 # negate lo(result) bits
not.l %d1 # negate hi(result) bits
addq.l &1,%d0 # add 1 to lo(result)
addx.l %d4,%d1 # add carry to hi(result)
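# Illustrative reference sketch (not part of the package): the same
# two's-complement negation in C, with the 64-bit value held as two 32-bit
# halves; the carry out of the low word propagates into the high word,
# just as addx does above:
#
#       typedef unsigned int u32;
#
#       static void neg64_ref(u32 *hi, u32 *lo)
#       {
#               *lo = ~*lo;
#               *hi = ~*hi;
#               if (++*lo == 0)         /* add 1; carry into the high word */
#                       ++*hi;
#       }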
muls64_done:
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4 # keep old 'X' bit
tst.l %d1 # may set 'N' bit
bpl.b muls64_ddone
ori.b &0x8,%d4 # set 'N' bit
muls64_ddone:
mov.w %d4,%cc
# here, the result is in d1 and d0. the current strategy is to save
# the values at the location pointed to by a0.
# use movm here to not disturb the condition codes.
muls64_end:
exg %d1,%d0
movm.l &0x0003,([0x10,%a6]) # save result at (a0)
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x003c # restore d2-d5
unlk %a6
# EPILOGUE END ##########################################################
rts
# one or both of the operands is zero so the result is also zero.
# save the zero result to the register file and set the 'Z' ccode bit.
muls64_zero:
clr.l %d0
clr.l %d1
mov.w MUL64_CC(%a6),%d4
andi.b &0x10,%d4
ori.b &0x4,%d4
mov.w %d4,%cc # set 'Z' ccode bit
bra.b muls64_end
#########################################################################
# XDEF **************************************************************** #
# _060LSP__cmp2_Ab_(): Emulate "cmp2.b An,<ea>". #
# _060LSP__cmp2_Aw_(): Emulate "cmp2.w An,<ea>". #
# _060LSP__cmp2_Al_(): Emulate "cmp2.l An,<ea>". #
# _060LSP__cmp2_Db_(): Emulate "cmp2.b Dn,<ea>". #
# _060LSP__cmp2_Dw_(): Emulate "cmp2.w Dn,<ea>". #
# _060LSP__cmp2_Dl_(): Emulate "cmp2.l Dn,<ea>". #
# #
# This is the library version which is accessed as a subroutine #
# and therefore does not work exactly like the 680X0 "cmp2" #
# instruction. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# 0x4(sp) = Rn #
# 0x8(sp) = pointer to boundary pair #
# #
# OUTPUT ************************************************************** #
# cc = condition codes are set correctly #
# #
# ALGORITHM *********************************************************** #
# In the interest of simplicity, all operands are converted to #
# longword size whether the operation is byte, word, or long. The #
# bounds are sign extended accordingly. If Rn is a data register, Rn is #
# also sign extended. If Rn is an address register, it need not be sign #
# extended since the full register is always used. #
# The condition codes are set correctly before the final "rts". #
# #
#########################################################################
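# Illustrative reference sketch (not part of the package): the in-bounds
# test itself reduces to one unsigned compare after biasing by the lower
# bound, which is what the emulation below computes; the condition-code
# packing is omitted here.  Assumes lo <= hi in the chosen signed or
# unsigned interpretation:
#
#       typedef unsigned int u32;
#
#       static int cmp2_in_bounds_ref(u32 rn, u32 lo, u32 hi)
#       {
#               return (u32)(rn - lo) <= (u32)(hi - lo);
#       }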
set CMP2_CC, -4
global _060LSP__cmp2_Ab_
_060LSP__cmp2_Ab_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.b ([0xc,%a6],0x0),%d0
mov.b ([0xc,%a6],0x1),%d1
extb.l %d0 # sign extend lo bnd
extb.l %d1 # sign extend hi bnd
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Aw_
_060LSP__cmp2_Aw_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.w ([0xc,%a6],0x0),%d0
mov.w ([0xc,%a6],0x2),%d1
ext.l %d0 # sign extend lo bnd
ext.l %d1 # sign extend hi bnd
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Al_
_060LSP__cmp2_Al_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.l ([0xc,%a6],0x0),%d0
mov.l ([0xc,%a6],0x4),%d1
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Db_
_060LSP__cmp2_Db_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.b ([0xc,%a6],0x0),%d0
mov.b ([0xc,%a6],0x1),%d1
extb.l %d0 # sign extend lo bnd
extb.l %d1 # sign extend hi bnd
# operation is a data register compare.
# sign extend byte to long so we can do simple longword compares.
extb.l %d2 # sign extend data byte
bra.w l_cmp2_cmp # go do the compare emulation
global _060LSP__cmp2_Dw_
_060LSP__cmp2_Dw_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.w ([0xc,%a6],0x0),%d0
mov.w ([0xc,%a6],0x2),%d1
ext.l %d0 # sign extend lo bnd
ext.l %d1 # sign extend hi bnd
# operation is a data register compare.
# sign extend word to long so we can do simple longword compares.
ext.l %d2 # sign extend data word
bra.w l_cmp2_cmp # go emulate compare
global _060LSP__cmp2_Dl_
_060LSP__cmp2_Dl_:
# PROLOGUE BEGIN ########################################################
link.w %a6,&-4
movm.l &0x3800,-(%sp) # save d2-d4
# fmovm.l &0x0,-(%sp) # save no fpregs
# PROLOGUE END ##########################################################
mov.w %cc,CMP2_CC(%a6)
mov.l 0x8(%a6), %d2 # get regval
mov.l ([0xc,%a6],0x0),%d0
mov.l ([0xc,%a6],0x4),%d1
#
# To set the ccodes correctly:
# (1) save 'Z' bit from (Rn - lo)
# (2) save 'Z' and 'N' bits from ((hi - lo) - (Rn - hi))
# (3) keep 'X', 'N', and 'V' from before instruction
# (4) combine ccodes
#
l_cmp2_cmp:
sub.l %d0, %d2 # (Rn - lo)
mov.w %cc, %d3 # fetch resulting ccodes
andi.b &0x4, %d3 # keep 'Z' bit
sub.l %d0, %d1 # (hi - lo)
cmp.l %d1,%d2 # ((hi - lo) - (Rn - hi))
mov.w %cc, %d4 # fetch resulting ccodes
or.b %d4, %d3 # combine w/ earlier ccodes
andi.b &0x5, %d3 # keep 'Z' and 'N'
mov.w CMP2_CC(%a6), %d4 # fetch old ccodes
andi.b &0x1a, %d4 # keep 'X','N','V' bits
or.b %d3, %d4 # insert new ccodes
mov.w %d4,%cc # save new ccodes
# EPILOGUE BEGIN ########################################################
# fmovm.l (%sp)+,&0x0 # restore no fpregs
movm.l (%sp)+,&0x001c # restore d2-d4
unlk %a6
# EPILOGUE END ##########################################################
rts
|
AirFortressIlikara/LS2K0300-linux-4.19
| 293,708
|
arch/m68k/ifpsp060/src/fplsp.S
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
and any warranty against infringement with regard to the SOFTWARE
(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
To the maximum extent permitted by applicable law,
IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
so long as this entire notice is retained without alteration in any modified and/or
redistributed versions, and that such modified versions are clearly identified as such.
No licenses are granted by implication, estoppel or otherwise under any patents
or trademarks of Motorola, Inc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# lfptop.s:
# This file is appended to the top of the 060FPLSP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located here.
#
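#
# Each entry below is 8 bytes long: a 6-byte "bra.l" followed by a 2-byte
# pad word, so entry N sits at (table base + 8*N). As an illustration only
# (the actual call sequence depends on how the package is linked in), a
# caller holding the table base in %a0 could reach the second entry,
# _facosd_, with "jsr (0x8,%a0)".
#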
bra.l _facoss_
short 0x0000
bra.l _facosd_
short 0x0000
bra.l _facosx_
short 0x0000
bra.l _fasins_
short 0x0000
bra.l _fasind_
short 0x0000
bra.l _fasinx_
short 0x0000
bra.l _fatans_
short 0x0000
bra.l _fatand_
short 0x0000
bra.l _fatanx_
short 0x0000
bra.l _fatanhs_
short 0x0000
bra.l _fatanhd_
short 0x0000
bra.l _fatanhx_
short 0x0000
bra.l _fcoss_
short 0x0000
bra.l _fcosd_
short 0x0000
bra.l _fcosx_
short 0x0000
bra.l _fcoshs_
short 0x0000
bra.l _fcoshd_
short 0x0000
bra.l _fcoshx_
short 0x0000
bra.l _fetoxs_
short 0x0000
bra.l _fetoxd_
short 0x0000
bra.l _fetoxx_
short 0x0000
bra.l _fetoxm1s_
short 0x0000
bra.l _fetoxm1d_
short 0x0000
bra.l _fetoxm1x_
short 0x0000
bra.l _fgetexps_
short 0x0000
bra.l _fgetexpd_
short 0x0000
bra.l _fgetexpx_
short 0x0000
bra.l _fgetmans_
short 0x0000
bra.l _fgetmand_
short 0x0000
bra.l _fgetmanx_
short 0x0000
bra.l _flog10s_
short 0x0000
bra.l _flog10d_
short 0x0000
bra.l _flog10x_
short 0x0000
bra.l _flog2s_
short 0x0000
bra.l _flog2d_
short 0x0000
bra.l _flog2x_
short 0x0000
bra.l _flogns_
short 0x0000
bra.l _flognd_
short 0x0000
bra.l _flognx_
short 0x0000
bra.l _flognp1s_
short 0x0000
bra.l _flognp1d_
short 0x0000
bra.l _flognp1x_
short 0x0000
bra.l _fmods_
short 0x0000
bra.l _fmodd_
short 0x0000
bra.l _fmodx_
short 0x0000
bra.l _frems_
short 0x0000
bra.l _fremd_
short 0x0000
bra.l _fremx_
short 0x0000
bra.l _fscales_
short 0x0000
bra.l _fscaled_
short 0x0000
bra.l _fscalex_
short 0x0000
bra.l _fsins_
short 0x0000
bra.l _fsind_
short 0x0000
bra.l _fsinx_
short 0x0000
bra.l _fsincoss_
short 0x0000
bra.l _fsincosd_
short 0x0000
bra.l _fsincosx_
short 0x0000
bra.l _fsinhs_
short 0x0000
bra.l _fsinhd_
short 0x0000
bra.l _fsinhx_
short 0x0000
bra.l _ftans_
short 0x0000
bra.l _ftand_
short 0x0000
bra.l _ftanx_
short 0x0000
bra.l _ftanhs_
short 0x0000
bra.l _ftanhd_
short 0x0000
bra.l _ftanhx_
short 0x0000
bra.l _ftentoxs_
short 0x0000
bra.l _ftentoxd_
short 0x0000
bra.l _ftentoxx_
short 0x0000
bra.l _ftwotoxs_
short 0x0000
bra.l _ftwotoxd_
short 0x0000
bra.l _ftwotoxx_
short 0x0000
bra.l _fabss_
short 0x0000
bra.l _fabsd_
short 0x0000
bra.l _fabsx_
short 0x0000
bra.l _fadds_
short 0x0000
bra.l _faddd_
short 0x0000
bra.l _faddx_
short 0x0000
bra.l _fdivs_
short 0x0000
bra.l _fdivd_
short 0x0000
bra.l _fdivx_
short 0x0000
bra.l _fints_
short 0x0000
bra.l _fintd_
short 0x0000
bra.l _fintx_
short 0x0000
bra.l _fintrzs_
short 0x0000
bra.l _fintrzd_
short 0x0000
bra.l _fintrzx_
short 0x0000
bra.l _fmuls_
short 0x0000
bra.l _fmuld_
short 0x0000
bra.l _fmulx_
short 0x0000
bra.l _fnegs_
short 0x0000
bra.l _fnegd_
short 0x0000
bra.l _fnegx_
short 0x0000
bra.l _fsqrts_
short 0x0000
bra.l _fsqrtd_
short 0x0000
bra.l _fsqrtx_
short 0x0000
bra.l _fsubs_
short 0x0000
bra.l _fsubd_
short 0x0000
bra.l _fsubx_
short 0x0000
# leave room for future possible additions
align 0x400
#
# This file contains a set of define statements for constants
# in order to promote readability within the corecode itself.
#
set LOCAL_SIZE, 192 # stack frame size(bytes)
set LV, -LOCAL_SIZE # stack offset
set EXC_SR, 0x4 # stack status register
set EXC_PC, 0x6 # stack pc
set EXC_VOFF, 0xa # stacked vector offset
set EXC_EA, 0xc # stacked <ea>
set EXC_FP, 0x0 # frame pointer
set EXC_AREGS, -68 # offset of all address regs
set EXC_DREGS, -100 # offset of all data regs
set EXC_FPREGS, -36 # offset of all fp regs
set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
set EXC_A5, EXC_AREGS+(5*4)
set EXC_A4, EXC_AREGS+(4*4)
set EXC_A3, EXC_AREGS+(3*4)
set EXC_A2, EXC_AREGS+(2*4)
set EXC_A1, EXC_AREGS+(1*4)
set EXC_A0, EXC_AREGS+(0*4)
set EXC_D7, EXC_DREGS+(7*4)
set EXC_D6, EXC_DREGS+(6*4)
set EXC_D5, EXC_DREGS+(5*4)
set EXC_D4, EXC_DREGS+(4*4)
set EXC_D3, EXC_DREGS+(3*4)
set EXC_D2, EXC_DREGS+(2*4)
set EXC_D1, EXC_DREGS+(1*4)
set EXC_D0, EXC_DREGS+(0*4)
set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
set FP_SCR1, LV+80 # fp scratch 1
set FP_SCR1_EX, FP_SCR1+0
set FP_SCR1_SGN, FP_SCR1+2
set FP_SCR1_HI, FP_SCR1+4
set FP_SCR1_LO, FP_SCR1+8
set FP_SCR0, LV+68 # fp scratch 0
set FP_SCR0_EX, FP_SCR0+0
set FP_SCR0_SGN, FP_SCR0+2
set FP_SCR0_HI, FP_SCR0+4
set FP_SCR0_LO, FP_SCR0+8
set FP_DST, LV+56 # fp destination operand
set FP_DST_EX, FP_DST+0
set FP_DST_SGN, FP_DST+2
set FP_DST_HI, FP_DST+4
set FP_DST_LO, FP_DST+8
set FP_SRC, LV+44 # fp source operand
set FP_SRC_EX, FP_SRC+0
set FP_SRC_SGN, FP_SRC+2
set FP_SRC_HI, FP_SRC+4
set FP_SRC_LO, FP_SRC+8
set USER_FPIAR, LV+40 # FP instr address register
set USER_FPSR, LV+36 # FP status register
set FPSR_CC, USER_FPSR+0 # FPSR condition codes
set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
set USER_FPCR, LV+32 # FP control register
set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
set L_SCR3, LV+28 # integer scratch 3
set L_SCR2, LV+24 # integer scratch 2
set L_SCR1, LV+20 # integer scratch 1
set STORE_FLG, LV+19 # flag: operand store (ie. not fcmp/ftst)
set EXC_TEMP2, LV+24 # temporary space
set EXC_TEMP, LV+16 # temporary space
set DTAG, LV+15 # destination operand type
set STAG, LV+14 # source operand type
set SPCOND_FLG, LV+10 # flag: special case (see below)
set EXC_CC, LV+8 # saved condition codes
set EXC_EXTWPTR, LV+4 # saved current PC (active)
set EXC_EXTWORD, LV+2 # saved extension word
set EXC_CMDREG, LV+2 # saved extension word
set EXC_OPWORD, LV+0 # saved operation word
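#
# Frame layout relative to %a6, as implied by the offsets above:
#   positive offsets (0x4 and up)   fields of the stacked exception frame
#                                   (SR, PC, vector offset, <ea>)
#   -36(%a6)..-1(%a6)               saved fp0-fp2            (EXC_FPREGS)
#   -68(%a6)..-37(%a6)              saved a0-a7              (EXC_AREGS)
#   -100(%a6)..-69(%a6)             saved d0-d7              (EXC_DREGS)
#   -192(%a6)..-101(%a6)            opword/cmdreg, flags, saved FPCR/FPSR/
#                                   FPIAR, operands and fp scratch (LV+0..LV+91)
#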
################################
# Helpful macros
set FTEMP, 0 # offsets within an
set FTEMP_EX, 0 # extended precision
set FTEMP_SGN, 2 # value saved in memory.
set FTEMP_HI, 4
set FTEMP_LO, 8
set FTEMP_GRS, 12
set LOCAL, 0 # offsets within an
set LOCAL_EX, 0 # extended precision
set LOCAL_SGN, 2 # value saved in memory.
set LOCAL_HI, 4
set LOCAL_LO, 8
set LOCAL_GRS, 12
set DST, 0 # offsets within an
set DST_EX, 0 # extended precision
set DST_HI, 4 # value saved in memory.
set DST_LO, 8
set SRC, 0 # offsets within an
set SRC_EX, 0 # extended precision
set SRC_HI, 4 # value saved in memory.
set SRC_LO, 8
set SGL_LO, 0x3f81 # min sgl prec exponent
set SGL_HI, 0x407e # max sgl prec exponent
set DBL_LO, 0x3c01 # min dbl prec exponent
set DBL_HI, 0x43fe # max dbl prec exponent
set EXT_LO, 0x0 # min ext prec exponent
set EXT_HI, 0x7ffe # max ext prec exponent
set EXT_BIAS, 0x3fff # extended precision bias
set SGL_BIAS, 0x007f # single precision bias
set DBL_BIAS, 0x03ff # double precision bias
set NORM, 0x00 # operand type for STAG/DTAG
set ZERO, 0x01 # operand type for STAG/DTAG
set INF, 0x02 # operand type for STAG/DTAG
set QNAN, 0x03 # operand type for STAG/DTAG
set DENORM, 0x04 # operand type for STAG/DTAG
set SNAN, 0x05 # operand type for STAG/DTAG
set UNNORM, 0x06 # operand type for STAG/DTAG
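#
# These tag values are returned in d0 by the operand-classification
# routine ("tag", reached with "bsr.l tag" in the templates below); each
# entry point keeps a copy in d1 and compares it against ZERO/INF/QNAN to
# pick the proper special-case handler, with everything else falling
# through to the DENORM path.
#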
##################
# FPSR/FPCR bits #
##################
set neg_bit, 0x3 # negative result
set z_bit, 0x2 # zero result
set inf_bit, 0x1 # infinite result
set nan_bit, 0x0 # NAN result
set q_sn_bit, 0x7 # sign bit of quotient byte
set bsun_bit, 7 # branch on unordered
set snan_bit, 6 # signalling NAN
set operr_bit, 5 # operand error
set ovfl_bit, 4 # overflow
set unfl_bit, 3 # underflow
set dz_bit, 2 # divide by zero
set inex2_bit, 1 # inexact result 2
set inex1_bit, 0 # inexact result 1
set aiop_bit, 7 # accrued illegal operation bit
set aovfl_bit, 6 # accrued overflow bit
set aunfl_bit, 5 # accrued underflow bit
set adz_bit, 4 # accrued dz bit
set ainex_bit, 3 # accrued inexact bit
#############################
# FPSR individual bit masks #
#############################
set neg_mask, 0x08000000 # negative bit mask (lw)
set inf_mask, 0x02000000 # infinity bit mask (lw)
set z_mask, 0x04000000 # zero bit mask (lw)
set nan_mask, 0x01000000 # nan bit mask (lw)
set neg_bmask, 0x08 # negative bit mask (byte)
set inf_bmask, 0x02 # infinity bit mask (byte)
set z_bmask, 0x04 # zero bit mask (byte)
set nan_bmask, 0x01 # nan bit mask (byte)
set bsun_mask, 0x00008000 # bsun exception mask
set snan_mask, 0x00004000 # snan exception mask
set operr_mask, 0x00002000 # operr exception mask
set ovfl_mask, 0x00001000 # overflow exception mask
set unfl_mask, 0x00000800 # underflow exception mask
set dz_mask, 0x00000400 # dz exception mask
set inex2_mask, 0x00000200 # inex2 exception mask
set inex1_mask, 0x00000100 # inex1 exception mask
set aiop_mask, 0x00000080 # accrued illegal operation
set aovfl_mask, 0x00000040 # accrued overflow
set aunfl_mask, 0x00000020 # accrued underflow
set adz_mask, 0x00000010 # accrued divide by zero
set ainex_mask, 0x00000008 # accrued inexact
######################################
# FPSR combinations used in the FPSP #
######################################
set dzinf_mask, inf_mask+dz_mask+adz_mask
set opnan_mask, nan_mask+operr_mask+aiop_mask
set nzi_mask, 0x01ffffff #clears N, Z, and I
set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
set inx1a_mask, inex1_mask+ainex_mask
set inx2a_mask, inex2_mask+ainex_mask
set snaniop_mask, nan_mask+snan_mask+aiop_mask
set snaniop2_mask, snan_mask+aiop_mask
set naniop_mask, nan_mask+aiop_mask
set neginf_mask, neg_mask+inf_mask
set infaiop_mask, inf_mask+aiop_mask
set negz_mask, neg_mask+z_mask
set opaop_mask, operr_mask+aiop_mask
set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
#########
# misc. #
#########
set rnd_stky_bit, 29 # stky bit pos in longword
set sign_bit, 0x7 # sign bit
set signan_bit, 0x6 # signalling nan bit
set sgl_thresh, 0x3f81 # minimum sgl exponent
set dbl_thresh, 0x3c01 # minimum dbl exponent
set x_mode, 0x0 # extended precision
set s_mode, 0x4 # single precision
set d_mode, 0x8 # double precision
set rn_mode, 0x0 # round-to-nearest
set rz_mode, 0x1 # round-to-zero
set rm_mode, 0x2 # round-to-minus-infinity
set rp_mode, 0x3 # round-to-plus-infinity
set mantissalen, 64 # length of mantissa in bits
set BYTE, 1 # len(byte) == 1 byte
set WORD, 2 # len(word) == 2 bytes
set LONG, 4 # len(longword) == 4 bytes
set BSUN_VEC, 0xc0 # bsun vector offset
set INEX_VEC, 0xc4 # inexact vector offset
set DZ_VEC, 0xc8 # dz vector offset
set UNFL_VEC, 0xcc # unfl vector offset
set OPERR_VEC, 0xd0 # operr vector offset
set OVFL_VEC, 0xd4 # ovfl vector offset
set SNAN_VEC, 0xd8 # snan vector offset
###########################
# SPecial CONDition FLaGs #
###########################
set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
set fbsun_flg, 0x02 # flag bit: bsun exception
set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
set mda7_flg, 0x08 # flag bit: -(a7) <ea>
set fmovm_flg, 0x40 # flag bit: fmovm instruction
set immed_flg, 0x80 # flag bit: &<data> <ea>
set ftrapcc_bit, 0x0
set fbsun_bit, 0x1
set mia7_bit, 0x2
set mda7_bit, 0x3
set immed_bit, 0x7
##################################
# TRANSCENDENTAL "LAST-OP" FLAGS #
##################################
set FMUL_OP, 0x0 # fmul instr performed last
set FDIV_OP, 0x1 # fdiv performed last
set FADD_OP, 0x2 # fadd performed last
set FMOV_OP, 0x3 # fmov performed last
#############
# CONSTANTS #
#############
T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
TWOBYPI:
long 0x3FE45F30,0x6DC9C883
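# Decoded from the hex patterns above: PI and PIBY2 hold pi and pi/2 in
# extended precision, and TWOBYPI is 2/pi stored as an IEEE double
# (0x3FE45F306DC9C883 ~= 0.63662).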
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fsins_
_fsins_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L0_2s
bsr.l ssin # operand is a NORM
bra.b _L0_6s
_L0_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L0_3s # no
bsr.l src_zero # yes
bra.b _L0_6s
_L0_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L0_4s # no
bsr.l t_operr # yes
bra.b _L0_6s
_L0_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L0_5s # no
bsr.l src_qnan # yes
bra.b _L0_6s
_L0_5s:
bsr.l ssind # operand is a DENORM
_L0_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
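#
# Note on the template above: the argument is fetched from the caller's
# stack at 0x8(%a6) and the result is left in fp0, i.e. the usual m68k
# FPU calling convention. Assuming user code is linked directly against
# this symbol (illustrative assumption only -- callers normally go
# through the branch table at the top of the package), a C caller could
# declare the single-precision entry as:
#     extern float _fsins_(float);
#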
global _fsind_
_fsind_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L0_2d
bsr.l ssin # operand is a NORM
bra.b _L0_6d
_L0_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L0_3d # no
bsr.l src_zero # yes
bra.b _L0_6d
_L0_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L0_4d # no
bsr.l t_operr # yes
bra.b _L0_6d
_L0_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L0_5d # no
bsr.l src_qnan # yes
bra.b _L0_6d
_L0_5d:
bsr.l ssind # operand is a DENORM
_L0_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fsinx_
_fsinx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L0_2x
bsr.l ssin # operand is a NORM
bra.b _L0_6x
_L0_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L0_3x # no
bsr.l src_zero # yes
bra.b _L0_6x
_L0_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L0_4x # no
bsr.l t_operr # yes
bra.b _L0_6x
_L0_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L0_5x # no
bsr.l src_qnan # yes
bra.b _L0_6x
_L0_5x:
bsr.l ssind # operand is a DENORM
_L0_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fcoss_
_fcoss_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L1_2s
bsr.l scos # operand is a NORM
bra.b _L1_6s
_L1_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L1_3s # no
bsr.l ld_pone # yes
bra.b _L1_6s
_L1_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L1_4s # no
bsr.l t_operr # yes
bra.b _L1_6s
_L1_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L1_5s # no
bsr.l src_qnan # yes
bra.b _L1_6s
_L1_5s:
bsr.l scosd # operand is a DENORM
_L1_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fcosd_
_fcosd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L1_2d
bsr.l scos # operand is a NORM
bra.b _L1_6d
_L1_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L1_3d # no
bsr.l ld_pone # yes
bra.b _L1_6d
_L1_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L1_4d # no
bsr.l t_operr # yes
bra.b _L1_6d
_L1_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L1_5d # no
bsr.l src_qnan # yes
bra.b _L1_6d
_L1_5d:
bsr.l scosd # operand is a DENORM
_L1_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fcosx_
_fcosx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L1_2x
bsr.l scos # operand is a NORM
bra.b _L1_6x
_L1_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L1_3x # no
bsr.l ld_pone # yes
bra.b _L1_6x
_L1_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L1_4x # no
bsr.l t_operr # yes
bra.b _L1_6x
_L1_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L1_5x # no
bsr.l src_qnan # yes
bra.b _L1_6x
_L1_5x:
bsr.l scosd # operand is a DENORM
_L1_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fsinhs_
_fsinhs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L2_2s
bsr.l ssinh # operand is a NORM
bra.b _L2_6s
_L2_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L2_3s # no
bsr.l src_zero # yes
bra.b _L2_6s
_L2_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L2_4s # no
bsr.l src_inf # yes
bra.b _L2_6s
_L2_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L2_5s # no
bsr.l src_qnan # yes
bra.b _L2_6s
_L2_5s:
bsr.l ssinhd # operand is a DENORM
_L2_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fsinhd_
_fsinhd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L2_2d
bsr.l ssinh # operand is a NORM
bra.b _L2_6d
_L2_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L2_3d # no
bsr.l src_zero # yes
bra.b _L2_6d
_L2_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L2_4d # no
bsr.l src_inf # yes
bra.b _L2_6d
_L2_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L2_5d # no
bsr.l src_qnan # yes
bra.b _L2_6d
_L2_5d:
bsr.l ssinhd # operand is a DENORM
_L2_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fsinhx_
_fsinhx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L2_2x
bsr.l ssinh # operand is a NORM
bra.b _L2_6x
_L2_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L2_3x # no
bsr.l src_zero # yes
bra.b _L2_6x
_L2_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L2_4x # no
bsr.l src_inf # yes
bra.b _L2_6x
_L2_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L2_5x # no
bsr.l src_qnan # yes
bra.b _L2_6x
_L2_5x:
bsr.l ssinhd # operand is a DENORM
_L2_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _flognp1s_
_flognp1s_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L3_2s
bsr.l slognp1 # operand is a NORM
bra.b _L3_6s
_L3_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L3_3s # no
bsr.l src_zero # yes
bra.b _L3_6s
_L3_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L3_4s # no
bsr.l sopr_inf # yes
bra.b _L3_6s
_L3_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L3_5s # no
bsr.l src_qnan # yes
bra.b _L3_6s
_L3_5s:
bsr.l slognp1d # operand is a DENORM
_L3_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flognp1d_
_flognp1d_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L3_2d
bsr.l slognp1 # operand is a NORM
bra.b _L3_6d
_L3_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L3_3d # no
bsr.l src_zero # yes
bra.b _L3_6d
_L3_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L3_4d # no
bsr.l sopr_inf # yes
bra.b _L3_6d
_L3_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L3_5d # no
bsr.l src_qnan # yes
bra.b _L3_6d
_L3_5d:
bsr.l slognp1d # operand is a DENORM
_L3_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flognp1x_
_flognp1x_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L3_2x
bsr.l slognp1 # operand is a NORM
bra.b _L3_6x
_L3_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L3_3x # no
bsr.l src_zero # yes
bra.b _L3_6x
_L3_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L3_4x # no
bsr.l sopr_inf # yes
bra.b _L3_6x
_L3_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L3_5x # no
bsr.l src_qnan # yes
bra.b _L3_6x
_L3_5x:
bsr.l slognp1d # operand is a DENORM
_L3_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fetoxm1s_
_fetoxm1s_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L4_2s
bsr.l setoxm1 # operand is a NORM
bra.b _L4_6s
_L4_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L4_3s # no
bsr.l src_zero # yes
bra.b _L4_6s
_L4_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L4_4s # no
bsr.l setoxm1i # yes
bra.b _L4_6s
_L4_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L4_5s # no
bsr.l src_qnan # yes
bra.b _L4_6s
_L4_5s:
bsr.l setoxm1d # operand is a DENORM
_L4_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fetoxm1d_
_fetoxm1d_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L4_2d
bsr.l setoxm1 # operand is a NORM
bra.b _L4_6d
_L4_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L4_3d # no
bsr.l src_zero # yes
bra.b _L4_6d
_L4_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L4_4d # no
bsr.l setoxm1i # yes
bra.b _L4_6d
_L4_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L4_5d # no
bsr.l src_qnan # yes
bra.b _L4_6d
_L4_5d:
bsr.l setoxm1d # operand is a DENORM
_L4_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fetoxm1x_
_fetoxm1x_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L4_2x
bsr.l setoxm1 # operand is a NORM
bra.b _L4_6x
_L4_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L4_3x # no
bsr.l src_zero # yes
bra.b _L4_6x
_L4_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L4_4x # no
bsr.l setoxm1i # yes
bra.b _L4_6x
_L4_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L4_5x # no
bsr.l src_qnan # yes
bra.b _L4_6x
_L4_5x:
bsr.l setoxm1d # operand is a DENORM
_L4_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _ftanhs_
_ftanhs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L5_2s
bsr.l stanh # operand is a NORM
bra.b _L5_6s
_L5_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L5_3s # no
bsr.l src_zero # yes
bra.b _L5_6s
_L5_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L5_4s # no
bsr.l src_one # yes
bra.b _L5_6s
_L5_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L5_5s # no
bsr.l src_qnan # yes
bra.b _L5_6s
_L5_5s:
bsr.l stanhd # operand is a DENORM
_L5_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftanhd_
_ftanhd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L5_2d
bsr.l stanh # operand is a NORM
bra.b _L5_6d
_L5_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L5_3d # no
bsr.l src_zero # yes
bra.b _L5_6d
_L5_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L5_4d # no
bsr.l src_one # yes
bra.b _L5_6d
_L5_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L5_5d # no
bsr.l src_qnan # yes
bra.b _L5_6d
_L5_5d:
bsr.l stanhd # operand is a DENORM
_L5_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftanhx_
_ftanhx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L5_2x
bsr.l stanh # operand is a NORM
bra.b _L5_6x
_L5_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L5_3x # no
bsr.l src_zero # yes
bra.b _L5_6x
_L5_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L5_4x # no
bsr.l src_one # yes
bra.b _L5_6x
_L5_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L5_5x # no
bsr.l src_qnan # yes
bra.b _L5_6x
_L5_5x:
bsr.l stanhd # operand is a DENORM
_L5_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fatans_
_fatans_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L6_2s
bsr.l satan # operand is a NORM
bra.b _L6_6s
_L6_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L6_3s # no
bsr.l src_zero # yes
bra.b _L6_6s
_L6_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L6_4s # no
bsr.l spi_2 # yes
bra.b _L6_6s
_L6_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L6_5s # no
bsr.l src_qnan # yes
bra.b _L6_6s
_L6_5s:
bsr.l satand # operand is a DENORM
_L6_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fatand_
_fatand_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L6_2d
bsr.l satan # operand is a NORM
bra.b _L6_6d
_L6_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L6_3d # no
bsr.l src_zero # yes
bra.b _L6_6d
_L6_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L6_4d # no
bsr.l spi_2 # yes
bra.b _L6_6d
_L6_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L6_5d # no
bsr.l src_qnan # yes
bra.b _L6_6d
_L6_5d:
bsr.l satand # operand is a DENORM
_L6_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fatanx_
_fatanx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L6_2x
bsr.l satan # operand is a NORM
bra.b _L6_6x
_L6_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L6_3x # no
bsr.l src_zero # yes
bra.b _L6_6x
_L6_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L6_4x # no
bsr.l spi_2 # yes
bra.b _L6_6x
_L6_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L6_5x # no
bsr.l src_qnan # yes
bra.b _L6_6x
_L6_5x:
bsr.l satand # operand is a DENORM
_L6_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fasins_
_fasins_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L7_2s
bsr.l sasin # operand is a NORM
bra.b _L7_6s
_L7_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L7_3s # no
bsr.l src_zero # yes
bra.b _L7_6s
_L7_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L7_4s # no
bsr.l t_operr # yes
bra.b _L7_6s
_L7_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L7_5s # no
bsr.l src_qnan # yes
bra.b _L7_6s
_L7_5s:
bsr.l sasind # operand is a DENORM
_L7_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fasind_
_fasind_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L7_2d
bsr.l sasin # operand is a NORM
bra.b _L7_6d
_L7_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L7_3d # no
bsr.l src_zero # yes
bra.b _L7_6d
_L7_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L7_4d # no
bsr.l t_operr # yes
bra.b _L7_6d
_L7_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L7_5d # no
bsr.l src_qnan # yes
bra.b _L7_6d
_L7_5d:
bsr.l sasind # operand is a DENORM
_L7_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fasinx_
_fasinx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L7_2x
bsr.l sasin # operand is a NORM
bra.b _L7_6x
_L7_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L7_3x # no
bsr.l src_zero # yes
bra.b _L7_6x
_L7_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L7_4x # no
bsr.l t_operr # yes
bra.b _L7_6x
_L7_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L7_5x # no
bsr.l src_qnan # yes
bra.b _L7_6x
_L7_5x:
bsr.l sasind # operand is a DENORM
_L7_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fatanhs_
_fatanhs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L8_2s
bsr.l satanh # operand is a NORM
bra.b _L8_6s
_L8_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L8_3s # no
bsr.l src_zero # yes
bra.b _L8_6s
_L8_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L8_4s # no
bsr.l t_operr # yes
bra.b _L8_6s
_L8_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L8_5s # no
bsr.l src_qnan # yes
bra.b _L8_6s
_L8_5s:
bsr.l satanhd # operand is a DENORM
_L8_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fatanhd_
_fatanhd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L8_2d
bsr.l satanh # operand is a NORM
bra.b _L8_6d
_L8_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L8_3d # no
bsr.l src_zero # yes
bra.b _L8_6d
_L8_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L8_4d # no
bsr.l t_operr # yes
bra.b _L8_6d
_L8_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L8_5d # no
bsr.l src_qnan # yes
bra.b _L8_6d
_L8_5d:
bsr.l satanhd # operand is a DENORM
_L8_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fatanhx_
_fatanhx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L8_2x
bsr.l satanh # operand is a NORM
bra.b _L8_6x
_L8_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L8_3x # no
bsr.l src_zero # yes
bra.b _L8_6x
_L8_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L8_4x # no
bsr.l t_operr # yes
bra.b _L8_6x
_L8_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L8_5x # no
bsr.l src_qnan # yes
bra.b _L8_6x
_L8_5x:
bsr.l satanhd # operand is a DENORM
_L8_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _ftans_
_ftans_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L9_2s
bsr.l stan # operand is a NORM
bra.b _L9_6s
_L9_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L9_3s # no
bsr.l src_zero # yes
bra.b _L9_6s
_L9_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L9_4s # no
bsr.l t_operr # yes
bra.b _L9_6s
_L9_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L9_5s # no
bsr.l src_qnan # yes
bra.b _L9_6s
_L9_5s:
bsr.l stand # operand is a DENORM
_L9_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftand_
_ftand_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L9_2d
bsr.l stan # operand is a NORM
bra.b _L9_6d
_L9_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L9_3d # no
bsr.l src_zero # yes
bra.b _L9_6d
_L9_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L9_4d # no
bsr.l t_operr # yes
bra.b _L9_6d
_L9_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L9_5d # no
bsr.l src_qnan # yes
bra.b _L9_6d
_L9_5d:
bsr.l stand # operand is a DENORM
_L9_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftanx_
_ftanx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L9_2x
bsr.l stan # operand is a NORM
bra.b _L9_6x
_L9_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L9_3x # no
bsr.l src_zero # yes
bra.b _L9_6x
_L9_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L9_4x # no
bsr.l t_operr # yes
bra.b _L9_6x
_L9_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L9_5x # no
bsr.l src_qnan # yes
bra.b _L9_6x
_L9_5x:
bsr.l stand # operand is a DENORM
_L9_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fetoxs_
_fetoxs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L10_2s
bsr.l setox # operand is a NORM
bra.b _L10_6s
_L10_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L10_3s # no
bsr.l ld_pone # yes
bra.b _L10_6s
_L10_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L10_4s # no
bsr.l szr_inf # yes
bra.b _L10_6s
_L10_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L10_5s # no
bsr.l src_qnan # yes
bra.b _L10_6s
_L10_5s:
bsr.l setoxd # operand is a DENORM
_L10_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fetoxd_
_fetoxd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L10_2d
bsr.l setox # operand is a NORM
bra.b _L10_6d
_L10_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L10_3d # no
bsr.l ld_pone # yes
bra.b _L10_6d
_L10_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L10_4d # no
bsr.l szr_inf # yes
bra.b _L10_6d
_L10_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L10_5d # no
bsr.l src_qnan # yes
bra.b _L10_6d
_L10_5d:
bsr.l setoxd # operand is a DENORM
_L10_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fetoxx_
_fetoxx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L10_2x
bsr.l setox # operand is a NORM
bra.b _L10_6x
_L10_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L10_3x # no
bsr.l ld_pone # yes
bra.b _L10_6x
_L10_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L10_4x # no
bsr.l szr_inf # yes
bra.b _L10_6x
_L10_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L10_5x # no
bsr.l src_qnan # yes
bra.b _L10_6x
_L10_5x:
bsr.l setoxd # operand is a DENORM
_L10_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _ftwotoxs_
_ftwotoxs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L11_2s
bsr.l stwotox # operand is a NORM
bra.b _L11_6s
_L11_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L11_3s # no
bsr.l ld_pone # yes
bra.b _L11_6s
_L11_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L11_4s # no
bsr.l szr_inf # yes
bra.b _L11_6s
_L11_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L11_5s # no
bsr.l src_qnan # yes
bra.b _L11_6s
_L11_5s:
bsr.l stwotoxd # operand is a DENORM
_L11_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftwotoxd_
_ftwotoxd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L11_2d
bsr.l stwotox # operand is a NORM
bra.b _L11_6d
_L11_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L11_3d # no
bsr.l ld_pone # yes
bra.b _L11_6d
_L11_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L11_4d # no
bsr.l szr_inf # yes
bra.b _L11_6d
_L11_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L11_5d # no
bsr.l src_qnan # yes
bra.b _L11_6d
_L11_5d:
bsr.l stwotoxd # operand is a DENORM
_L11_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftwotoxx_
_ftwotoxx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L11_2x
bsr.l stwotox # operand is a NORM
bra.b _L11_6x
_L11_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L11_3x # no
bsr.l ld_pone # yes
bra.b _L11_6x
_L11_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L11_4x # no
bsr.l szr_inf # yes
bra.b _L11_6x
_L11_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L11_5x # no
bsr.l src_qnan # yes
bra.b _L11_6x
_L11_5x:
bsr.l stwotoxd # operand is a DENORM
_L11_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _ftentoxs_
_ftentoxs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L12_2s
bsr.l stentox # operand is a NORM
bra.b _L12_6s
_L12_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L12_3s # no
bsr.l ld_pone # yes
bra.b _L12_6s
_L12_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L12_4s # no
bsr.l szr_inf # yes
bra.b _L12_6s
_L12_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L12_5s # no
bsr.l src_qnan # yes
bra.b _L12_6s
_L12_5s:
bsr.l stentoxd # operand is a DENORM
_L12_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftentoxd_
_ftentoxd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L12_2d
bsr.l stentox # operand is a NORM
bra.b _L12_6d
_L12_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L12_3d # no
bsr.l ld_pone # yes
bra.b _L12_6d
_L12_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L12_4d # no
bsr.l szr_inf # yes
bra.b _L12_6d
_L12_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L12_5d # no
bsr.l src_qnan # yes
bra.b _L12_6d
_L12_5d:
bsr.l stentoxd # operand is a DENORM
_L12_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _ftentoxx_
_ftentoxx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L12_2x
bsr.l stentox # operand is a NORM
bra.b _L12_6x
_L12_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L12_3x # no
bsr.l ld_pone # yes
bra.b _L12_6x
_L12_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L12_4x # no
bsr.l szr_inf # yes
bra.b _L12_6x
_L12_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L12_5x # no
bsr.l src_qnan # yes
bra.b _L12_6x
_L12_5x:
bsr.l stentoxd # operand is a DENORM
_L12_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _flogns_
_flogns_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L13_2s
bsr.l slogn # operand is a NORM
bra.b _L13_6s
_L13_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L13_3s # no
bsr.l t_dz2 # yes
bra.b _L13_6s
_L13_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L13_4s # no
bsr.l sopr_inf # yes
bra.b _L13_6s
_L13_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L13_5s # no
bsr.l src_qnan # yes
bra.b _L13_6s
_L13_5s:
bsr.l slognd # operand is a DENORM
_L13_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flognd_
_flognd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L13_2d
bsr.l slogn # operand is a NORM
bra.b _L13_6d
_L13_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L13_3d # no
bsr.l t_dz2 # yes
bra.b _L13_6d
_L13_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L13_4d # no
bsr.l sopr_inf # yes
bra.b _L13_6d
_L13_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L13_5d # no
bsr.l src_qnan # yes
bra.b _L13_6d
_L13_5d:
bsr.l slognd # operand is a DENORM
_L13_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flognx_
_flognx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L13_2x
bsr.l slogn # operand is a NORM
bra.b _L13_6x
_L13_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L13_3x # no
bsr.l t_dz2 # yes
bra.b _L13_6x
_L13_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L13_4x # no
bsr.l sopr_inf # yes
bra.b _L13_6x
_L13_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L13_5x # no
bsr.l src_qnan # yes
bra.b _L13_6x
_L13_5x:
bsr.l slognd # operand is a DENORM
_L13_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _flog10s_
_flog10s_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L14_2s
bsr.l slog10 # operand is a NORM
bra.b _L14_6s
_L14_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L14_3s # no
bsr.l t_dz2 # yes
bra.b _L14_6s
_L14_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L14_4s # no
bsr.l sopr_inf # yes
bra.b _L14_6s
_L14_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L14_5s # no
bsr.l src_qnan # yes
bra.b _L14_6s
_L14_5s:
bsr.l slog10d # operand is a DENORM
_L14_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flog10d_
_flog10d_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L14_2d
bsr.l slog10 # operand is a NORM
bra.b _L14_6d
_L14_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L14_3d # no
bsr.l t_dz2 # yes
bra.b _L14_6d
_L14_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L14_4d # no
bsr.l sopr_inf # yes
bra.b _L14_6d
_L14_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L14_5d # no
bsr.l src_qnan # yes
bra.b _L14_6d
_L14_5d:
bsr.l slog10d # operand is a DENORM
_L14_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flog10x_
_flog10x_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L14_2x
bsr.l slog10 # operand is a NORM
bra.b _L14_6x
_L14_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L14_3x # no
bsr.l t_dz2 # yes
bra.b _L14_6x
_L14_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L14_4x # no
bsr.l sopr_inf # yes
bra.b _L14_6x
_L14_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L14_5x # no
bsr.l src_qnan # yes
bra.b _L14_6x
_L14_5x:
bsr.l slog10d # operand is a DENORM
_L14_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _flog2s_
_flog2s_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L15_2s
bsr.l slog2 # operand is a NORM
bra.b _L15_6s
_L15_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L15_3s # no
bsr.l t_dz2 # yes
bra.b _L15_6s
_L15_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L15_4s # no
bsr.l sopr_inf # yes
bra.b _L15_6s
_L15_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L15_5s # no
bsr.l src_qnan # yes
bra.b _L15_6s
_L15_5s:
bsr.l slog2d # operand is a DENORM
_L15_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flog2d_
_flog2d_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L15_2d
bsr.l slog2 # operand is a NORM
bra.b _L15_6d
_L15_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L15_3d # no
bsr.l t_dz2 # yes
bra.b _L15_6d
_L15_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L15_4d # no
bsr.l sopr_inf # yes
bra.b _L15_6d
_L15_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L15_5d # no
bsr.l src_qnan # yes
bra.b _L15_6d
_L15_5d:
bsr.l slog2d # operand is a DENORM
_L15_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _flog2x_
_flog2x_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L15_2x
bsr.l slog2 # operand is a NORM
bra.b _L15_6x
_L15_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L15_3x # no
bsr.l t_dz2 # yes
bra.b _L15_6x
_L15_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L15_4x # no
bsr.l sopr_inf # yes
bra.b _L15_6x
_L15_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L15_5x # no
bsr.l src_qnan # yes
bra.b _L15_6x
_L15_5x:
bsr.l slog2d # operand is a DENORM
_L15_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fcoshs_
_fcoshs_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L16_2s
bsr.l scosh # operand is a NORM
bra.b _L16_6s
_L16_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L16_3s # no
bsr.l ld_pone # yes
bra.b _L16_6s
_L16_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L16_4s # no
bsr.l ld_pinf # yes
bra.b _L16_6s
_L16_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L16_5s # no
bsr.l src_qnan # yes
bra.b _L16_6s
_L16_5s:
bsr.l scoshd # operand is a DENORM
_L16_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fcoshd_
_fcoshd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L16_2d
bsr.l scosh # operand is a NORM
bra.b _L16_6d
_L16_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L16_3d # no
bsr.l ld_pone # yes
bra.b _L16_6d
_L16_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L16_4d # no
bsr.l ld_pinf # yes
bra.b _L16_6d
_L16_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L16_5d # no
bsr.l src_qnan # yes
bra.b _L16_6d
_L16_5d:
bsr.l scoshd # operand is a DENORM
_L16_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fcoshx_
_fcoshx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L16_2x
bsr.l scosh # operand is a NORM
bra.b _L16_6x
_L16_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L16_3x # no
bsr.l ld_pone # yes
bra.b _L16_6x
_L16_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L16_4x # no
bsr.l ld_pinf # yes
bra.b _L16_6x
_L16_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L16_5x # no
bsr.l src_qnan # yes
bra.b _L16_6x
_L16_5x:
bsr.l scoshd # operand is a DENORM
_L16_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _facoss_
_facoss_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L17_2s
bsr.l sacos # operand is a NORM
bra.b _L17_6s
_L17_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L17_3s # no
bsr.l ld_ppi2 # yes
bra.b _L17_6s
_L17_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L17_4s # no
bsr.l t_operr # yes
bra.b _L17_6s
_L17_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L17_5s # no
bsr.l src_qnan # yes
bra.b _L17_6s
_L17_5s:
bsr.l sacosd # operand is a DENORM
_L17_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _facosd_
_facosd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L17_2d
bsr.l sacos # operand is a NORM
bra.b _L17_6d
_L17_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L17_3d # no
bsr.l ld_ppi2 # yes
bra.b _L17_6d
_L17_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L17_4d # no
bsr.l t_operr # yes
bra.b _L17_6d
_L17_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L17_5d # no
bsr.l src_qnan # yes
bra.b _L17_6d
_L17_5d:
bsr.l sacosd # operand is a DENORM
_L17_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _facosx_
_facosx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L17_2x
bsr.l sacos # operand is a NORM
bra.b _L17_6x
_L17_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L17_3x # no
bsr.l ld_ppi2 # yes
bra.b _L17_6x
_L17_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L17_4x # no
bsr.l t_operr # yes
bra.b _L17_6x
_L17_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L17_5x # no
bsr.l src_qnan # yes
bra.b _L17_6x
_L17_5x:
bsr.l sacosd # operand is a DENORM
_L17_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fgetexps_
_fgetexps_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L18_2s
bsr.l sgetexp # operand is a NORM
bra.b _L18_6s
_L18_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L18_3s # no
bsr.l src_zero # yes
bra.b _L18_6s
_L18_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L18_4s # no
bsr.l t_operr # yes
bra.b _L18_6s
_L18_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L18_5s # no
bsr.l src_qnan # yes
bra.b _L18_6s
_L18_5s:
bsr.l sgetexpd # operand is a DENORM
_L18_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fgetexpd_
_fgetexpd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L18_2d
bsr.l sgetexp # operand is a NORM
bra.b _L18_6d
_L18_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L18_3d # no
bsr.l src_zero # yes
bra.b _L18_6d
_L18_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L18_4d # no
bsr.l t_operr # yes
bra.b _L18_6d
_L18_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L18_5d # no
bsr.l src_qnan # yes
bra.b _L18_6d
_L18_5d:
bsr.l sgetexpd # operand is a DENORM
_L18_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fgetexpx_
_fgetexpx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L18_2x
bsr.l sgetexp # operand is a NORM
bra.b _L18_6x
_L18_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L18_3x # no
bsr.l src_zero # yes
bra.b _L18_6x
_L18_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L18_4x # no
bsr.l t_operr # yes
bra.b _L18_6x
_L18_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L18_5x # no
bsr.l src_qnan # yes
bra.b _L18_6x
_L18_5x:
bsr.l sgetexpd # operand is a DENORM
_L18_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fgetmans_
_fgetmans_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L19_2s
bsr.l sgetman # operand is a NORM
bra.b _L19_6s
_L19_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L19_3s # no
bsr.l src_zero # yes
bra.b _L19_6s
_L19_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L19_4s # no
bsr.l t_operr # yes
bra.b _L19_6s
_L19_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L19_5s # no
bsr.l src_qnan # yes
bra.b _L19_6s
_L19_5s:
bsr.l sgetmand # operand is a DENORM
_L19_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fgetmand_
_fgetmand_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L19_2d
bsr.l sgetman # operand is a NORM
bra.b _L19_6d
_L19_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L19_3d # no
bsr.l src_zero # yes
bra.b _L19_6d
_L19_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L19_4d # no
bsr.l t_operr # yes
bra.b _L19_6d
_L19_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L19_5d # no
bsr.l src_qnan # yes
bra.b _L19_6d
_L19_5d:
bsr.l sgetmand # operand is a DENORM
_L19_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fgetmanx_
_fgetmanx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L19_2x
bsr.l sgetman # operand is a NORM
bra.b _L19_6x
_L19_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L19_3x # no
bsr.l src_zero # yes
bra.b _L19_6x
_L19_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L19_4x # no
bsr.l t_operr # yes
bra.b _L19_6x
_L19_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L19_5x # no
bsr.l src_qnan # yes
bra.b _L19_6x
_L19_5x:
bsr.l sgetmand # operand is a DENORM
_L19_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# MONADIC TEMPLATE #
#########################################################################
global _fsincoss_
_fsincoss_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L20_2s
bsr.l ssincos # operand is a NORM
bra.b _L20_6s
_L20_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L20_3s # no
bsr.l ssincosz # yes
bra.b _L20_6s
_L20_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L20_4s # no
bsr.l ssincosi # yes
bra.b _L20_6s
_L20_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L20_5s # no
bsr.l ssincosqnan # yes
bra.b _L20_6s
_L20_5s:
bsr.l ssincosd # operand is a DENORM
_L20_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x &0x03,-(%sp) # store off fp0/fp1
fmovm.x (%sp)+,&0x40 # fp0 now in fp1
fmovm.x (%sp)+,&0x80 # fp1 now in fp0
unlk %a6
rts
global _fsincosd_
_fsincosd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl input
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
mov.b %d1,STAG(%a6)
tst.b %d1
bne.b _L20_2d
bsr.l ssincos # operand is a NORM
bra.b _L20_6d
_L20_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L20_3d # no
bsr.l ssincosz # yes
bra.b _L20_6d
_L20_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L20_4d # no
bsr.l ssincosi # yes
bra.b _L20_6d
_L20_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L20_5d # no
bsr.l ssincosqnan # yes
bra.b _L20_6d
_L20_5d:
bsr.l ssincosd # operand is a DENORM
_L20_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x &0x03,-(%sp) # store off fp0/fp1
fmovm.x (%sp)+,&0x40 # fp0 now in fp1
fmovm.x (%sp)+,&0x80 # fp1 now in fp0
unlk %a6
rts
global _fsincosx_
_fsincosx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_SRC(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.b %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
tst.b %d1
bne.b _L20_2x
bsr.l ssincos # operand is a NORM
bra.b _L20_6x
_L20_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L20_3x # no
bsr.l ssincosz # yes
bra.b _L20_6x
_L20_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L20_4x # no
bsr.l ssincosi # yes
bra.b _L20_6x
_L20_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L20_5x # no
bsr.l ssincosqnan # yes
bra.b _L20_6x
_L20_5x:
bsr.l ssincosd # operand is a DENORM
_L20_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x &0x03,-(%sp) # store off fp0/fp1
fmovm.x (%sp)+,&0x40 # fp0 now in fp1
fmovm.x (%sp)+,&0x80 # fp1 now in fp0
unlk %a6
rts
#########################################################################
# DYADIC TEMPLATE #
#########################################################################
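#
# The dyadic entries repeat the monadic pattern with two operands: the
# destination (at 0x8(%a6)) is converted first and its tag stored in DTAG,
# then the source that follows it is converted and tagged in STAG. The
# per-type handlers are entered with a0 = &FP_SRC, a1 = &FP_DST and the
# rounding precision/mode in d0; the dispatch below is on the source tag.
#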
global _frems_
_frems_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.s 0xc(%a6),%fp0 # load sgl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L21_2s
bsr.l srem_snorm # operand is a NORM
bra.b _L21_6s
_L21_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L21_3s # no
bsr.l srem_szero # yes
bra.b _L21_6s
_L21_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L21_4s # no
bsr.l srem_sinf # yes
bra.b _L21_6s
_L21_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L21_5s # no
bsr.l sop_sqnan # yes
bra.b _L21_6s
_L21_5s:
bsr.l srem_sdnrm # operand is a DENORM
_L21_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fremd_
_fremd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.d 0x10(%a6),%fp0 # load dbl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L21_2d
bsr.l srem_snorm # operand is a NORM
bra.b _L21_6d
_L21_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L21_3d # no
bsr.l srem_szero # yes
bra.b _L21_6d
_L21_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L21_4d # no
bsr.l srem_sinf # yes
bra.b _L21_6d
_L21_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L21_5d # no
bsr.l sop_sqnan # yes
bra.b _L21_6d
_L21_5d:
bsr.l srem_sdnrm # operand is a DENORM
_L21_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fremx_
_fremx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_DST(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
lea FP_SRC(%a6),%a0
mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
mov.l 0x14+0x4(%a6),0x4(%a0)
mov.l 0x14+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L21_2x
bsr.l srem_snorm # operand is a NORM
bra.b _L21_6x
_L21_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L21_3x # no
bsr.l srem_szero # yes
bra.b _L21_6x
_L21_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L21_4x # no
bsr.l srem_sinf # yes
bra.b _L21_6x
_L21_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L21_5x # no
bsr.l sop_sqnan # yes
bra.b _L21_6x
_L21_5x:
bsr.l srem_sdnrm # operand is a DENORM
_L21_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# DYADIC TEMPLATE #
#########################################################################
global _fmods_
_fmods_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.s 0xc(%a6),%fp0 # load sgl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L22_2s
bsr.l smod_snorm # operand is a NORM
bra.b _L22_6s
_L22_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L22_3s # no
bsr.l smod_szero # yes
bra.b _L22_6s
_L22_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L22_4s # no
bsr.l smod_sinf # yes
bra.b _L22_6s
_L22_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L22_5s # no
bsr.l sop_sqnan # yes
bra.b _L22_6s
_L22_5s:
bsr.l smod_sdnrm # operand is a DENORM
_L22_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fmodd_
_fmodd_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.d 0x10(%a6),%fp0 # load dbl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L22_2d
bsr.l smod_snorm # operand is a NORM
bra.b _L22_6d
_L22_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L22_3d # no
bsr.l smod_szero # yes
bra.b _L22_6d
_L22_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L22_4d # no
bsr.l smod_sinf # yes
bra.b _L22_6d
_L22_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L22_5d # no
bsr.l sop_sqnan # yes
bra.b _L22_6d
_L22_5d:
bsr.l smod_sdnrm # operand is a DENORM
_L22_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fmodx_
_fmodx_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_DST(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
lea FP_SRC(%a6),%a0
mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
mov.l 0x14+0x4(%a6),0x4(%a0)
mov.l 0x14+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L22_2x
bsr.l smod_snorm # operand is a NORM
bra.b _L22_6x
_L22_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L22_3x # no
bsr.l smod_szero # yes
bra.b _L22_6x
_L22_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L22_4x # no
bsr.l smod_sinf # yes
bra.b _L22_6x
_L22_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L22_5x # no
bsr.l sop_sqnan # yes
bra.b _L22_6x
_L22_5x:
bsr.l smod_sdnrm # operand is a DENORM
_L22_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# DYADIC TEMPLATE #
#########################################################################
global _fscales_
_fscales_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.s 0x8(%a6),%fp0 # load sgl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.s 0xc(%a6),%fp0 # load sgl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L23_2s
bsr.l sscale_snorm # operand is a NORM
bra.b _L23_6s
_L23_2s:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L23_3s # no
bsr.l sscale_szero # yes
bra.b _L23_6s
_L23_3s:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L23_4s # no
bsr.l sscale_sinf # yes
bra.b _L23_6s
_L23_4s:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L23_5s # no
bsr.l sop_sqnan # yes
bra.b _L23_6s
_L23_5s:
bsr.l sscale_sdnrm # operand is a DENORM
_L23_6s:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fscaled_
_fscaled_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
fmov.d 0x8(%a6),%fp0 # load dbl dst
fmov.x %fp0,FP_DST(%a6)
lea FP_DST(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
fmov.d 0x10(%a6),%fp0 # load dbl src
fmov.x %fp0,FP_SRC(%a6)
lea FP_SRC(%a6),%a0
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L23_2d
bsr.l sscale_snorm # operand is a NORM
bra.b _L23_6d
_L23_2d:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L23_3d # no
bsr.l sscale_szero # yes
bra.b _L23_6d
_L23_3d:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L23_4d # no
bsr.l sscale_sinf # yes
bra.b _L23_6d
_L23_4d:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L23_5d # no
bsr.l sop_sqnan # yes
bra.b _L23_6d
_L23_5d:
bsr.l sscale_sdnrm # operand is a DENORM
_L23_6d:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
global _fscalex_
_fscalex_:
link %a6,&-LOCAL_SIZE
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
fmov.l &0x0,%fpcr # zero FPCR
#
# copy, convert, and tag input argument
#
lea FP_DST(%a6),%a0
mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
mov.l 0x8+0x4(%a6),0x4(%a0)
mov.l 0x8+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,DTAG(%a6)
lea FP_SRC(%a6),%a0
mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
mov.l 0x14+0x4(%a6),0x4(%a0)
mov.l 0x14+0x8(%a6),0x8(%a0)
bsr.l tag # fetch operand type
mov.b %d0,STAG(%a6)
mov.l %d0,%d1
andi.l &0x00ff00ff,USER_FPSR(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
lea FP_SRC(%a6),%a0 # pass ptr to src
lea FP_DST(%a6),%a1 # pass ptr to dst
tst.b %d1
bne.b _L23_2x
bsr.l sscale_snorm # operand is a NORM
bra.b _L23_6x
_L23_2x:
cmpi.b %d1,&ZERO # is operand a ZERO?
bne.b _L23_3x # no
bsr.l sscale_szero # yes
bra.b _L23_6x
_L23_3x:
cmpi.b %d1,&INF # is operand an INF?
bne.b _L23_4x # no
bsr.l sscale_sinf # yes
bra.b _L23_6x
_L23_4x:
cmpi.b %d1,&QNAN # is operand a QNAN?
bne.b _L23_5x # no
bsr.l sop_sqnan # yes
bra.b _L23_6x
_L23_5x:
bsr.l sscale_sdnrm # operand is a DENORM
_L23_6x:
#
# Result is now in FP0
#
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
unlk %a6
rts
#########################################################################
# ssin(): computes the sine of a normalized input #
# ssind(): computes the sine of a denormalized input #
# scos(): computes the cosine of a normalized input #
# scosd(): computes the cosine of a denormalized input #
# ssincos(): computes the sine and cosine of a normalized input #
# ssincosd(): computes the sine and cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = sin(X) or cos(X) #
# #
# For ssincos(X): #
# fp0 = sin(X) #
# fp1 = cos(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
# within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# SIN and COS: #
# 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
# #
# 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7. #
# #
# 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 4, so in particular, k = 0,1,2,or 3. #
# Overwrite k by k := k + AdjN. #
# #
# 4. If k is even, go to 6. #
# #
# 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. #
# Return sgn*cos(r) where cos(r) is approximated by an #
# even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)), #
# s = r*r. #
# Exit. #
# #
# 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r) #
# where sin(r) is approximated by an odd polynomial in r #
# r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r. #
# Exit. #
# #
# 7. If |X| > 1, go to 9. #
# #
# 8. (|X|<2**(-40)) If SIN is invoked, return X; #
# otherwise return 1. #
# #
# 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
# go back to 3. #
# #
# SINCOS: #
# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
# #
# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 4, so in particular, k = 0,1,2,or 3. #
# #
# 3. If k is even, go to 5. #
# #
#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.	#
# j1 exclusive or with the l.s.b. of k. #
# sgn1 := (-1)**j1, sgn2 := (-1)**j2. #
# SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where #
# sin(r) and cos(r) are computed as odd and even #
# polynomials in r, respectively. Exit #
# #
# 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1. #
# SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where #
# sin(r) and cos(r) are computed as odd and even #
# polynomials in r, respectively. Exit #
# #
# 6. If |X| > 1, go to 8. #
# #
# 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit. #
# #
# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
# go back to 2. #
# #
#########################################################################
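#
# A minimal C sketch of steps 2-6 of the SIN/COS algorithm above
# (illustrative only: it uses a one-piece PI/2 and leans on libm for the
# polynomial step, so it reproduces neither the two-piece Y1/Y2 table
# reduction nor the A1..A7/B1..B8 polynomials coded below; sin_sketch is
# a hypothetical name, and the sketch assumes |x| is modest, i.e. that the
# general REDUCEX path is not needed and the (long) cast cannot overflow):
#
#	#include <math.h>
#
#	static double sin_sketch(double x)
#	{
#		const double piby2 = 1.57079632679489661923; /* PI/2     */
#		double n = nearbyint(x / piby2);  /* N, X = N*(PI/2) + r */
#		double r = x - n * piby2;	  /* |r| <= PI/4          */
#		int    k = (long)n & 3;		  /* k = N mod 4          */
#		double sgn = (k & 2) ? -1.0 : 1.0;/* (-1)**j              */
#		return (k & 1) ? sgn * cos(r) : sgn * sin(r);
#	}
#
# For COS the only change is k = ((long)n + 1) & 3, which is the AdjN
# adjustment of steps 1 and 3 above.
#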
SINA7: long 0xBD6AAA77,0xCCC994F5
SINA6: long 0x3DE61209,0x7AAE8DA1
SINA5: long 0xBE5AE645,0x2A118AE4
SINA4: long 0x3EC71DE3,0xA5341531
SINA3: long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
SINA2: long 0x3FF80000,0x88888888,0x888859AF,0x00000000
SINA1: long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
COSB8: long 0x3D2AC4D0,0xD6011EE3
COSB7: long 0xBDA9396F,0x9F45AC19
COSB6: long 0x3E21EED9,0x0612C972
COSB5: long 0xBE927E4F,0xB79D9FCF
COSB4: long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
COSB3: long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
COSB2: long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
COSB1: long 0xBF000000
set INARG,FP_SCR0
set X,FP_SCR0
# set XDCARE,X+2
set XFRAC,X+4
set RPRIME,FP_SCR0
set SPRIME,FP_SCR1
set POSNEG1,L_SCR1
set TWOTO63,L_SCR1
set ENDFLAG,L_SCR2
set INT,L_SCR2
set ADJN,L_SCR3
############################################
global ssin
ssin:
mov.l &0,ADJN(%a6) # yes; SET ADJN TO 0
bra.b SINBGN
############################################
global scos
scos:
mov.l &1,ADJN(%a6) # yes; SET ADJN TO 1
############################################
SINBGN:
#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6) # save input at X
# "COMPACTIFY" X
mov.l (%a0),%d1 # put exp in hi word
mov.w 4(%a0),%d1 # fetch hi(man)
and.l &0x7FFFFFFF,%d1 # strip sign
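# d1 now holds |X| in "compact" form: the biased exponent in the upper
# word and the 16 most significant mantissa bits in the lower word; the
# magnitude thresholds below (2**(-40), 15*PI) are compared against this.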
cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
	bge.b		SOK1			# yes; input not very small
	bra.w		SINSM			# no; input is very small
SOK1:
cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
	blt.b		SINMAIN			# yes; use table lookup reduction
	bra.w		SREDUCEX		# no; input is very large
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
SINMAIN:
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
mov.l INT(%a6),%d1 # make a copy of N
asl.l &4,%d1 # N *= 16
add.l %d1,%a1 # tbl_addr = a1 + (N*16)
# A1 IS THE ADDRESS OF N*PIBY2
# ...WHICH IS IN TWO PIECES Y1 & Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # fp0 = R = (X-Y1)-Y2
SINCONT:
#--continuation from REDUCEX
#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
mov.l INT(%a6),%d1
add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN
ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE
cmp.l %d1,&0
blt.w COSPOLY
#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
#--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
#--WHERE T=S*S.
#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
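#--A C sketch of this evaluation order (illustrative only; the function
#--and array names are hypothetical, with A[0]..A[6] standing for the
#--coefficients A1..A7 stored at the labels above):
#--
#--	static double sinpoly(double rp, const double A[7])
#--	{					/* rp = R' = SGN*R */
#--		double s = rp * rp;		/* S = R*R	   */
#--		double t = s * s;		/* T = S*S	   */
#--		double odd  = A[0] + t*(A[2] + t*(A[4] + t*A[6]));
#--		double even = s*(A[1] + t*(A[3] + t*A[5]));
#--		return rp + rp*s*(odd + even);	/* SGN*SIN(R)	   */
#--	}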
SINPOLY:
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.x %fp0,X(%a6) # X IS R
fmul.x %fp0,%fp0 # FP0 IS S
fmov.d SINA7(%pc),%fp3
fmov.d SINA6(%pc),%fp2
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS T
ror.l &1,%d1
and.l &0x80000000,%d1
# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
fmul.x %fp1,%fp3 # TA7
fmul.x %fp1,%fp2 # TA6
fadd.d SINA5(%pc),%fp3 # A5+TA7
fadd.d SINA4(%pc),%fp2 # A4+TA6
fmul.x %fp1,%fp3 # T(A5+TA7)
fmul.x %fp1,%fp2 # T(A4+TA6)
fadd.d SINA3(%pc),%fp3 # A3+T(A5+TA7)
fadd.x SINA2(%pc),%fp2 # A2+T(A4+TA6)
fmul.x %fp3,%fp1 # T(A3+T(A5+TA7))
fmul.x %fp0,%fp2 # S(A2+T(A4+TA6))
fadd.x SINA1(%pc),%fp1 # A1+T(A3+T(A5+TA7))
fmul.x X(%a6),%fp0 # R'*S
fadd.x %fp2,%fp1 # [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
fmul.x %fp1,%fp0 # SIN(R')-R'
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.x X(%a6),%fp0 # last inst - possible exception set
bra t_inx2
#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
#--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
#--WHERE T=S*S.
#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
#--AND IS THEREFORE STORED AS SINGLE PRECISION.
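#--A C sketch of this evaluation order (illustrative only; names are
#--hypothetical, with B[0]..B[7] standing for B1..B8 and sgn for SGN):
#--
#--	static double cospoly(double r, double sgn, const double B[8])
#--	{
#--		double s  = r * r;		/* S  = R*R	*/
#--		double sp = sgn * s;		/* S' = SGN*S	*/
#--		double t  = s * s;		/* T  = S*S	*/
#--		double odd  = B[0] + t*(B[2] + t*(B[4] + t*B[6]));
#--		double even = s*(B[1] + t*(B[3] + t*(B[5] + t*B[7])));
#--		return sgn + sp*(odd + even);	/* SGN*COS(R)	*/
#--	}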
COSPOLY:
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmul.x %fp0,%fp0 # FP0 IS S
fmov.d COSB8(%pc),%fp2
fmov.d COSB7(%pc),%fp3
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS T
fmov.x %fp0,X(%a6) # X IS S
ror.l &1,%d1
and.l &0x80000000,%d1
# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
fmul.x %fp1,%fp2 # TB8
eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
and.l &0x80000000,%d1
fmul.x %fp1,%fp3 # TB7
or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE
mov.l %d1,POSNEG1(%a6)
fadd.d COSB6(%pc),%fp2 # B6+TB8
fadd.d COSB5(%pc),%fp3 # B5+TB7
fmul.x %fp1,%fp2 # T(B6+TB8)
fmul.x %fp1,%fp3 # T(B5+TB7)
fadd.d COSB4(%pc),%fp2 # B4+T(B6+TB8)
fadd.x COSB3(%pc),%fp3 # B3+T(B5+TB7)
fmul.x %fp1,%fp2 # T(B4+T(B6+TB8))
fmul.x %fp3,%fp1 # T(B3+T(B5+TB7))
fadd.x COSB2(%pc),%fp2 # B2+T(B4+T(B6+TB8))
fadd.s COSB1(%pc),%fp1 # B1+T(B3+T(B5+TB7))
fmul.x %fp2,%fp0 # S(B2+T(B4+T(B6+TB8)))
fadd.x %fp1,%fp0
fmul.x X(%a6),%fp0
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.s POSNEG1(%a6),%fp0 # last inst - possible exception set
bra t_inx2
##############################################
# SINe: Big OR Small?
#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
#--IF |X| < 2**(-40), RETURN X OR 1.
SINBORS:
cmp.l %d1,&0x3FFF8000
bgt.l SREDUCEX
SINSM:
mov.l ADJN(%a6),%d1
cmp.l %d1,&0
bgt.b COSTINY
# here, the operation may underflow iff the precision is sgl or dbl.
# extended denorms are handled through another entry point.
SINTINY:
# mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
fmov.l %d0,%fpcr # restore users round mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
COSTINY:
fmov.s &0x3F800000,%fp0 # fp0 = 1.0
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.s &0x80800000,%fp0 # last inst - possible exception set
bra t_pinx2
################################################
global ssind
#--SIN(X) = X FOR DENORMALIZED X
ssind:
bra t_extdnrm
############################################
global scosd
#--COS(X) = 1 FOR DENORMALIZED X
scosd:
fmov.s &0x3F800000,%fp0 # fp0 = 1.0
bra t_pinx2
##################################################
global ssincos
ssincos:
#--SET ADJN TO 4
mov.l &4,ADJN(%a6)
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6)
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
bge.b SCOK1
bra.w SCSM
SCOK1:
cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
blt.b SCMAIN
bra.w SREDUCEX
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
SCMAIN:
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
mov.l INT(%a6),%d1
asl.l &4,%d1
add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
SCCONT:
#--continuation point from REDUCEX
mov.l INT(%a6),%d1
ror.l &1,%d1
cmp.l %d1,&0 # D0 < 0 IFF N IS ODD
bge.w NEVEN
SNODD:
#--REGISTERS SAVED SO FAR: D0, A0, FP2.
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,RPRIME(%a6)
fmul.x %fp0,%fp0 # FP0 IS S = R*R
fmov.d SINA7(%pc),%fp1 # A7
fmov.d COSB8(%pc),%fp2 # B8
fmul.x %fp0,%fp1 # SA7
fmul.x %fp0,%fp2 # SB8
mov.l %d2,-(%sp)
mov.l %d1,%d2
ror.l &1,%d2
and.l &0x80000000,%d2
eor.l %d1,%d2
and.l &0x80000000,%d2
fadd.d SINA6(%pc),%fp1 # A6+SA7
fadd.d COSB7(%pc),%fp2 # B7+SB8
fmul.x %fp0,%fp1 # S(A6+SA7)
eor.l %d2,RPRIME(%a6)
mov.l (%sp)+,%d2
fmul.x %fp0,%fp2 # S(B7+SB8)
ror.l &1,%d1
and.l &0x80000000,%d1
mov.l &0x3F800000,POSNEG1(%a6)
eor.l %d1,POSNEG1(%a6)
fadd.d SINA5(%pc),%fp1 # A5+S(A6+SA7)
fadd.d COSB6(%pc),%fp2 # B6+S(B7+SB8)
fmul.x %fp0,%fp1 # S(A5+S(A6+SA7))
fmul.x %fp0,%fp2 # S(B6+S(B7+SB8))
fmov.x %fp0,SPRIME(%a6)
fadd.d SINA4(%pc),%fp1 # A4+S(A5+S(A6+SA7))
eor.l %d1,SPRIME(%a6)
fadd.d COSB5(%pc),%fp2 # B5+S(B6+S(B7+SB8))
fmul.x %fp0,%fp1 # S(A4+...)
fmul.x %fp0,%fp2 # S(B5+...)
fadd.d SINA3(%pc),%fp1 # A3+S(A4+...)
fadd.d COSB4(%pc),%fp2 # B4+S(B5+...)
fmul.x %fp0,%fp1 # S(A3+...)
fmul.x %fp0,%fp2 # S(B4+...)
fadd.x SINA2(%pc),%fp1 # A2+S(A3+...)
fadd.x COSB3(%pc),%fp2 # B3+S(B4+...)
fmul.x %fp0,%fp1 # S(A2+...)
fmul.x %fp0,%fp2 # S(B3+...)
fadd.x SINA1(%pc),%fp1 # A1+S(A2+...)
fadd.x COSB2(%pc),%fp2 # B2+S(B3+...)
fmul.x %fp0,%fp1 # S(A1+...)
fmul.x %fp2,%fp0 # S(B2+...)
fmul.x RPRIME(%a6),%fp1 # R'S(A1+...)
fadd.s COSB1(%pc),%fp0 # B1+S(B2...)
fmul.x SPRIME(%a6),%fp0 # S'(B1+S(B2+...))
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr
fadd.x RPRIME(%a6),%fp1 # COS(X)
bsr sto_cos # store cosine result
fadd.s POSNEG1(%a6),%fp0 # SIN(X)
bra t_inx2
NEVEN:
#--REGISTERS SAVED SO FAR: FP2.
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,RPRIME(%a6)
fmul.x %fp0,%fp0 # FP0 IS S = R*R
fmov.d COSB8(%pc),%fp1 # B8
fmov.d SINA7(%pc),%fp2 # A7
fmul.x %fp0,%fp1 # SB8
fmov.x %fp0,SPRIME(%a6)
fmul.x %fp0,%fp2 # SA7
ror.l &1,%d1
and.l &0x80000000,%d1
fadd.d COSB7(%pc),%fp1 # B7+SB8
fadd.d SINA6(%pc),%fp2 # A6+SA7
eor.l %d1,RPRIME(%a6)
eor.l %d1,SPRIME(%a6)
fmul.x %fp0,%fp1 # S(B7+SB8)
or.l &0x3F800000,%d1
mov.l %d1,POSNEG1(%a6)
fmul.x %fp0,%fp2 # S(A6+SA7)
fadd.d COSB6(%pc),%fp1 # B6+S(B7+SB8)
fadd.d SINA5(%pc),%fp2 # A5+S(A6+SA7)
fmul.x %fp0,%fp1 # S(B6+S(B7+SB8))
fmul.x %fp0,%fp2 # S(A5+S(A6+SA7))
fadd.d COSB5(%pc),%fp1 # B5+S(B6+S(B7+SB8))
fadd.d SINA4(%pc),%fp2 # A4+S(A5+S(A6+SA7))
fmul.x %fp0,%fp1 # S(B5+...)
fmul.x %fp0,%fp2 # S(A4+...)
fadd.d COSB4(%pc),%fp1 # B4+S(B5+...)
fadd.d SINA3(%pc),%fp2 # A3+S(A4+...)
fmul.x %fp0,%fp1 # S(B4+...)
fmul.x %fp0,%fp2 # S(A3+...)
fadd.x COSB3(%pc),%fp1 # B3+S(B4+...)
fadd.x SINA2(%pc),%fp2 # A2+S(A3+...)
fmul.x %fp0,%fp1 # S(B3+...)
fmul.x %fp0,%fp2 # S(A2+...)
fadd.x COSB2(%pc),%fp1 # B2+S(B3+...)
fadd.x SINA1(%pc),%fp2 # A1+S(A2+...)
fmul.x %fp0,%fp1 # S(B2+...)
fmul.x %fp2,%fp0 # s(a1+...)
fadd.s COSB1(%pc),%fp1 # B1+S(B2...)
fmul.x RPRIME(%a6),%fp0 # R'S(A1+...)
fmul.x SPRIME(%a6),%fp1 # S'(B1+S(B2+...))
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr
fadd.s POSNEG1(%a6),%fp1 # COS(X)
bsr sto_cos # store cosine result
fadd.x RPRIME(%a6),%fp0 # SIN(X)
bra t_inx2
################################################
SCBORS:
cmp.l %d1,&0x3FFF8000
bgt.w SREDUCEX
################################################
SCSM:
# mov.w &0x0000,XDCARE(%a6)
fmov.s &0x3F800000,%fp1
fmov.l %d0,%fpcr
fsub.s &0x00800000,%fp1
bsr sto_cos # store cosine result
fmov.l %fpcr,%d0 # d0 must have fpcr,too
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0
bra t_catch
##############################################
global ssincosd
#--SIN AND COS OF X FOR DENORMALIZED X
ssincosd:
mov.l %d0,-(%sp) # save d0
fmov.s &0x3F800000,%fp1
bsr sto_cos # store cosine result
mov.l (%sp)+,%d0 # restore d0
bra t_extdnrm
############################################
#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
SREDUCEX:
fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
mov.l %d2,-(%sp) # save d2
fmov.s &0x00000000,%fp1 # fp1 = 0
#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
#--there is a danger of unwanted overflow in first LOOP iteration. In this
#--case, reduce argument by one remainder step to make subsequent reduction
#--safe.
cmp.l %d1,&0x7ffeffff # is arg dangerously large?
bne.b SLOOP # no
# yes; create 2**16383*PI/2
mov.w &0x7ffe,FP_SCR0_EX(%a6)
mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
# create low half of 2**16383*PI/2 at FP_SCR1
mov.w &0x7fdc,FP_SCR1_EX(%a6)
mov.l &0x85a308d3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
ftest.x %fp0 # test sign of argument
fblt.w sred_neg
or.b &0x80,FP_SCR0_EX(%a6) # positive arg
or.b &0x80,FP_SCR1_EX(%a6)
sred_neg:
fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
fmov.x %fp0,%fp1 # save high result in fp1
fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
fsub.x %fp0,%fp1 # determine low component of result
fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
#--integer quotient will be stored in N
#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
SLOOP:
fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
mov.w INARG(%a6),%d1
mov.l %d1,%a1 # save a copy of D0
and.l &0x00007FFF,%d1
sub.l &0x00003FFF,%d1 # d0 = K
cmp.l %d1,&28
ble.b SLASTLOOP
SCONTLOOP:
sub.l &27,%d1 # d0 = L := K-27
mov.b &0,ENDFLAG(%a6)
bra.b SWORK
SLASTLOOP:
clr.l %d1 # d0 = L := 0
mov.b &1,ENDFLAG(%a6)
SWORK:
#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
#--2**L * (PIby2_1), 2**L * (PIby2_2)
mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
mov.l &0x4E44152A,FP_SCR0_LO(%a6)
mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
fmov.x %fp0,%fp2
fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
#--US THE DESIRED VALUE IN FLOATING POINT.
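#--Illustrative example (not part of the FPSP): with round-to-nearest in
#--effect and a 64-bit extended mantissa, adding 2**63 forces rounding at
#--the units position.  If, say, fp2 = 5.3 and the argument is positive,
#--    5.3 + 2**63          rounds to  2**63 + 5
#--    (2**63 + 5) - 2**63  =  5
#--so fp2 ends up holding the nearest integer N without a round trip
#--through an integer register.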
mov.l %a1,%d2
swap %d2
and.l &0x80000000,%d2
or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
mov.l %d2,TWOTO63(%a6)
fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
fsub.s TWOTO63(%a6),%fp2 # fp2 = N
# fint.x %fp2
#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
mov.l %d1,%d2 # d2 = L
add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
mov.w %d2,FP_SCR0_EX(%a6)
mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
add.l &0x00003FDD,%d1
mov.w %d1,FP_SCR1_EX(%a6)
mov.l &0x85A308D3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
mov.b ENDFLAG(%a6),%d1
#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
#--P2 = 2**(L) * Piby2_2
fmov.x %fp2,%fp4 # fp4 = N
fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
fmov.x %fp2,%fp5 # fp5 = N
fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
fmov.x %fp4,%fp3 # fp3 = W = N*P1
#--we want P+p = W+w but |p| <= half ulp of P
#--Then, we need to compute A := R-P and a := r-p
fadd.x %fp5,%fp3 # fp3 = P
fsub.x %fp3,%fp4 # fp4 = W-P
fsub.x %fp3,%fp0 # fp0 = A := R - P
fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
fmov.x %fp0,%fp3 # fp3 = A
fsub.x %fp4,%fp1 # fp1 = a := r - p
#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
#--|r| <= half ulp of R.
fadd.x %fp1,%fp0 # fp0 = R := A+a
#--No need to calculate r if this is the last loop
cmp.b %d1,&0
bgt.w SRESTORE
#--Need to calculate r
fsub.x %fp0,%fp3 # fp3 = A-R
fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
bra.w SLOOP
SRESTORE:
fmov.l %fp2,INT(%a6)
mov.l (%sp)+,%d2 # restore d2
fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
mov.l ADJN(%a6),%d1
cmp.l %d1,&4
blt.w SINCONT
bra.w SCCONT
#########################################################################
# stan(): computes the tangent of a normalized input #
# stand(): computes the tangent of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = tan(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bits, i.e.    #
# within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
# #
# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 2, so in particular, k = 0 or 1. #
# #
# 3. If k is odd, go to 5. #
# #
# 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a #
# rational function U/V where #
# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r. #
# Exit. #
# #
# 5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by      #
# a rational function U/V where #
# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r, #
# -Cot(r) = -V/U. Exit. #
# #
# 6. If |X| > 1, go to 8. #
# #
# 7. (|X|<2**(-40)) Tan(X) = X. Exit. #
# #
# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back #
# to 2. #
# #
#########################################################################
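#--Illustrative only (not part of the FPSP): a C sketch of steps 2-5
#--above.  Here p1..p3 and q1..q4 stand for the TANP1-TANP3 and
#--TANQ1-TANQ4 coefficients below, r is the reduced argument, and n is
#--the integer quotient of the Pi/2 reduction (all hypothetical names).
#--
#--    extern const double p1, p2, p3, q1, q2, q3, q4;
#--
#--    double tan_kernel(double r, long n)
#--    {
#--        double s = r * r;
#--        double u = r + r*s*(p1 + s*(p2 + s*p3));
#--        double v = 1.0 + s*(q1 + s*(q2 + s*(q3 + s*q4)));
#--        /* n even: tan(X) = u/v;  n odd: tan(X) = -cot(r) = -v/u */
#--        return (n & 1) ? -v/u : u/v;
#--    }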
TANQ4:
long 0x3EA0B759,0xF50F8688
TANP3:
long 0xBEF2BAA5,0xA8924F04
TANQ3:
long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
TANP2:
long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
TANQ2:
long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
TANP1:
long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
TANQ1:
long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
INVTWOPI:
long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
TWOPI1:
long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
TWOPI2:
long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
#--MOST 69 BITS LONG.
# global PITBL
PITBL:
long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
long 0xC0040000,0x90836524,0x88034B96,0x20B00000
long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
long 0x40040000,0x90836524,0x88034B96,0xA0B00000
long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
set INARG,FP_SCR0
set TWOTO63,L_SCR1
set INT,L_SCR1
set ENDFLAG,L_SCR2
global stan
stan:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
bge.b TANOK1
bra.w TANSM
TANOK1:
cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
blt.b TANMAIN
bra.w REDUCEX
TANMAIN:
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,%d1 # CONVERT TO INTEGER
asl.l &4,%d1
add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
ror.l &5,%d1
and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0
TANCONT:
fmovm.x &0x0c,-(%sp) # save fp2,fp3
cmp.l %d1,&0
blt.w NODD
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # S = R*R
fmov.d TANQ4(%pc),%fp3
fmov.d TANP3(%pc),%fp2
fmul.x %fp1,%fp3 # SQ4
fmul.x %fp1,%fp2 # SP3
fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
fadd.x TANP2(%pc),%fp2 # P2+SP3
fmul.x %fp1,%fp3 # S(Q3+SQ4)
fmul.x %fp1,%fp2 # S(P2+SP3)
fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
fmul.x %fp1,%fp3 # S(Q2+S(Q3+SQ4))
fmul.x %fp1,%fp2 # S(P1+S(P2+SP3))
fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
fmul.x %fp0,%fp2 # RS(P1+S(P2+SP3))
fmul.x %fp3,%fp1 # S(Q1+S(Q2+S(Q3+SQ4)))
fadd.x %fp2,%fp0 # R+RS(P1+S(P2+SP3))
fadd.s &0x3F800000,%fp1 # 1+S(Q1+...)
fmovm.x (%sp)+,&0x30 # restore fp2,fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fdiv.x %fp1,%fp0 # last inst - possible exception set
bra t_inx2
NODD:
fmov.x %fp0,%fp1
fmul.x %fp0,%fp0 # S = R*R
fmov.d TANQ4(%pc),%fp3
fmov.d TANP3(%pc),%fp2
fmul.x %fp0,%fp3 # SQ4
fmul.x %fp0,%fp2 # SP3
fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
fadd.x TANP2(%pc),%fp2 # P2+SP3
fmul.x %fp0,%fp3 # S(Q3+SQ4)
fmul.x %fp0,%fp2 # S(P2+SP3)
fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
fmul.x %fp0,%fp3 # S(Q2+S(Q3+SQ4))
fmul.x %fp0,%fp2 # S(P1+S(P2+SP3))
fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
fmul.x %fp1,%fp2 # RS(P1+S(P2+SP3))
fmul.x %fp3,%fp0 # S(Q1+S(Q2+S(Q3+SQ4)))
fadd.x %fp2,%fp1 # R+RS(P1+S(P2+SP3))
fadd.s &0x3F800000,%fp0 # 1+S(Q1+...)
fmovm.x (%sp)+,&0x30 # restore fp2,fp3
fmov.x %fp1,-(%sp)
eor.l &0x80000000,(%sp)
fmov.l %d0,%fpcr # restore users round mode,prec
fdiv.x (%sp)+,%fp0 # last inst - possible exception set
bra t_inx2
TANBORS:
#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
#--IF |X| < 2**(-40), RETURN X OR 1.
cmp.l %d1,&0x3FFF8000
bgt.b REDUCEX
TANSM:
fmov.x %fp0,-(%sp)
fmov.l %d0,%fpcr # restore users round mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%sp)+,%fp0 # last inst - possible exception set
bra t_catch
global stand
#--TAN(X) = X FOR DENORMALIZED X
stand:
bra t_extdnrm
#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
REDUCEX:
fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
mov.l %d2,-(%sp) # save d2
fmov.s &0x00000000,%fp1 # fp1 = 0
#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
#--there is a danger of unwanted overflow in first LOOP iteration. In this
#--case, reduce argument by one remainder step to make subsequent reduction
#--safe.
cmp.l %d1,&0x7ffeffff # is arg dangerously large?
bne.b LOOP # no
# yes; create 2**16383*PI/2
mov.w &0x7ffe,FP_SCR0_EX(%a6)
mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
# create low half of 2**16383*PI/2 at FP_SCR1
mov.w &0x7fdc,FP_SCR1_EX(%a6)
mov.l &0x85a308d3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
ftest.x %fp0 # test sign of argument
fblt.w red_neg
or.b &0x80,FP_SCR0_EX(%a6) # positive arg
or.b &0x80,FP_SCR1_EX(%a6)
red_neg:
fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
fmov.x %fp0,%fp1 # save high result in fp1
fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
fsub.x %fp0,%fp1 # determine low component of result
fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
#--integer quotient will be stored in N
#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
LOOP:
fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
mov.w INARG(%a6),%d1
mov.l %d1,%a1 # save a copy of D0
and.l &0x00007FFF,%d1
sub.l &0x00003FFF,%d1 # d0 = K
cmp.l %d1,&28
ble.b LASTLOOP
CONTLOOP:
sub.l &27,%d1 # d0 = L := K-27
mov.b &0,ENDFLAG(%a6)
bra.b WORK
LASTLOOP:
clr.l %d1 # d0 = L := 0
mov.b &1,ENDFLAG(%a6)
WORK:
#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
#--2**L * (PIby2_1), 2**L * (PIby2_2)
mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
mov.l &0x4E44152A,FP_SCR0_LO(%a6)
mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
fmov.x %fp0,%fp2
fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
#--US THE DESIRED VALUE IN FLOATING POINT.
mov.l %a1,%d2
swap %d2
and.l &0x80000000,%d2
or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
mov.l %d2,TWOTO63(%a6)
fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
fsub.s TWOTO63(%a6),%fp2 # fp2 = N
# fintrz.x %fp2,%fp2
#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
mov.l %d1,%d2 # d2 = L
add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
mov.w %d2,FP_SCR0_EX(%a6)
mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
add.l &0x00003FDD,%d1
mov.w %d1,FP_SCR1_EX(%a6)
mov.l &0x85A308D3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
mov.b ENDFLAG(%a6),%d1
#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
#--P2 = 2**(L) * Piby2_2
fmov.x %fp2,%fp4 # fp4 = N
fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
fmov.x %fp2,%fp5 # fp5 = N
fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
fmov.x %fp4,%fp3 # fp3 = W = N*P1
#--we want P+p = W+w but |p| <= half ulp of P
#--Then, we need to compute A := R-P and a := r-p
fadd.x %fp5,%fp3 # fp3 = P
fsub.x %fp3,%fp4 # fp4 = W-P
fsub.x %fp3,%fp0 # fp0 = A := R - P
fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
fmov.x %fp0,%fp3 # fp3 = A
fsub.x %fp4,%fp1 # fp1 = a := r - p
#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
#--|r| <= half ulp of R.
fadd.x %fp1,%fp0 # fp0 = R := A+a
#--No need to calculate r if this is the last loop
cmp.b %d1,&0
bgt.w RESTORE
#--Need to calculate r
fsub.x %fp0,%fp3 # fp3 = A-R
fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
bra.w LOOP
RESTORE:
fmov.l %fp2,INT(%a6)
mov.l (%sp)+,%d2 # restore d2
fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
mov.l INT(%a6),%d1
ror.l &1,%d1
bra.w TANCONT
#########################################################################
# satan(): computes the arctangent of a normalized number #
# satand(): computes the arctangent of a denormalized number #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arctan(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bits,         #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5. #
# #
# Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. #
# Note that k = -4, -3,..., or 3. #
# Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 #
# significant bits of X with a bit-1 attached at the 6-th #
# bit position. Define u to be u = (X-F) / (1 + X*F). #
# #
# Step 3. Approximate arctan(u) by a polynomial poly. #
# #
# Step 4. Return arctan(F) + poly, arctan(F) is fetched from a #
# table of values calculated beforehand. Exit. #
# #
# Step 5. If |X| >= 16, go to Step 7. #
# #
# Step 6. Approximate arctan(X) by an odd polynomial in X. Exit. #
# #
# Step 7. Define X' = -1/X. Approximate arctan(X') by an odd #
# polynomial in X'. #
# Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit. #
# #
#########################################################################
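#--Illustrative only (not part of the FPSP): the heart of steps 2-4 in C,
#--assuming f has already been built from x (same sign and exponent, top
#--5 fraction bits kept, 6th bit set) and that atan_f holds the table
#--value ATAN(|F|) with the sign of F attached.  a1..a3 stand for the
#--ATANA1..ATANA3 coefficients below (all hypothetical names).
#--
#--    double u = (x - f) / (1.0 + x*f);  /* small, since f is close to x */
#--    double v = u * u;
#--    double atan_x = atan_f + (u + a1*u*v*(a2 + v*(a3 + v)));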
ATANA3: long 0xBFF6687E,0x314987D8
ATANA2: long 0x4002AC69,0x34A26DB3
ATANA1: long 0xBFC2476F,0x4E1DA28E
ATANB6: long 0x3FB34444,0x7F876989
ATANB5: long 0xBFB744EE,0x7FAF45DB
ATANB4: long 0x3FBC71C6,0x46940220
ATANB3: long 0xBFC24924,0x921872F9
ATANB2: long 0x3FC99999,0x99998FA9
ATANB1: long 0xBFD55555,0x55555555
ATANC5: long 0xBFB70BF3,0x98539E6A
ATANC4: long 0x3FBC7187,0x962D1D7D
ATANC3: long 0xBFC24924,0x827107B8
ATANC2: long 0x3FC99999,0x9996263E
ATANC1: long 0xBFD55555,0x55555536
PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
ATANTBL:
long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set XFRACLO,X+8
set ATANF,FP_SCR1
set ATANFHI,ATANF+4
set ATANFLO,ATANF+8
global satan
#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
satan:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
bge.b ATANOK1
bra.w ATANSM
ATANOK1:
cmp.l %d1,&0x4002FFFF # |X| < 16 ?
ble.b ATANMAIN
bra.w ATANBIG
#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
#--WILL INVOLVE A VERY LONG POLYNOMIAL.
#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
#--WE CHOSE F TO BE +-2^K * 1.BBBB1
#--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
#--SIXTH BITS IS SET TO BE 1. SINCE K = -4, -3, ..., 3, THERE
#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
ATANMAIN:
and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
fmov.x %fp0,%fp1 # FP1 IS X
fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
fsub.x X(%a6),%fp0 # FP0 IS X-F
fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
fdiv.x %fp1,%fp0 # FP0 IS U = (X-F)/(1+X*F)
#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
#--SAVE REGISTERS FP2.
mov.l %d2,-(%sp) # SAVE d2 TEMPORARILY
mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
and.l &0x7FFF0000,%d2 # EXPONENT OF F
sub.l &0x3FFB0000,%d2 # K+4
asr.l &1,%d2
add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
lea ATANTBL(%pc),%a1
add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
mov.l (%a1)+,ATANF(%a6)
mov.l (%a1)+,ATANFHI(%a6)
mov.l (%a1)+,ATANFLO(%a6) # ATANF IS NOW ATAN(|F|)
mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
and.l &0x80000000,%d1 # SIGN(F)
or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
mov.l (%sp)+,%d2 # RESTORE d2
#--THAT'S ALL I HAVE TO DO FOR NOW,
#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
#--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
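#--Illustrative check (not part of the FPSP): writing the natural form as
#--U + U*V*(a1 + V*(a2 + V*a3)) and the form used here as
#--U + A1*U*V*(A2 + V*(A3 + V)), expanding the latter gives
#--U + U*V*(A1*A2 + A1*A3*V + A1*V*V); matching coefficients yields
#--A1 = a3, A2 = a1/a3, A3 = a2/a3, exactly the substitution noted above.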
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1
fmov.d ATANA3(%pc),%fp2
fadd.x %fp1,%fp2 # A3+V
fmul.x %fp1,%fp2 # V*(A3+V)
fmul.x %fp0,%fp1 # U*V
fadd.d ATANA2(%pc),%fp2 # A2+V*(A3+V)
fmul.d ATANA1(%pc),%fp1 # A1*U*V
fmul.x %fp2,%fp1 # A1*U*V*(A2+V*(A3+V))
fadd.x %fp1,%fp0 # ATAN(U), FP1 RELEASED
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr # restore users rnd mode,prec
fadd.x ATANF(%a6),%fp0 # ATAN(X)
bra t_inx2
ATANBORS:
#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
cmp.l %d1,&0x3FFF8000
bgt.w ATANBIG # I.E. |X| >= 16
ATANSM:
#--|X| <= 1/16
#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
#--WHERE Y = X*X, AND Z = Y*Y.
cmp.l %d1,&0x3FD78000
blt.w ATANTINY
#--COMPUTE POLYNOMIAL
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmul.x %fp0,%fp0 # FP0 IS Y = X*X
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
fmov.d ATANB6(%pc),%fp2
fmov.d ATANB5(%pc),%fp3
fmul.x %fp1,%fp2 # Z*B6
fmul.x %fp1,%fp3 # Z*B5
fadd.d ATANB4(%pc),%fp2 # B4+Z*B6
fadd.d ATANB3(%pc),%fp3 # B3+Z*B5
fmul.x %fp1,%fp2 # Z*(B4+Z*B6)
fmul.x %fp3,%fp1 # Z*(B3+Z*B5)
fadd.d ATANB2(%pc),%fp2 # B2+Z*(B4+Z*B6)
fadd.d ATANB1(%pc),%fp1 # B1+Z*(B3+Z*B5)
fmul.x %fp0,%fp2 # Y*(B2+Z*(B4+Z*B6))
fmul.x X(%a6),%fp0 # X*Y
fadd.x %fp2,%fp1 # [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
fmul.x %fp1,%fp0 # X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users rnd mode,prec
fadd.x X(%a6),%fp0
bra t_inx2
ATANTINY:
#--|X| < 2^(-40), ATAN(X) = X
fmov.l %d0,%fpcr # restore users rnd mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
ATANBIG:
#--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
cmp.l %d1,&0x40638000
bgt.w ATANHUGE
#--APPROXIMATE ATAN(-1/X) BY
#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
#--THIS CAN BE RE-WRITTEN AS
#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.s &0xBF800000,%fp1 # LOAD -1
fdiv.x %fp0,%fp1 # FP1 IS -1/X
#--DIVIDE IS STILL CRANKING
fmov.x %fp1,%fp0 # FP0 IS X'
fmul.x %fp0,%fp0 # FP0 IS Y = X'*X'
fmov.x %fp1,X(%a6) # X IS REALLY X'
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
fmov.d ATANC5(%pc),%fp3
fmov.d ATANC4(%pc),%fp2
fmul.x %fp1,%fp3 # Z*C5
fmul.x %fp1,%fp2 # Z*C4
fadd.d ATANC3(%pc),%fp3 # C3+Z*C5
fadd.d ATANC2(%pc),%fp2 # C2+Z*C4
fmul.x %fp3,%fp1 # Z*(C3+Z*C5), FP3 RELEASED
fmul.x %fp0,%fp2 # Y*(C2+Z*C4)
fadd.d ATANC1(%pc),%fp1 # C1+Z*(C3+Z*C5)
fmul.x X(%a6),%fp0 # X'*Y
fadd.x %fp2,%fp1 # [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
fmul.x %fp1,%fp0 # X'*Y*([C1+Z*(C3+Z*C5)]
# ... +[Y*(C2+Z*C4)])
fadd.x X(%a6),%fp0
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users rnd mode,prec
tst.b (%a0)
bpl.b pos_big
neg_big:
fadd.x NPIBY2(%pc),%fp0
bra t_minx2
pos_big:
fadd.x PPIBY2(%pc),%fp0
bra t_pinx2
ATANHUGE:
#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
tst.b (%a0)
bpl.b pos_huge
neg_huge:
fmov.x NPIBY2(%pc),%fp0
fmov.l %d0,%fpcr
fadd.x PTINY(%pc),%fp0
bra t_minx2
pos_huge:
fmov.x PPIBY2(%pc),%fp0
fmov.l %d0,%fpcr
fadd.x NTINY(%pc),%fp0
bra t_pinx2
global satand
#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
satand:
bra t_extdnrm
#########################################################################
# sasin(): computes the inverse sine of a normalized input #
# sasind(): computes the inverse sine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arcsin(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bits,         #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ASIN #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate asin(X) by #
# z := sqrt( [1-X][1+X] ) #
# asin(X) = atan( x / z ). #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
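#--Illustrative check of the step 2 identity (not part of the FPSP):
#--for X = 1/2,
#--    z = sqrt((1-X)(1+X)) = sqrt(3)/2,
#--    X/z = 1/sqrt(3),   atan(1/sqrt(3)) = Pi/6 = asin(1/2).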
global sasin
sasin:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ASINBIG
# This catch is added here for the '060 QSP. Originally, the call to
# satan() would handle this case by causing the exception which would
# not be caught until gen_except(). Now, with the exceptions being
# detected inside of satan(), the exception would have been handled there
# instead of inside sasin() as expected.
cmp.l %d1,&0x3FD78000
blt.w ASINTINY
#--THIS IS THE USUAL CASE, |X| < 1
#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
ASINMAIN:
fmov.s &0x3F800000,%fp1
fsub.x %fp0,%fp1 # 1-X
fmovm.x &0x4,-(%sp) # {fp2}
fmov.s &0x3F800000,%fp2
fadd.x %fp0,%fp2 # 1+X
fmul.x %fp2,%fp1 # (1+X)(1-X)
fmovm.x (%sp)+,&0x20 # {fp2}
fsqrt.x %fp1 # SQRT([1-X][1+X])
fdiv.x %fp1,%fp0 # X/SQRT([1-X][1+X])
fmovm.x &0x01,-(%sp) # save X/SQRT(...)
lea (%sp),%a0 # pass ptr to X/SQRT(...)
bsr satan
add.l &0xc,%sp # clear X/SQRT(...) from stack
bra t_inx2
ASINBIG:
fabs.x %fp0 # |X|
fcmp.s %fp0,&0x3F800000
fbgt t_operr # cause an operr exception
#--|X| = 1, ASIN(X) = +- PI/2.
ASINONE:
fmov.x PIBY2(%pc),%fp0
mov.l (%a0),%d1
and.l &0x80000000,%d1 # SIGN BIT OF X
or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
fmov.l %d0,%fpcr
fmul.s (%sp)+,%fp0
bra t_inx2
#--|X| < 2^(-40), ATAN(X) = X
ASINTINY:
fmov.l %d0,%fpcr # restore users rnd mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%a0),%fp0 # last inst - possible exception
bra t_catch
global sasind
#--ASIN(X) = X FOR DENORMALIZED X
sasind:
bra t_extdnrm
#########################################################################
# sacos(): computes the inverse cosine of a normalized input #
# sacosd(): computes the inverse cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arccos(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bits,         #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ACOS #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate acos(X) by #
# z := (1-X) / (1+X) #
# acos(X) = 2 * atan( sqrt(z) ). #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
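#--Illustrative check of the step 2 identity (not part of the FPSP):
#--for X = 1/2,
#--    z = (1-X)/(1+X) = 1/3,
#--    2*atan(sqrt(1/3)) = 2*(Pi/6) = Pi/3 = acos(1/2).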
global sacos
sacos:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ACOSBIG
#--THIS IS THE USUAL CASE, |X| < 1
#--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
ACOSMAIN:
fmov.s &0x3F800000,%fp1
fadd.x %fp0,%fp1 # 1+X
fneg.x %fp0 # -X
fadd.s &0x3F800000,%fp0 # 1-X
fdiv.x %fp1,%fp0 # (1-X)/(1+X)
fsqrt.x %fp0 # SQRT((1-X)/(1+X))
mov.l %d0,-(%sp) # save original users fpcr
clr.l %d0
fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
lea (%sp),%a0 # pass ptr to sqrt
bsr satan # ATAN(SQRT([1-X]/[1+X]))
add.l &0xc,%sp # clear SQRT(...) from stack
fmov.l (%sp)+,%fpcr # restore users round prec,mode
fadd.x %fp0,%fp0 # 2 * ATAN( STUFF )
bra t_pinx2
ACOSBIG:
fabs.x %fp0
fcmp.s %fp0,&0x3F800000
fbgt t_operr # cause an operr exception
#--|X| = 1, ACOS(X) = 0 OR PI
tst.b (%a0) # is X positive or negative?
bpl.b ACOSP1
#--X = -1
#Returns PI and inexact exception
ACOSM1:
fmov.x PI(%pc),%fp0 # load PI
fmov.l %d0,%fpcr # load round mode,prec
fadd.s &0x00800000,%fp0 # add a small value
bra t_pinx2
ACOSP1:
bra ld_pzero # answer is positive zero
global sacosd
#--ACOS(X) = PI/2 FOR DENORMALIZED X
sacosd:
fmov.l %d0,%fpcr # load user's rnd mode/prec
fmov.x PIBY2(%pc),%fp0
bra t_pinx2
#########################################################################
# setox(): computes the exponential for a normalized input #
# setoxd(): computes the exponential for a denormalized input #
# setoxm1(): computes the exponential minus 1 for a normalized input #
# setoxm1d(): computes the exponential minus 1 for a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = exp(X) or exp(X)-1 #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 0.85 ulps in 64 significant bits,      #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM and IMPLEMENTATION **************************************** #
# #
# setoxd #
# ------ #
# Step 1. Set ans := 1.0 #
# #
# Step 2. Return ans := ans + sign(X)*2^(-126). Exit. #
# Notes: This will always generate one exception -- inexact. #
# #
# #
# setox #
# ----- #
# #
# Step 1. Filter out extreme cases of input argument. #
# 1.1 If |X| >= 2^(-65), go to Step 1.3. #
# 1.2 Go to Step 7. #
# 1.3 If |X| < 16380 log(2), go to Step 2. #
# 1.4 Go to Step 8. #
# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
# To avoid the use of floating-point comparisons, a #
# compact representation of |X| is used. This format is a #
# 32-bit integer, the upper (more significant) 16 bits #
# are the sign and biased exponent field of |X|; the #
# lower 16 bits are the 16 most significant fraction #
# (including the explicit bit) bits of |X|. Consequently, #
# the comparisons in Steps 1.1 and 1.3 can be performed #
# by integer comparison. Note also that the constant #
# 16380 log(2) used in Step 1.3 is also in the compact #
# form. Thus taking the branch to Step 2 guarantees #
# |X| < 16380 log(2). There is no harm to have a small #
# number of cases where |X| is less than, but close to, #
# 16380 log(2) and the branch to Step 9 is taken. #
# #
# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
# 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
# was taken) #
# 2.2 N := round-to-nearest-integer( X * 64/log2 ). #
# 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
# or 63. #
# 2.4 Calculate M = (N - J)/64; so N = 64M + J. #
# 2.5 Calculate the address of the stored value of #
# 2^(J/64). #
# 2.6 Create the value Scale = 2^M. #
# Notes: The calculation in 2.2 is really performed by #
# Z := X * constant #
# N := round-to-nearest-integer(Z) #
# where #
# constant := single-precision( 64/log 2 ). #
# #
# Using a single-precision constant avoids memory #
# access. Another effect of using a single-precision #
# "constant" is that the calculated value Z is #
# #
# Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24). #
# #
# This error has to be considered later in Steps 3 and 4. #
# #
# Step 3. Calculate X - N*log2/64. #
# 3.1 R := X + N*L1, #
# where L1 := single-precision(-log2/64). #
# 3.2 R := R + N*L2, #
# L2 := extended-precision(-log2/64 - L1).#
# Notes: a) The way L1 and L2 are chosen ensures L1+L2 #
# approximate the value -log2/64 to 88 bits of accuracy. #
# b) N*L1 is exact because N is no longer than 22 bits #
# and L1 is no longer than 24 bits. #
# c) The calculation X+N*L1 is also exact due to #
# cancellation. Thus, R is practically X+N(L1+L2) to full #
# 64 bits. #
# d) It is important to estimate how large can |R| be #
# after Step 3.2. #
# #
# N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24) #
# X*64/log2 (1+eps) = N + f, |f| <= 0.5 #
# X*64/log2 - N = f - eps*X 64/log2 #
# X - N*log2/64 = f*log2/64 - eps*X #
# #
# #
# Now |X| <= 16446 log2, thus #
# #
# |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64 #
# <= 0.57 log2/64. #
# This bound will be used in Step 4. #
# #
# Step 4. Approximate exp(R)-1 by a polynomial #
# p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5)))) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: A1 (which is 1/2), A4 #
# and A5 are single precision; A2 and A3 are double #
# precision. #
# b) Even with the restrictions above, #
# |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062. #
# Note that 0.0062 is slightly bigger than 0.57 log2/64. #
# c) To fully utilize the pipeline, p is separated into #
# two independent pieces of roughly equal complexities #
# p = [ R + R*S*(A2 + S*A4) ] + #
# [ S*(A1 + S*(A3 + S*A5)) ] #
# where S = R*R. #
# #
# Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by #
# ans := T + ( T*p + t) #
# where T and t are the stored values for 2^(J/64). #
# Notes: 2^(J/64) is stored as T and t where T+t approximates #
# 2^(J/64) to roughly 85 bits; T is in extended precision #
# and t is in single precision. Note also that T is #
# rounded to 62 bits so that the last two bits of T are #
# zero. The reason for such a special form is that T-1, #
# T-2, and T-8 will all be exact --- a property that will #
# give much more accurate computation of the function #
# EXPM1. #
# #
# Step 6. Reconstruction of exp(X) #
# exp(X) = 2^M * 2^(J/64) * exp(R). #
# 6.1 If AdjFlag = 0, go to 6.3 #
# 6.2 ans := ans * AdjScale #
# 6.3 Restore the user FPCR #
# 6.4 Return ans := ans * Scale. Exit. #
# Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
# |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will #
# neither overflow nor underflow. If AdjFlag = 1, that #
# means that #
# X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380. #
# Hence, exp(X) may overflow or underflow or neither. #
# When that is the case, AdjScale = 2^(M1) where M1 is #
# approximately M. Thus 6.2 will never cause #
# over/underflow. Possible exception in 6.4 is overflow #
# or underflow. The inexact exception is not generated in #
# 6.4. Although one can argue that the inexact flag #
# should always be raised, simulating that exception costs            #
# more than the flag is worth in practical use.                        #
# #
# Step 7. Return 1 + X. #
# 7.1 ans := X #
# 7.2 Restore user FPCR. #
# 7.3 Return ans := 1 + ans. Exit #
# Notes: For non-zero X, the inexact exception will always be #
# raised by 7.3. That is the only exception raised by 7.3.#
# Note also that we use the FMOVEM instruction to move X #
# in Step 7.1 to avoid unnecessary trapping. (Although #
# the FMOVEM may not seem relevant since X is normalized, #
# the precaution will be useful in the library version of #
# this code where the separate entry for denormalized #
# inputs will be done away with.) #
# #
# Step 8. Handle exp(X) where |X| >= 16380log2. #
# 8.1 If |X| > 16480 log2, go to Step 9. #
# (mimic 2.2 - 2.6) #
# 8.2 N := round-to-integer( X * 64/log2 ) #
# 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
# 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, #
# AdjFlag := 1. #
# 8.5 Calculate the address of the stored value #
# 2^(J/64). #
# 8.6 Create the values Scale = 2^M, AdjScale = 2^M1. #
# 8.7 Go to Step 3. #
# Notes: Refer to notes for 2.2 - 2.6. #
# #
# Step 9. Handle exp(X), |X| > 16480 log2. #
# 9.1 If X < 0, go to 9.3 #
# 9.2 ans := Huge, go to 9.4 #
# 9.3 ans := Tiny. #
# 9.4 Restore user FPCR. #
# 9.5 Return ans := ans * ans. Exit. #
# Notes: Exp(X) will surely overflow or underflow, depending on #
# X's sign. "Huge" and "Tiny" are respectively large/tiny #
# extended-precision numbers whose square over/underflow #
# with an inexact result. Thus, 9.5 always raises the #
# inexact together with either overflow or underflow. #
# #
# setoxm1d #
# -------- #
# #
# Step 1. Set ans := 0 #
# #
# Step 2. Return ans := X + ans. Exit. #
# Notes: This will return X with the appropriate rounding #
# precision prescribed by the user FPCR. #
# #
# setoxm1 #
# ------- #
# #
# Step 1. Check |X| #
# 1.1 If |X| >= 1/4, go to Step 1.3. #
# 1.2 Go to Step 7. #
# 1.3 If |X| < 70 log(2), go to Step 2. #
# 1.4 Go to Step 10. #
# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
# However, it is conceivable |X| can be small very often #
# because EXPM1 is intended to evaluate exp(X)-1 #
# accurately when |X| is small. For further details on #
# the comparisons, see the notes on Step 1 of setox. #
# #
# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
# 2.1 N := round-to-nearest-integer( X * 64/log2 ). #
# 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
# or 63. #
# 2.3 Calculate M = (N - J)/64; so N = 64M + J. #
# 2.4 Calculate the address of the stored value of #
# 2^(J/64). #
# 2.5 Create the values Sc = 2^M and #
# OnebySc := -2^(-M). #
# Notes: See the notes on Step 2 of setox. #
# #
# Step 3. Calculate X - N*log2/64. #
# 3.1 R := X + N*L1, #
# where L1 := single-precision(-log2/64). #
# 3.2 R := R + N*L2, #
# L2 := extended-precision(-log2/64 - L1).#
# Notes: Applying the analysis of Step 3 of setox in this case #
# shows that |R| <= 0.0055 (note that |X| <= 70 log2 in #
# this case). #
# #
# Step 4. Approximate exp(R)-1 by a polynomial #
# p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6))))) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: A1 (which is 1/2), A5 #
# and A6 are single precision; A2, A3 and A4 are double #
# precision. #
# b) Even with the restriction above, #
# |p - (exp(R)-1)| < |R| * 2^(-72.7) #
# for all |R| <= 0.0055. #
# c) To fully utilize the pipeline, p is separated into #
# two independent pieces of roughly equal complexity #
# p = [ R*S*(A2 + S*(A4 + S*A6)) ] + #
# [ R + S*(A1 + S*(A3 + S*A5)) ] #
# where S = R*R. #
# #
# Step 5. Compute 2^(J/64)*p by #
# p := T*p #
# where T and t are the stored values for 2^(J/64). #
# Notes: 2^(J/64) is stored as T and t where T+t approximates #
# 2^(J/64) to roughly 85 bits; T is in extended precision #
# and t is in single precision. Note also that T is #
# rounded to 62 bits so that the last two bits of T are #
# zero. The reason for such a special form is that T-1, #
# T-2, and T-8 will all be exact --- a property that will #
# be exploited in Step 6 below. The total relative error #
# in p is no bigger than 2^(-67.7) compared to the final #
# result. #
# #
# Step 6. Reconstruction of exp(X)-1 #
# exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ). #
# 6.1 If M <= 63, go to Step 6.3. #
# 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6 #
# 6.3 If M >= -3, go to 6.5. #
# 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6 #
# 6.5 ans := (T + OnebySc) + (p + t). #
# 6.6 Restore user FPCR. #
# 6.7 Return ans := Sc * ans. Exit. #
# Notes: The various arrangements of the expressions give #
# accurate evaluations. #
# #
# Step 7. exp(X)-1 for |X| < 1/4. #
# 7.1 If |X| >= 2^(-65), go to Step 9. #
# 7.2 Go to Step 8. #
# #
# Step 8. Calculate exp(X)-1, |X| < 2^(-65). #
# 8.1 If |X| < 2^(-16312), goto 8.3 #
# 8.2 Restore FPCR; return ans := X - 2^(-16382). #
# Exit. #
# 8.3 X := X * 2^(140). #
# 8.4 Restore FPCR; ans := ans - 2^(-16382). #
# Return ans := ans*2^(140). Exit #
# Notes: The idea is to return "X - tiny" under the user #
# precision and rounding modes. To avoid unnecessary #
# inefficiency, we stay away from denormalized numbers #
# the best we can. For |X| >= 2^(-16312), the #
# straightforward 8.2 generates the inexact exception as #
# the case warrants. #
# #
# Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial #
# p = X + X*X*(B1 + X*(B2 + ... + X*B12)) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: B1 (which is 1/2), B9 #
# to B12 are single precision; B3 to B8 are double #
# precision; and B2 is double extended. #
# b) Even with the restriction above, #
# |p - (exp(X)-1)| < |X| 2^(-70.6) #
# for all |X| <= 0.251. #
# Note that 0.251 is slightly bigger than 1/4. #
# c) To fully preserve accuracy, the polynomial is #
# computed as #
# X + ( S*B1 + Q ) where S = X*X and #
# Q = X*S*(B2 + X*(B3 + ... + X*B12)) #
# d) To fully utilize the pipeline, Q is separated into #
# two independent pieces of roughly equal complexity #
# Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] + #
# [ S*S*(B3 + S*(B5 + ... + S*B11)) ] #
# #
# Step 10. Calculate exp(X)-1 for |X| >= 70 log 2. #
# 10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all #
# practical purposes. Therefore, go to Step 1 of setox. #
# 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical #
# purposes. #
# ans := -1 #
# Restore user FPCR #
# Return ans := ans + 2^(-126). Exit. #
# Notes: 10.2 will always create an inexact and return -1 + tiny #
# in the user rounding precision and mode. #
# #
#########################################################################
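#--Illustrative only (not part of the FPSP): a C sketch of Steps 2-6 of
#--setox.  exp2_j64[] stands for the leading parts of the EEXPTBL entries
#--below, expm1_poly() for the Step 4 polynomial, and l1/l2 for the two
#--pieces of -log2/64 from Step 3 (all hypothetical names).  The sketch
#--works in double rather than extended precision and omits the
#--over/underflow handling of Steps 8-9.
#--
#--    #include <math.h>
#--    extern const double l1, l2, exp2_j64[64];
#--    extern double expm1_poly(double r);
#--
#--    double exp_sketch(double x)
#--    {
#--        long   n = (long)nearbyint(x * 64.0 / M_LN2);
#--        long   j = n & 63;                /* J = N mod 64      */
#--        long   m = (n - j) >> 6;          /* N = 64*M + J      */
#--        double r = (x + n*l1) + n*l2;     /* x - n*log(2)/64   */
#--        double p = expm1_poly(r);         /* ~ exp(r) - 1      */
#--        return scalbn(exp2_j64[j] * (1.0 + p), (int)m);  /* *2**M */
#--    }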
L2: long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
EEXPA3: long 0x3FA55555,0x55554CC1
EEXPA2: long 0x3FC55555,0x55554A54
EM1A4: long 0x3F811111,0x11174385
EM1A3: long 0x3FA55555,0x55554F5A
EM1A2: long 0x3FC55555,0x55555555,0x00000000,0x00000000
EM1B8: long 0x3EC71DE3,0xA5774682
EM1B7: long 0x3EFA01A0,0x19D7CB68
EM1B6: long 0x3F2A01A0,0x1A019DF3
EM1B5: long 0x3F56C16C,0x16C170E2
EM1B4: long 0x3F811111,0x11111111
EM1B3: long 0x3FA55555,0x55555555
EM1B2: long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
long 0x00000000
TWO140: long 0x48B00000,0x00000000
TWON140:
long 0x37300000,0x00000000
EEXPTBL:
long 0x3FFF0000,0x80000000,0x00000000,0x00000000
long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
set ADJFLAG,L_SCR2
set SCALE,FP_SCR0
set ADJSCALE,FP_SCR1
set SC,FP_SCR0
set ONEBYSC,FP_SCR1
global setox
setox:
#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
#--Step 1.
mov.l (%a0),%d1 # load part of input X
and.l &0x7FFF0000,%d1 # biased expo. of X
cmp.l %d1,&0x3FBE0000 # 2^(-65)
bge.b EXPC1 # normal case
bra EXPSM
EXPC1:
#--The case |X| >= 2^(-65)
mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
blt.b EXPMAIN # normal case
bra EEXPBIG
EXPMAIN:
#--Step 2.
#--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
mov.l &0,ADJFLAG(%a6)
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M)
mov.w L2(%pc),L_SCR1(%a6) # prefetch L2, no need in CB
EXPCONT1:
#--Step 3.
#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
#--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
fmov.x %fp0,%fp2
fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
fadd.x %fp1,%fp0 # X + N*L1
fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
#--Step 4.
#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # fp1 IS S = R*R
fmov.s &0x3AB60B70,%fp2 # fp2 IS A5
fmul.x %fp1,%fp2 # fp2 IS S*A5
fmov.x %fp1,%fp3
fmul.s &0x3C088895,%fp3 # fp3 IS S*A4
fadd.d EEXPA3(%pc),%fp2 # fp2 IS A3+S*A5
fadd.d EEXPA2(%pc),%fp3 # fp3 IS A2+S*A4
fmul.x %fp1,%fp2 # fp2 IS S*(A3+S*A5)
mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended
mov.l &0x80000000,SCALE+4(%a6)
clr.l SCALE+8(%a6)
fmul.x %fp1,%fp3 # fp3 IS S*(A2+S*A4)
fadd.s &0x3F000000,%fp2 # fp2 IS A1+S*(A3+S*A5)
fmul.x %fp0,%fp3 # fp3 IS R*S*(A2+S*A4)
fmul.x %fp1,%fp2 # fp2 IS S*(A1+S*(A3+S*A5))
fadd.x %fp3,%fp0 # fp0 IS R+R*S*(A2+S*A4),
fmov.x (%a1)+,%fp1 # fp1 is lead. pt. of 2^(J/64)
fadd.x %fp2,%fp0 # fp0 is EXP(R) - 1
#--Step 5
#--final reconstruction process
#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
fmul.x %fp1,%fp0 # 2^(J/64)*(Exp(R)-1)
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
fadd.s (%a1),%fp0 # accurate 2^(J/64)
fadd.x %fp1,%fp0 # 2^(J/64) + 2^(J/64)*...
mov.l ADJFLAG(%a6),%d1
#--Step 6
tst.l %d1
beq.b NORMAL
ADJUST:
fmul.x ADJSCALE(%a6),%fp0
NORMAL:
fmov.l %d0,%fpcr # restore user FPCR
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x SCALE(%a6),%fp0 # multiply 2^(M)
bra t_catch
EXPSM:
#--Step 7
fmovm.x (%a0),&0x80 # load X
fmov.l %d0,%fpcr
fadd.s &0x3F800000,%fp0 # 1+X in user mode
bra t_pinx2
EEXPBIG:
#--Step 8
cmp.l %d1,&0x400CB27C # 16480 log2
bgt.b EXP2BIG
#--Steps 8.2 -- 8.6
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
mov.l &1,ADJFLAG(%a6)
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is K
mov.l %d1,L_SCR1(%a6) # save K temporarily
asr.l &1,%d1 # D0 is M1
sub.l %d1,L_SCR1(%a6) # L_SCR1 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1)
mov.l &0x80000000,ADJSCALE+4(%a6)
clr.l ADJSCALE+8(%a6)
mov.l L_SCR1(%a6),%d1 # D0 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M)
bra.w EXPCONT1 # go back to Step 3
EXP2BIG:
#--Step 9
tst.b (%a0) # is X positive or negative?
bmi t_unfl2
bra t_ovfl2
global setoxd
setoxd:
#--entry point for EXP(X), X is denormalized
mov.l (%a0),-(%sp)
andi.l &0x80000000,(%sp)
ori.l &0x00800000,(%sp) # sign(X)*2^(-126)
fmov.s &0x3F800000,%fp0
fmov.l %d0,%fpcr
fadd.s (%sp)+,%fp0
bra t_pinx2
global setoxm1
setoxm1:
#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
#--Step 1.
#--Step 1.1
mov.l (%a0),%d1 # load part of input X
and.l &0x7FFF0000,%d1 # biased expo. of X
cmp.l %d1,&0x3FFD0000 # 1/4
bge.b EM1CON1 # |X| >= 1/4
bra EM1SM
EM1CON1:
#--Step 1.3
#--The case |X| >= 1/4
mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
ble.b EM1MAIN # 1/4 <= |X| <= 70log2
bra EM1BIG
EM1MAIN:
#--Step 2.
#--This is the case: 1/4 <= |X| <= 70 log2.
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is M
mov.l %d1,L_SCR1(%a6) # save a copy of M
#--Step 3.
#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
#--a1 points to 2^(J/64), D0 and L_SCR1 both contain M
fmov.x %fp0,%fp2
fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
fadd.x %fp1,%fp0 # X + N*L1
fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M
#--Step 4.
#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # fp1 IS S = R*R
fmov.s &0x3950097B,%fp2 # fp2 IS a6
fmul.x %fp1,%fp2 # fp2 IS S*A6
fmov.x %fp1,%fp3
fmul.s &0x3AB60B6A,%fp3 # fp3 IS S*A5
fadd.d EM1A4(%pc),%fp2 # fp2 IS A4+S*A6
fadd.d EM1A3(%pc),%fp3 # fp3 IS A3+S*A5
mov.w %d1,SC(%a6) # SC is 2^(M) in extended
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fmul.x %fp1,%fp2 # fp2 IS S*(A4+S*A6)
mov.l L_SCR1(%a6),%d1 # D0 is M
neg.w %d1 # D0 is -M
fmul.x %fp1,%fp3 # fp3 IS S*(A3+S*A5)
add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
fadd.d EM1A2(%pc),%fp2 # fp2 IS A2+S*(A4+S*A6)
fadd.s &0x3F000000,%fp3 # fp3 IS A1+S*(A3+S*A5)
fmul.x %fp1,%fp2 # fp2 IS S*(A2+S*(A4+S*A6))
or.w &0x8000,%d1 # signed/expo. of -2^(-M)
mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M)
mov.l &0x80000000,ONEBYSC+4(%a6)
clr.l ONEBYSC+8(%a6)
fmul.x %fp3,%fp1 # fp1 IS S*(A1+S*(A3+S*A5))
fmul.x %fp0,%fp2 # fp2 IS R*S*(A2+S*(A4+S*A6))
fadd.x %fp1,%fp0 # fp0 IS R+S*(A1+S*(A3+S*A5))
fadd.x %fp2,%fp0 # fp0 IS EXP(R)-1
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
#--Step 5
#--Compute 2^(J/64)*p
fmul.x (%a1),%fp0 # 2^(J/64)*(Exp(R)-1)
#--Step 6
#--Step 6.1
mov.l L_SCR1(%a6),%d1 # retrieve M
cmp.l %d1,&63
ble.b MLE63
#--Step 6.2 M >= 64
fmov.s 12(%a1),%fp1 # fp1 is t
fadd.x ONEBYSC(%a6),%fp1 # fp1 is t+OnebySc
fadd.x %fp1,%fp0 # p+(t+OnebySc), fp1 released
fadd.x (%a1),%fp0 # T+(p+(t+OnebySc))
bra EM1SCALE
MLE63:
#--Step 6.3 M <= 63
cmp.l %d1,&-3
bge.b MGEN3
MLTN3:
#--Step 6.4 M <= -4
fadd.s 12(%a1),%fp0 # p+t
fadd.x (%a1),%fp0 # T+(p+t)
fadd.x ONEBYSC(%a6),%fp0 # OnebySc + (T+(p+t))
bra EM1SCALE
MGEN3:
#--Step 6.5 -3 <= M <= 63
fmov.x (%a1)+,%fp1 # fp1 is T
fadd.s (%a1),%fp0 # fp0 is p+t
fadd.x ONEBYSC(%a6),%fp1 # fp1 is T+OnebySc
fadd.x %fp1,%fp0 # (T+OnebySc)+(p+t)
EM1SCALE:
#--Step 6.6
fmov.l %d0,%fpcr
fmul.x SC(%a6),%fp0
bra t_inx2
EM1SM:
#--Step 7 |X| < 1/4.
cmp.l %d1,&0x3FBE0000 # 2^(-65)
bge.b EM1POLY
EM1TINY:
#--Step 8 |X| < 2^(-65)
cmp.l %d1,&0x00330000 # 2^(-16312)
blt.b EM12TINY
#--Step 8.2
mov.l &0x80010000,SC(%a6) # SC is -2^(-16382)
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fmov.x (%a0),%fp0
fmov.l %d0,%fpcr
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x SC(%a6),%fp0
bra t_catch
EM12TINY:
#--Step 8.3
fmov.x (%a0),%fp0
fmul.d TWO140(%pc),%fp0
mov.l &0x80010000,SC(%a6)
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fadd.x SC(%a6),%fp0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.d TWON140(%pc),%fp0
bra t_catch
EM1POLY:
#--Step 9 exp(X)-1 by a simple polynomial
fmov.x (%a0),%fp0 # fp0 is X
fmul.x %fp0,%fp0 # fp0 is S := X*X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
fmov.s &0x2F30CAA8,%fp1 # fp1 is B12
fmul.x %fp0,%fp1 # fp1 is S*B12
fmov.s &0x310F8290,%fp2 # fp2 is B11
fadd.s &0x32D73220,%fp1 # fp1 is B10+S*B12
fmul.x %fp0,%fp2 # fp2 is S*B11
fmul.x %fp0,%fp1 # fp1 is S*(B10 + ...
fadd.s &0x3493F281,%fp2 # fp2 is B9+S*...
fadd.d EM1B8(%pc),%fp1 # fp1 is B8+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B9+...
fmul.x %fp0,%fp1 # fp1 is S*(B8+...
fadd.d EM1B7(%pc),%fp2 # fp2 is B7+S*...
fadd.d EM1B6(%pc),%fp1 # fp1 is B6+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B7+...
fmul.x %fp0,%fp1 # fp1 is S*(B6+...
fadd.d EM1B5(%pc),%fp2 # fp2 is B5+S*...
fadd.d EM1B4(%pc),%fp1 # fp1 is B4+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B5+...
fmul.x %fp0,%fp1 # fp1 is S*(B4+...
fadd.d EM1B3(%pc),%fp2 # fp2 is B3+S*...
fadd.x EM1B2(%pc),%fp1 # fp1 is B2+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B3+...
fmul.x %fp0,%fp1 # fp1 is S*(B2+...
fmul.x %fp0,%fp2 # fp2 is S*S*(B3+...)
fmul.x (%a0),%fp1 # fp1 is X*S*(B2...
fmul.s &0x3F000000,%fp0 # fp0 is S*B1
fadd.x %fp2,%fp1 # fp1 is Q
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
fadd.x %fp1,%fp0 # fp0 is S*B1+Q
fmov.l %d0,%fpcr
fadd.x (%a0),%fp0
bra t_inx2
EM1BIG:
#--Step 10 |X| > 70 log2
mov.l (%a0),%d1
cmp.l %d1,&0
bgt.w EXPC1
#--Step 10.2
fmov.s &0xBF800000,%fp0 # fp0 is -1
fmov.l %d0,%fpcr
fadd.s &0x00800000,%fp0 # -1 + 2^(-126)
bra t_minx2
global setoxm1d
setoxm1d:
#--entry point for EXPM1(X), here X is denormalized
#--Step 0.
bra t_extdnrm
#########################################################################
# sgetexp(): returns the exponent portion of the input argument. #
# The exponent bias is removed and the exponent value is #
# returned as an extended precision number in fp0. #
# sgetexpd(): handles denormalized numbers. #
# #
# sgetman(): extracts the mantissa of the input argument. The #
# mantissa is converted to an extended precision number w/ #
# an exponent of $3fff and is returned in fp0. The range of #
#	the result is [1.0, 2.0).					#
# sgetmand(): handles denormalized numbers. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# #
# OUTPUT ************************************************************** #
# fp0 = exponent(X) or mantissa(X) #
# #
#########################################################################
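#
# Illustrative sketch (not part of the FPSP): in C terms, the standard
# frexp() splits x into m * 2^e with 0.5 <= |m| < 1, so the values
# produced by the routines below correspond to e-1 (sgetexp) and 2*m
# (sgetman, with the sign carried on the mantissa).  Double precision
# stands in for extended precision here.
#
#	#include <math.h>
#
#	double getexp_sketch(double x) { int e; frexp(x, &e); return (double)(e - 1); }
#	double getman_sketch(double x) { int e; return 2.0 * frexp(x, &e); }
#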
global sgetexp
sgetexp:
mov.w SRC_EX(%a0),%d0 # get the exponent
bclr &0xf,%d0 # clear the sign bit
subi.w &0x3fff,%d0 # subtract off the bias
fmov.w %d0,%fp0 # return exp in fp0
blt.b sgetexpn # it's negative
rts
sgetexpn:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
global sgetexpd
sgetexpd:
bsr.l norm # normalize
neg.w %d0 # new exp = -(shft amt)
subi.w &0x3fff,%d0 # subtract off the bias
fmov.w %d0,%fp0 # return exp in fp0
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
global sgetman
sgetman:
mov.w SRC_EX(%a0),%d0 # get the exp
	ori.w		&0x7fff,%d0		# set all exponent bits
	bclr		&0xe,%d0		# make it the new exp +/- $3fff
# here, we build the result in a tmp location so as not to disturb the input
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmov.x FP_SCR0(%a6),%fp0 # put new value back in fp0
bmi.b sgetmann # it's negative
rts
sgetmann:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# For denormalized numbers, shift the mantissa until the j-bit = 1,
# then load the exponent with +/- $3fff.
#
global sgetmand
sgetmand:
bsr.l norm # normalize exponent
bra.b sgetman
#########################################################################
# scosh(): computes the hyperbolic cosine of a normalized input #
# scoshd(): computes the hyperbolic cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = cosh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# COSH #
# 1. If |X| > 16380 log2, go to 3. #
# #
# 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae #
# y = |X|, z = exp(Y), and #
# cosh(X) = (1/2)*( z + 1/z ). #
# Exit. #
# #
# 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5. #
# #
# 4. (16380 log2 < |X| <= 16480 log2) #
#		cosh(X) = exp(|X|)/2.					#
# However, invoking exp(|X|) may cause premature #
#		overflow. Thus, we calculate cosh(X) as follows:	#
# Y := |X| #
# Fact := 2**(16380) #
# Y' := Y - 16381 log2 #
# cosh(X) := Fact * exp(Y'). #
# Exit. #
# #
#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
# Huge*Huge to generate overflow and an infinity with #
# the appropriate sign. Huge is the largest finite number #
# in extended format. Exit. #
# #
#########################################################################
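#
# Illustrative sketch (not part of the FPSP): Step 2 of the COSH
# algorithm above in C, with the standard exp() standing in for setox()
# and double precision standing in for extended precision.
#
#	#include <math.h>
#
#	double cosh_sketch(double x)
#	{
#		double z = exp(fabs(x));	/* z = exp(|X|)             */
#		return 0.5*z + 0.25/(0.5*z);	/* (1/2)(z + 1/z), as coded */
#	}
#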
TWO16380:
long 0x7FFB0000,0x80000000,0x00000000,0x00000000
global scosh
scosh:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x400CB167
bgt.b COSHBIG
#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
fabs.x %fp0 # |X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save |X| to stack
lea (%sp),%a0 # pass ptr to |X|
bsr setox # FP0 IS EXP(|X|)
add.l &0xc,%sp # erase |X| from stack
fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
mov.l (%sp)+,%d0
fmov.s &0x3E800000,%fp1 # (1/4)
fdiv.x %fp0,%fp1 # 1/(2 EXP(|X|))
fmov.l %d0,%fpcr
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x %fp1,%fp0
bra t_catch
COSHBIG:
cmp.l %d1,&0x400CB2B3
bgt.b COSHHUGE
fabs.x %fp0
fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save fp0 to stack
lea (%sp),%a0 # pass ptr to fp0
bsr setox
add.l &0xc,%sp # clear fp0 from stack
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x TWO16380(%pc),%fp0
bra t_catch
COSHHUGE:
bra t_ovfl2
global scoshd
#--COSH(X) = 1 FOR DENORMALIZED X
scoshd:
fmov.s &0x3F800000,%fp0
fmov.l %d0,%fpcr
fadd.s &0x00800000,%fp0
bra t_pinx2
#########################################################################
# ssinh(): computes the hyperbolic sine of a normalized input #
# ssinhd(): computes the hyperbolic sine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = sinh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# SINH #
# 1. If |X| > 16380 log2, go to 3. #
# #
# 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula #
# y = |X|, sgn = sign(X), and z = expm1(Y), #
# sinh(X) = sgn*(1/2)*( z + z/(1+z) ). #
# Exit. #
# #
# 3. If |X| > 16480 log2, go to 5. #
# #
# 4. (16380 log2 < |X| <= 16480 log2) #
# sinh(X) = sign(X) * exp(|X|)/2. #
# However, invoking exp(|X|) may cause premature overflow. #
# Thus, we calculate sinh(X) as follows: #
# Y := |X| #
# sgn := sign(X) #
# sgnFact := sgn * 2**(16380) #
# Y' := Y - 16381 log2 #
# sinh(X) := sgnFact * exp(Y'). #
# Exit. #
# #
# 5. (|X| > 16480 log2) sinh(X) must overflow. Return #
# sign(X)*Huge*Huge to generate overflow and an infinity with #
# the appropriate sign. Huge is the largest finite number in #
# extended format. Exit. #
# #
#########################################################################
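#
# Illustrative sketch (not part of the FPSP): Step 2 of the SINH
# algorithm above in C, with the standard expm1() standing in for
# setoxm1():
#
#	#include <math.h>
#
#	double sinh_sketch(double x)
#	{
#		double z = expm1(fabs(x));		/* z = exp(|X|) - 1   */
#		double s = 0.5 * (z + z/(1.0 + z));	/* (1/2)(z + z/(1+z)) */
#		return (x < 0.0) ? -s : s;		/* restore sign(X)    */
#	}
#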
global ssinh
ssinh:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l %d1,%a1 # save (compacted) operand
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x400CB167
bgt.b SINHBIG
#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
fabs.x %fp0 # Y = |X|
movm.l &0x8040,-(%sp) # {a1/d0}
fmovm.x &0x01,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
clr.l %d0
bsr setoxm1 # FP0 IS Z = EXPM1(Y)
add.l &0xc,%sp # clear Y from stack
fmov.l &0,%fpcr
movm.l (%sp)+,&0x0201 # {a1/d0}
fmov.x %fp0,%fp1
fadd.s &0x3F800000,%fp1 # 1+Z
fmov.x %fp0,-(%sp)
fdiv.x %fp1,%fp0 # Z/(1+Z)
mov.l %a1,%d1
and.l &0x80000000,%d1
or.l &0x3F000000,%d1
fadd.x (%sp)+,%fp0
mov.l %d1,-(%sp)
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.s (%sp)+,%fp0 # last fp inst - possible exceptions set
bra t_catch
SINHBIG:
cmp.l %d1,&0x400CB2B3
bgt t_ovfl
fabs.x %fp0
fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
mov.l &0,-(%sp)
mov.l &0x80000000,-(%sp)
mov.l %a1,%d1
and.l &0x80000000,%d1
or.l &0x7FFB0000,%d1
mov.l %d1,-(%sp) # EXTENDED FMT
fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save fp0 on stack
lea (%sp),%a0 # pass ptr to fp0
bsr setox
add.l &0xc,%sp # clear fp0 from stack
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x (%sp)+,%fp0 # possible exception
bra t_catch
global ssinhd
#--SINH(X) = X FOR DENORMALIZED X
ssinhd:
bra t_extdnrm
#########################################################################
# stanh(): computes the hyperbolic tangent of a normalized input #
# stanhd(): computes the hyperbolic tangent of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = tanh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# TANH #
# 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3. #
# #
# 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by #
# sgn := sign(X), y := 2|X|, z := expm1(Y), and #
# tanh(X) = sgn*( z/(2+z) ). #
# Exit. #
# #
# 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1, #
# go to 7. #
# #
# 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6. #
# #
# 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by #
# sgn := sign(X), y := 2|X|, z := exp(Y), #
# tanh(X) = sgn - [ sgn*2/(1+z) ]. #
# Exit. #
# #
# 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we #
# calculate Tanh(X) by #
# sgn := sign(X), Tiny := 2**(-126), #
# tanh(X) := sgn - sgn*Tiny. #
# Exit. #
# #
# 7. (|X| < 2**(-40)). Tanh(X) = X. Exit. #
# #
#########################################################################
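#
# Illustrative sketch (not part of the FPSP): Steps 2 and 5 of the TANH
# algorithm above in C, with expm1()/exp() standing in for
# setoxm1()/setox() and M_LN2 assumed available from <math.h>:
#
#	#include <math.h>
#
#	double tanh_sketch(double x)
#	{
#		double y = 2.0 * fabs(x);
#		double sgn = (x < 0.0) ? -1.0 : 1.0;
#		if (y < 5.0 * M_LN2) {			/* |X| < (5/2) log2     */
#			double z = expm1(y);
#			return sgn * z / (z + 2.0);	/* sgn * z/(2+z)        */
#		}
#		return sgn - sgn * 2.0 / (1.0 + exp(y)); /* sgn - sgn*2/(1+z)   */
#	}
#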
set X,FP_SCR0
set XFRAC,X+4
set SGN,L_SCR3
set V,FP_SCR0
global stanh
stanh:
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6)
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l %d1,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
blt.w TANHBORS # yes
cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
bgt.w TANHBORS # yes
#--THIS IS THE USUAL CASE
#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
mov.l X(%a6),%d1
mov.l %d1,SGN(%a6)
and.l &0x7FFF0000,%d1
add.l &0x00010000,%d1 # EXPONENT OF 2|X|
mov.l %d1,X(%a6)
and.l &0x80000000,SGN(%a6)
fmov.x X(%a6),%fp0 # FP0 IS Y = 2|X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x1,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
bsr setoxm1 # FP0 IS Z = EXPM1(Y)
add.l &0xc,%sp # clear Y from stack
mov.l (%sp)+,%d0
fmov.x %fp0,%fp1
fadd.s &0x40000000,%fp1 # Z+2
mov.l SGN(%a6),%d1
fmov.x %fp1,V(%a6)
eor.l %d1,V(%a6)
fmov.l %d0,%fpcr # restore users round prec,mode
fdiv.x V(%a6),%fp0
bra t_inx2
TANHBORS:
cmp.l %d1,&0x3FFF8000
blt.w TANHSM
cmp.l %d1,&0x40048AA1
bgt.w TANHHUGE
#-- (5/2) LOG2 < |X| < 50 LOG2,
#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
#--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
mov.l X(%a6),%d1
mov.l %d1,SGN(%a6)
and.l &0x7FFF0000,%d1
add.l &0x00010000,%d1 # EXPO OF 2|X|
mov.l %d1,X(%a6) # Y = 2|X|
and.l &0x80000000,SGN(%a6)
mov.l SGN(%a6),%d1
fmov.x X(%a6),%fp0 # Y = 2|X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
bsr setox # FP0 IS EXP(Y)
add.l &0xc,%sp # clear Y from stack
mov.l (%sp)+,%d0
mov.l SGN(%a6),%d1
fadd.s &0x3F800000,%fp0 # EXP(Y)+1
eor.l &0xC0000000,%d1 # -SIGN(X)*2
fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
fdiv.x %fp0,%fp1 # -SIGN(X)2 / [EXP(Y)+1 ]
mov.l SGN(%a6),%d1
or.l &0x3F800000,%d1 # SGN
fmov.s %d1,%fp0 # SGN IN SGL FMT
fmov.l %d0,%fpcr # restore users round prec,mode
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x %fp1,%fp0
bra t_inx2
TANHSM:
fmov.l %d0,%fpcr # restore users round prec,mode
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
#---RETURN SGN(X) - SGN(X)EPS
TANHHUGE:
mov.l X(%a6),%d1
and.l &0x80000000,%d1
or.l &0x3F800000,%d1
fmov.s %d1,%fp0
and.l &0x80000000,%d1
eor.l &0x80800000,%d1 # -SIGN(X)*EPS
fmov.l %d0,%fpcr # restore users round prec,mode
fadd.s %d1,%fp0
bra t_inx2
global stanhd
#--TANH(X) = X FOR DENORMALIZED X
stanhd:
bra t_extdnrm
#########################################################################
# slogn(): computes the natural logarithm of a normalized input #
# slognd(): computes the natural logarithm of a denormalized input #
# slognp1(): computes the log(1+X) of a normalized input #
# slognp1d(): computes the log(1+X) of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = log(X) or log(1+X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# LOGN: #
# Step 1. If |X-1| < 1/16, approximate log(X) by an odd #
# polynomial in u, where u = 2(X-1)/(X+1). Otherwise, #
# move on to Step 2. #
# #
# Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first #
# seven significant bits of Y plus 2**(-7), i.e. #
# F = 1.xxxxxx1 in base 2 where the six "x" match those #
# of Y. Note that |Y-F| <= 2**(-7). #
# #
# Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a #
# polynomial in u, log(1+u) = poly. #
# #
# Step 4. Reconstruct #
# log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u) #
# by k*log(2) + (log(F) + poly). The values of log(F) are #
# calculated beforehand and stored in the program. #
# #
# lognp1: #
# Step 1: If |X| < 1/16, approximate log(1+X) by an odd #
# polynomial in u where u = 2X/(2+X). Otherwise, move on #
# to Step 2. #
# #
# Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done #
# in Step 2 of the algorithm for LOGN and compute #
# log(1+X) as k*log(2) + log(F) + poly where poly #
# approximates log(1+u), u = (Y-F)/F. #
# #
# Implementation Notes: #
# Note 1. There are 64 different possible values for F, thus 64 #
# log(F)'s need to be tabulated. Moreover, the values of #
# 1/F are also tabulated so that the division in (Y-F)/F #
# can be performed by a multiplication. #
# #
#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
# the value Y-F has to be calculated carefully when #
#		1/2 <= 1+X < 3/2.					#
# #
# Note 3. To fully exploit the pipeline, polynomials are usually #
# separated into two parts evaluated independently before #
# being added up. #
# #
#########################################################################
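#
# Illustrative sketch (not part of the FPSP): the LOGN argument
# reduction above in C.  For clarity the 64-entry tables of 1/F and
# log(F) are replaced by a direct division and a call to log(); frexp()
# supplies the X = 2^k * Y split:
#
#	#include <math.h>
#
#	double logn_sketch(double x)		/* assumes x > 0, x normal */
#	{
#		int k;
#		double y = 2.0 * frexp(x, &k);	/* x = 2^(k-1) * y, 1 <= y < 2 */
#		k -= 1;
#		double f = floor(64.0*y)/64.0 + 1.0/128.0; /* F = 1.xxxxxx1    */
#		double u = (y - f) / f;		/* |u| <= 2^(-7)               */
#		return k*M_LN2 + log(f) + log1p(u); /* k*log2+log(F)+log(1+u)  */
#	}
#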
LOGOF2:
long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
one:
long 0x3F800000
zero:
long 0x00000000
infty:
long 0x7F800000
negone:
long 0xBF800000
LOGA6:
long 0x3FC2499A,0xB5E4040B
LOGA5:
long 0xBFC555B5,0x848CB7DB
LOGA4:
long 0x3FC99999,0x987D8730
LOGA3:
long 0xBFCFFFFF,0xFF6F7E97
LOGA2:
long 0x3FD55555,0x555555A4
LOGA1:
long 0xBFE00000,0x00000008
LOGB5:
long 0x3F175496,0xADD7DAD6
LOGB4:
long 0x3F3C71C2,0xFE80C7E0
LOGB3:
long 0x3F624924,0x928BCCFF
LOGB2:
long 0x3F899999,0x999995EC
LOGB1:
long 0x3FB55555,0x55555555
TWO:
long 0x40000000,0x00000000
LTHOLD:
long 0x3f990000,0x80000000,0x00000000,0x00000000
LOGTBL:
long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
long 0x3FFE0000,0x94458094,0x45809446,0x00000000
long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
long 0x3FFE0000,0x80808080,0x80808081,0x00000000
long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
set ADJK,L_SCR1
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set F,FP_SCR1
set FFRAC,F+4
set KLOG2,FP_SCR0
set SAVEU,FP_SCR0
global slogn
#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
slogn:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l &0x00000000,ADJK(%a6)
LOGBGN:
#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l (%a0),X(%a6)
mov.l 4(%a0),X+4(%a6)
mov.l 8(%a0),X+8(%a6)
cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
blt.w LOGNEG # LOG OF NEGATIVE ARGUMENT IS INVALID
# X IS POSITIVE, CHECK IF X IS NEAR 1
cmp.l %d1,&0x3ffef07d # IS X < 15/16?
blt.b LOGMAIN # YES
cmp.l %d1,&0x3fff8841 # IS X > 17/16?
ble.w LOGNEAR1 # NO
LOGMAIN:
#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
#-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
#--LOG(1+U) CAN BE VERY EFFICIENT.
#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
#--GET K, Y, F, AND ADDRESS OF 1/F.
asr.l &8,%d1
asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
sub.l &0x3FFF,%d1 # THIS IS K
add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
lea LOGTBL(%pc),%a0 # BASE ADDRESS OF 1/F AND LOG(F)
fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
mov.l XFRAC(%a6),FFRAC(%a6)
and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT
add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
fmov.x X(%a6),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # Y-F
fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
#--REGISTERS SAVED: FPCR, FP1, FP2
LP1CONT1:
#--AN RE-ENTRY POINT FOR LOGNP1
fmul.x (%a0),%fp0 # FP0 IS U = (Y-F)/F
fmul.x LOGOF2(%pc),%fp1 # GET K*LOG2 WHILE FP0 IS NOT READY
fmov.x %fp0,%fp2
fmul.x %fp2,%fp2 # FP2 IS V=U*U
	fmov.x		%fp1,KLOG2(%a6)		# PUT K*LOG2 IN MEMORY, FREE FP1
#--LOG(1+U) IS APPROXIMATED BY
#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
#--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
fmov.x %fp2,%fp3
fmov.x %fp2,%fp1
fmul.d LOGA6(%pc),%fp1 # V*A6
fmul.d LOGA5(%pc),%fp2 # V*A5
fadd.d LOGA4(%pc),%fp1 # A4+V*A6
fadd.d LOGA3(%pc),%fp2 # A3+V*A5
fmul.x %fp3,%fp1 # V*(A4+V*A6)
fmul.x %fp3,%fp2 # V*(A3+V*A5)
fadd.d LOGA2(%pc),%fp1 # A2+V*(A4+V*A6)
fadd.d LOGA1(%pc),%fp2 # A1+V*(A3+V*A5)
fmul.x %fp3,%fp1 # V*(A2+V*(A4+V*A6))
add.l &16,%a0 # ADDRESS OF LOG(F)
fmul.x %fp3,%fp2 # V*(A1+V*(A3+V*A5))
fmul.x %fp0,%fp1 # U*V*(A2+V*(A4+V*A6))
fadd.x %fp2,%fp0 # U+V*(A1+V*(A3+V*A5))
fadd.x (%a0),%fp1 # LOG(F)+U*V*(A2+V*(A4+V*A6))
fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
fadd.x %fp1,%fp0 # FP0 IS LOG(F) + LOG(1+U)
fmov.l %d0,%fpcr
fadd.x KLOG2(%a6),%fp0 # FINAL ADD
bra t_inx2
LOGNEAR1:
# if the input is exactly equal to one, then exit through ld_pzero.
# if these 2 lines weren't here, the correct answer would be returned
# but the INEX2 bit would be set.
fcmp.b %fp0,&0x1 # is it equal to one?
fbeq.l ld_pzero # yes
#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
fmov.x %fp0,%fp1
fsub.s one(%pc),%fp1 # FP1 IS X-1
fadd.s one(%pc),%fp0 # FP0 IS X+1
fadd.x %fp1,%fp1 # FP1 IS 2(X-1)
#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
LP1CONT2:
#--THIS IS AN RE-ENTRY POINT FOR LOGNP1
fdiv.x %fp0,%fp1 # FP1 IS U
fmovm.x &0xc,-(%sp) # SAVE FP2-3
#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
#--LET V=U*U, W=V*V, CALCULATE
#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
#--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
fmov.x %fp1,%fp0
fmul.x %fp0,%fp0 # FP0 IS V
fmov.x %fp1,SAVEU(%a6) # STORE U IN MEMORY, FREE FP1
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS W
fmov.d LOGB5(%pc),%fp3
fmov.d LOGB4(%pc),%fp2
fmul.x %fp1,%fp3 # W*B5
fmul.x %fp1,%fp2 # W*B4
fadd.d LOGB3(%pc),%fp3 # B3+W*B5
fadd.d LOGB2(%pc),%fp2 # B2+W*B4
fmul.x %fp3,%fp1 # W*(B3+W*B5), FP3 RELEASED
fmul.x %fp0,%fp2 # V*(B2+W*B4)
fadd.d LOGB1(%pc),%fp1 # B1+W*(B3+W*B5)
fmul.x SAVEU(%a6),%fp0 # FP0 IS U*V
fadd.x %fp2,%fp1 # B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
fmul.x %fp1,%fp0 # U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
fmov.l %d0,%fpcr
fadd.x SAVEU(%a6),%fp0
bra t_inx2
#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
LOGNEG:
bra t_operr
global slognd
slognd:
#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
mov.l &-100,ADJK(%a6) # INPUT = 2^(ADJK) * FP0
#----normalize the input value by left shifting k bits (k to be determined
#----below), adjusting exponent and storing -k to ADJK
#----the value TWOTO100 is no longer needed.
#----Note that this code assumes the denormalized input is NON-ZERO.
movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
mov.l (%a0),%d3 # D3 is exponent of smallest norm. #
mov.l 4(%a0),%d4
mov.l 8(%a0),%d5 # (D4,D5) is (Hi_X,Lo_X)
clr.l %d2 # D2 used for holding K
tst.l %d4
bne.b Hi_not0
Hi_0:
mov.l %d5,%d4
clr.l %d5
mov.l &32,%d2
clr.l %d6
bfffo %d4{&0:&32},%d6
lsl.l %d6,%d4
add.l %d6,%d2 # (D3,D4,D5) is normalized
mov.l %d3,X(%a6)
mov.l %d4,XFRAC(%a6)
mov.l %d5,XFRAC+4(%a6)
neg.l %d2
mov.l %d2,ADJK(%a6)
fmov.x X(%a6),%fp0
movm.l (%sp)+,&0xfc # restore registers {d2-d7}
lea X(%a6),%a0
bra.w LOGBGN # begin regular log(X)
Hi_not0:
clr.l %d6
bfffo %d4{&0:&32},%d6 # find first 1
mov.l %d6,%d2 # get k
lsl.l %d6,%d4
mov.l %d5,%d7 # a copy of D5
lsl.l %d6,%d5
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d4 # (D3,D4,D5) normalized
mov.l %d3,X(%a6)
mov.l %d4,XFRAC(%a6)
mov.l %d5,XFRAC+4(%a6)
neg.l %d2
mov.l %d2,ADJK(%a6)
fmov.x X(%a6),%fp0
movm.l (%sp)+,&0xfc # restore registers {d2-d7}
lea X(%a6),%a0
bra.w LOGBGN # begin regular log(X)
global slognp1
#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
slognp1:
fmov.x (%a0),%fp0 # LOAD INPUT
fabs.x %fp0 # test magnitude
fcmp.x %fp0,LTHOLD(%pc) # compare with min threshold
fbgt.w LP1REAL # if greater, continue
fmov.l %d0,%fpcr
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%a0),%fp0 # return signed argument
bra t_catch
LP1REAL:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l &0x00000000,ADJK(%a6)
fmov.x %fp0,%fp1 # FP1 IS INPUT Z
fadd.s one(%pc),%fp0 # X := ROUND(1+Z)
fmov.x %fp0,X(%a6)
mov.w XFRAC(%a6),XDCARE(%a6)
mov.l X(%a6),%d1
cmp.l %d1,&0
ble.w LP1NEG0 # LOG OF ZERO OR -VE
	cmp.l		%d1,&0x3ffe8000		# IS X IN RANGE [1/2,3/2]?
blt.w LOGMAIN
cmp.l %d1,&0x3fffc000
bgt.w LOGMAIN
#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS ROUNDING 1+Z,
#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
LP1NEAR1:
#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
cmp.l %d1,&0x3ffef07d
blt.w LP1CARE
cmp.l %d1,&0x3fff8841
bgt.w LP1CARE
LP1ONE16:
#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
fadd.x %fp1,%fp1 # FP1 IS 2Z
fadd.s one(%pc),%fp0 # FP0 IS 1+X
#--U = FP1/FP0
bra.w LP1CONT2
LP1CARE:
#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
#--THERE ARE ONLY TWO CASES.
#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
#--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
mov.l XFRAC(%a6),FFRAC(%a6)
and.l &0xFE000000,FFRAC(%a6)
or.l &0x01000000,FFRAC(%a6) # F OBTAINED
cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
bge.b KISZERO
KISNEG1:
fmov.s TWO(%pc),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # 2-F
mov.l FFRAC(%a6),%d1
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F
fadd.x %fp1,%fp1 # GET 2Z
fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
fadd.x %fp1,%fp0 # FP0 IS Y-F = (2-F)+2Z
lea LOGTBL(%pc),%a0 # A0 IS ADDRESS OF 1/F
add.l %d1,%a0
fmov.s negone(%pc),%fp1 # FP1 IS K = -1
bra.w LP1CONT1
KISZERO:
fmov.s one(%pc),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # 1-F
mov.l FFRAC(%a6),%d1
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1
fadd.x %fp1,%fp0 # FP0 IS Y-F
fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
lea LOGTBL(%pc),%a0
add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
fmov.s zero(%pc),%fp1 # FP1 IS K = 0
bra.w LP1CONT1
LP1NEG0:
#--FPCR SAVED. D0 IS X IN COMPACT FORM.
cmp.l %d1,&0
blt.b LP1NEG
LP1ZERO:
fmov.s negone(%pc),%fp0
fmov.l %d0,%fpcr
bra t_dz
LP1NEG:
fmov.s zero(%pc),%fp0
fmov.l %d0,%fpcr
bra t_operr
global slognp1d
#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
# Simply return the denorm
slognp1d:
bra t_extdnrm
#########################################################################
# satanh(): computes the inverse hyperbolic tangent of a norm input #
# satanhd(): computes the inverse hyperbolic tangent of a denorm input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arctanh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ATANH #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate atanh(X) by #
# sgn := sign(X) #
# y := |X| #
# z := 2y/(1-y) #
# atanh(X) := sgn * (1/2) * logp1(z) #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) Generate infinity with an appropriate sign and #
# divide-by-zero by #
# sgn := sign(X) #
#		atanh(X) := sgn / (+0).					#
# Exit. #
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
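#
# Illustrative sketch (not part of the FPSP): Step 2 of the ATANH
# algorithm above in C, with the standard log1p() standing in for
# slognp1():
#
#	#include <math.h>
#
#	double atanh_sketch(double x)		/* assumes |x| < 1 */
#	{
#		double y = fabs(x);
#		double z = 2.0 * y / (1.0 - y);
#		double r = 0.5 * log1p(z);	/* (1/2) * log1p(2y/(1-y)) */
#		return (x < 0.0) ? -r : r;	/* restore sign(X)         */
#	}
#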
global satanh
satanh:
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ATANHBIG
#--THIS IS THE USUAL CASE, |X| < 1
#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
fabs.x (%a0),%fp0 # Y = |X|
fmov.x %fp0,%fp1
fneg.x %fp1 # -Y
fadd.x %fp0,%fp0 # 2Y
fadd.s &0x3F800000,%fp1 # 1-Y
fdiv.x %fp1,%fp0 # 2Y/(1-Y)
mov.l (%a0),%d1
and.l &0x80000000,%d1
or.l &0x3F000000,%d1 # SIGN(X)*HALF
mov.l %d1,-(%sp)
mov.l %d0,-(%sp) # save rnd prec,mode
clr.l %d0 # pass ext prec,RN
fmovm.x &0x01,-(%sp) # save Z on stack
lea (%sp),%a0 # pass ptr to Z
bsr slognp1 # LOG1P(Z)
add.l &0xc,%sp # clear Z from stack
mov.l (%sp)+,%d0 # fetch old prec,mode
fmov.l %d0,%fpcr # load it
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.s (%sp)+,%fp0
bra t_catch
ATANHBIG:
fabs.x (%a0),%fp0 # |X|
fcmp.s %fp0,&0x3F800000
fbgt t_operr
bra t_dz
global satanhd
#--ATANH(X) = X FOR DENORMALIZED X
satanhd:
bra t_extdnrm
#########################################################################
# slog10(): computes the base-10 logarithm of a normalized input #
# slog10d(): computes the base-10 logarithm of a denormalized input #
# slog2(): computes the base-2 logarithm of a normalized input #
# slog2d(): computes the base-2 logarithm of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = log_10(X) or log_2(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 1.7 ulps in 64 significant bit, #
# i.e. within 0.5003 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# slog10d: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FpCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
# Notes: Even if X is denormalized, log(X) is always normalized. #
# #
# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L10. #
# #
# slog10: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FpCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call sLogN to obtain Y = log(X), the natural log of X. #
# #
# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L10. #
# #
# sLog2d: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FpCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
# Notes: Even if X is denormalized, log(X) is always normalized. #
# #
#	Step 2.   Compute log_2(X) = log(X) * (1/log(2)).		#
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L2. #
# #
# sLog2: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FpCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. If X is not an integer power of two, i.e., X != 2^k, #
# go to Step 3. #
# #
# Step 2. Return k. #
# 2.1 Get integer k, X = 2^k. #
# 2.2 Restore the user FPCR. #
# 2.3 Return ans := convert-to-double-extended(k). #
# #
# Step 3. Call sLogN to obtain Y = log(X), the natural log of X. #
# #
# Step 4. Compute log_2(X) = log(X) * (1/log(2)). #
# 4.1 Restore the user FPCR #
# 4.2 Return ans := Y * INV_L2. #
# #
#########################################################################
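#
# Illustrative sketch (not part of the FPSP): the slog2() special case
# above in C.  If X is an exact power of two the integer exponent is
# returned exactly (Step 2); otherwise the natural log is rescaled by
# 1/log(2) (Step 4).  log10 differs only in the scale factor 1/log(10).
#
#	#include <math.h>
#
#	double log2_sketch(double x)		/* assumes x > 0 */
#	{
#		int k;
#		double m = frexp(x, &k);	/* x = m * 2^k, 0.5 <= m < 1 */
#		if (m == 0.5)			/* X is exactly 2^(k-1)      */
#			return (double)(k - 1); /* Step 2: exact result      */
#		return log(x) * (1.0 / M_LN2);	/* Step 4: log(X) * INV_L2   */
#	}
#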
INV_L10:
long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
INV_L2:
long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
global slog10
#--entry point for Log10(X), X is normalized
slog10:
fmov.b &0x1,%fp0
fcmp.x %fp0,(%a0) # if operand == 1,
fbeq.l ld_pzero # return an EXACT zero
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slogn # log(X), X normal.
fmov.l (%sp)+,%fpcr
fmul.x INV_L10(%pc),%fp0
bra t_inx2
global slog10d
#--entry point for Log10(X), X is denormalized
slog10d:
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slognd # log(X), X denorm.
fmov.l (%sp)+,%fpcr
fmul.x INV_L10(%pc),%fp0
bra t_minx2
global slog2
#--entry point for Log2(X), X is normalized
slog2:
mov.l (%a0),%d1
blt.w invalid
mov.l 8(%a0),%d1
bne.b continue # X is not 2^k
mov.l 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
bne.b continue
#--X = 2^k.
mov.w (%a0),%d1
and.l &0x00007FFF,%d1
sub.l &0x3FFF,%d1
beq.l ld_pzero
fmov.l %d0,%fpcr
fmov.l %d1,%fp0
bra t_inx2
continue:
mov.l %d0,-(%sp)
clr.l %d0
bsr slogn # log(X), X normal.
fmov.l (%sp)+,%fpcr
fmul.x INV_L2(%pc),%fp0
bra t_inx2
invalid:
bra t_operr
global slog2d
#--entry point for Log2(X), X is denormalized
slog2d:
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slognd # log(X), X denorm.
fmov.l (%sp)+,%fpcr
fmul.x INV_L2(%pc),%fp0
bra t_minx2
#########################################################################
# stwotox(): computes 2**X for a normalized input #
# stwotoxd(): computes 2**X for a denormalized input #
# stentox(): computes 10**X for a normalized input #
# stentoxd(): computes 10**X for a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = 2**X or 10**X #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# twotox #
# 1. If |X| > 16480, go to ExpBig. #
# #
# 2. If |X| < 2**(-70), go to ExpSm. #
# #
# 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore #
# decompose N as #
# N = 64(M + M') + j, j = 0,1,2,...,63. #
# #
# 4. Overwrite r := r * log2. Then #
# 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
# Go to expr to compute that expression. #
# #
# tentox #
# 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig. #
# #
# 2. If |X| < 2**(-70), go to ExpSm. #
# #
# 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set #
# N := round-to-int(y). Decompose N as #
# N = 64(M + M') + j, j = 0,1,2,...,63. #
# #
# 4. Define r as #
# r := ((X - N*L1)-N*L2) * L10 #
# where L1, L2 are the leading and trailing parts of #
# log_10(2)/64 and L10 is the natural log of 10. Then #
# 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
# Go to expr to compute that expression. #
# #
# expr #
# 1. Fetch 2**(j/64) from table as Fact1 and Fact2. #
# #
# 2. Overwrite Fact1 and Fact2 by #
# Fact1 := 2**(M) * Fact1 #
# Fact2 := 2**(M) * Fact2 #
# Thus Fact1 + Fact2 = 2**(M) * 2**(j/64). #
# #
# 3. Calculate P where 1 + P approximates exp(r): #
# P = r + r*r*(A1+r*(A2+...+r*A5)). #
# #
# 4. Let AdjFact := 2**(M'). Return #
# AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ). #
# Exit. #
# #
# ExpBig #
# 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
# generate underflow by Tiny * Tiny. #
# #
# ExpSm #
# 1. Return 1 + X. #
# #
#########################################################################
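#
# Illustrative sketch (not part of the FPSP): the twotox reduction above
# in C.  N = round(64*X), r = (X - N/64)*log2, and 2^X = 2^(N/64)*exp(r).
# The 2^(J/64) table and the M/M' split (which only serve to avoid
# spurious overflow in extended precision) are folded into exp2()/ldexp():
#
#	#include <math.h>
#
#	double twotox_sketch(double x)	/* assumes 2^(-70) <= |x| <= 16480 */
#	{
#		double n = rint(64.0 * x);	/* N = round-to-int(64 X)    */
#		double r = (x - n/64.0) * M_LN2; /* r := (X - N/64) * log2   */
#		int m = (int)floor(n / 64.0);	/* N = 64*M + j, 0 <= j < 64 */
#		int j = (int)(n - 64.0 * m);
#		return ldexp(exp2(j/64.0) * exp(r), m); /* 2^M*2^(j/64)*exp(r) */
#	}
#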
L2TEN64:
long 0x406A934F,0x0979A371 # 64LOG10/LOG2
L10TWO1:
long 0x3F734413,0x509F8000 # LOG2/64LOG10
L10TWO2:
long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
LOG10: long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
LOG2: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
EXPA5: long 0x3F56C16D,0x6F7BD0B2
EXPA4: long 0x3F811112,0x302C712C
EXPA3: long 0x3FA55555,0x55554CC1
EXPA2: long 0x3FC55555,0x55554A54
EXPA1: long 0x3FE00000,0x00000000,0x00000000,0x00000000
TEXPTBL:
long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
set INT,L_SCR1
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set ADJFACT,FP_SCR0
set FACT1,FP_SCR0
set FACT1HI,FACT1+4
set FACT1LOW,FACT1+8
set FACT2,FP_SCR1
set FACT2HI,FACT2+4
set FACT2LOW,FACT2+8
global stwotox
#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
stwotox:
fmovm.x (%a0),&0x80 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
bge.b TWOOK1
bra.w EXPBORS
TWOOK1:
cmp.l %d1,&0x400D80C0 # |X| > 16480?
ble.b TWOMAIN
bra.w EXPBORS
TWOMAIN:
#--USUAL CASE, 2^(-70) <= |X| <= 16480
fmov.x %fp0,%fp1
fmul.s &0x42800000,%fp1 # 64 * X
fmov.l %fp1,INT(%a6) # N = ROUND-TO-INT(64 X)
mov.l %d2,-(%sp)
lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
mov.l INT(%a6),%d1
mov.l %d1,%d2
and.l &0x3F,%d1 # D0 IS J
asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
asr.l &6,%d2 # d2 IS L, N = 64L + J
mov.l %d2,%d1
asr.l &1,%d1 # D0 IS M
sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
add.l &0x3FFF,%d2
#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
#--ADJFACT = 2^(M').
#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmul.s &0x3C800000,%fp1 # (1/64)*N
mov.l (%a1)+,FACT1(%a6)
mov.l (%a1)+,FACT1HI(%a6)
mov.l (%a1)+,FACT1LOW(%a6)
mov.w (%a1)+,FACT2(%a6)
fsub.x %fp1,%fp0 # X - (1/64)*INT(64 X)
mov.w (%a1)+,FACT2HI(%a6)
clr.w FACT2HI+2(%a6)
clr.l FACT2LOW(%a6)
add.w %d1,FACT1(%a6)
fmul.x LOG2(%pc),%fp0 # FP0 IS R
add.w %d1,FACT2(%a6)
bra.w expr
EXPBORS:
#--FPCR, D0 SAVED
cmp.l %d1,&0x3FFF8000
bgt.b TEXPBIG
#--|X| IS SMALL, RETURN 1 + X
fmov.l %d0,%fpcr # restore users round prec,mode
fadd.s &0x3F800000,%fp0 # RETURN 1 + X
bra t_pinx2
TEXPBIG:
#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
#--REGISTERS SAVED SO FAR ARE FPCR AND D0
mov.l X(%a6),%d1
cmp.l %d1,&0
blt.b EXPNEG
bra t_ovfl2 # t_ovfl expects positive value
EXPNEG:
bra t_unfl2 # t_unfl expects positive value
global stwotoxd
stwotoxd:
#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
fmov.l %d0,%fpcr # set user's rounding mode/precision
fmov.s &0x3F800000,%fp0 # RETURN 1 + X
mov.l (%a0),%d1
or.l &0x00800001,%d1
fadd.s %d1,%fp0
bra t_pinx2
global stentox
#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
stentox:
fmovm.x (%a0),&0x80 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
bge.b TENOK1
bra.w EXPBORS
TENOK1:
cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
ble.b TENMAIN
bra.w EXPBORS
TENMAIN:
#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
fmov.x %fp0,%fp1
fmul.d L2TEN64(%pc),%fp1 # X*64*LOG10/LOG2
fmov.l %fp1,INT(%a6) # N=INT(X*64*LOG10/LOG2)
mov.l %d2,-(%sp)
lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
mov.l INT(%a6),%d1
mov.l %d1,%d2
and.l &0x3F,%d1 # D0 IS J
asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
asr.l &6,%d2 # d2 IS L, N = 64L + J
mov.l %d2,%d1
asr.l &1,%d1 # D0 IS M
sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
add.l &0x3FFF,%d2
#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
#--ADJFACT = 2^(M').
#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.x %fp1,%fp2
fmul.d L10TWO1(%pc),%fp1 # N*(LOG2/64LOG10)_LEAD
mov.l (%a1)+,FACT1(%a6)
fmul.x L10TWO2(%pc),%fp2 # N*(LOG2/64LOG10)_TRAIL
mov.l (%a1)+,FACT1HI(%a6)
mov.l (%a1)+,FACT1LOW(%a6)
fsub.x %fp1,%fp0 # X - N L_LEAD
mov.w (%a1)+,FACT2(%a6)
fsub.x %fp2,%fp0 # X - N L_TRAIL
mov.w (%a1)+,FACT2HI(%a6)
clr.w FACT2HI+2(%a6)
clr.l FACT2LOW(%a6)
fmul.x LOG10(%pc),%fp0 # FP0 IS R
add.w %d1,FACT1(%a6)
add.w %d1,FACT2(%a6)
expr:
#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
#--FP0 IS R. THE FOLLOWING CODE COMPUTES
#-- 2**(M'+M) * 2**(J/64) * EXP(R)
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS S = R*R
fmov.d EXPA5(%pc),%fp2 # FP2 IS A5
fmov.d EXPA4(%pc),%fp3 # FP3 IS A4
fmul.x %fp1,%fp2 # FP2 IS S*A5
fmul.x %fp1,%fp3 # FP3 IS S*A4
fadd.d EXPA3(%pc),%fp2 # FP2 IS A3+S*A5
fadd.d EXPA2(%pc),%fp3 # FP3 IS A2+S*A4
fmul.x %fp1,%fp2 # FP2 IS S*(A3+S*A5)
fmul.x %fp1,%fp3 # FP3 IS S*(A2+S*A4)
fadd.d EXPA1(%pc),%fp2 # FP2 IS A1+S*(A3+S*A5)
fmul.x %fp0,%fp3 # FP3 IS R*S*(A2+S*A4)
fmul.x %fp1,%fp2 # FP2 IS S*(A1+S*(A3+S*A5))
fadd.x %fp3,%fp0 # FP0 IS R+R*S*(A2+S*A4)
fadd.x %fp2,%fp0 # FP0 IS EXP(R) - 1
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
#--FINAL RECONSTRUCTION PROCESS
#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
fmul.x FACT1(%a6),%fp0
fadd.x FACT2(%a6),%fp0
fadd.x FACT1(%a6),%fp0
fmov.l %d0,%fpcr # restore users round prec,mode
mov.w %d2,ADJFACT(%a6) # INSERT EXPONENT
mov.l (%sp)+,%d2
mov.l &0x80000000,ADJFACT+4(%a6)
clr.l ADJFACT+8(%a6)
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x ADJFACT(%a6),%fp0 # FINAL ADJUSTMENT
bra t_catch
global stentoxd
stentoxd:
#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
fmov.l %d0,%fpcr # set user's rounding mode/precision
fmov.s &0x3F800000,%fp0 # RETURN 1 + X
mov.l (%a0),%d1
or.l &0x00800001,%d1
fadd.s %d1,%fp0
bra t_pinx2
#########################################################################
# sscale(): computes the destination operand scaled by the source #
#	operand. If the absolute value of the source operand is	#
# >= 2^14, an overflow or underflow is returned. #
# #
# INPUT *************************************************************** #
# a0 = pointer to double-extended source operand X #
# a1 = pointer to double-extended destination operand Y #
# #
# OUTPUT ************************************************************** #
# fp0 = scale(X,Y) #
# #
#########################################################################
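#
# Illustrative sketch (not part of the FPSP): the operation implemented
# below is simply dst * 2^(trunc(src)).  In C, ldexp() hides the denorm
# and overflow/underflow handling that the assembly does by hand:
#
#	#include <math.h>
#
#	double scale_sketch(double src, double dst)	/* assumes |src| < 2^14 */
#	{
#		return ldexp(dst, (int)trunc(src));	/* dst * 2^(trunc(src)) */
#	}
#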
set SIGN, L_SCR1
global sscale
sscale:
mov.l %d0,-(%sp) # store off ctrl bits for now
mov.w DST_EX(%a1),%d1 # get dst exponent
smi.b SIGN(%a6) # use SIGN to hold dst sign
andi.l &0x00007fff,%d1 # strip sign from dst exp
mov.w SRC_EX(%a0),%d0 # check src bounds
andi.w &0x7fff,%d0 # clr src sign bit
cmpi.w %d0,&0x3fff # is src ~ ZERO?
blt.w src_small # yes
cmpi.w %d0,&0x400c # no; is src too big?
bgt.w src_out # yes
#
# Source is within 2^14 range.
#
src_ok:
fintrz.x SRC(%a0),%fp0 # calc int of src
fmov.l %fp0,%d0 # int src to d0
# don't want any accrued bits from the fintrz showing up later since
# we may need to read the fpsr for the last fp op in t_catch2().
fmov.l &0x0,%fpsr
tst.b DST_HI(%a1) # is dst denormalized?
bmi.b sok_norm
# the dst is a DENORM. normalize the DENORM and add the adjustment to
# the src value. then, jump to the norm part of the routine.
sok_dnrm:
mov.l %d0,-(%sp) # save src for now
mov.w DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
mov.l DST_HI(%a1),FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass ptr to DENORM
bsr.l norm # normalize the DENORM
neg.l %d0
add.l (%sp)+,%d0 # add adjustment to src
fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
cmpi.w %d0,&-0x3fff # is the shft amt really low?
bge.b sok_norm2 # thank goodness no
# the multiply factor that we're trying to create should be a denorm
# for the multiply to work. therefore, we're going to actually do a
# multiply with a denorm which will cause an unimplemented data type
# exception to be put into the machine which will be caught and corrected
# later. we don't do this with the DENORMs above because this method
# is slower. but, don't fret, I don't see it being used much either.
fmov.l (%sp)+,%fpcr # restore user fpcr
mov.l &0x80000000,%d1 # load normalized mantissa
subi.l &-0x3fff,%d0 # how many should we shift?
neg.l %d0 # make it positive
cmpi.b %d0,&0x20 # is it > 32?
bge.b sok_dnrm_32 # yes
lsr.l %d0,%d1 # no; bit stays in upper lw
clr.l -(%sp) # insert zero low mantissa
mov.l %d1,-(%sp) # insert new high mantissa
clr.l -(%sp) # make zero exponent
bra.b sok_norm_cont
sok_dnrm_32:
subi.b &0x20,%d0 # get shift count
lsr.l %d0,%d1 # make low mantissa longword
mov.l %d1,-(%sp) # insert new low mantissa
clr.l -(%sp) # insert zero high mantissa
clr.l -(%sp) # make zero exponent
bra.b sok_norm_cont
# the src will force the dst to a DENORM value or worse. so, let's
# create an fp multiply that will create the result.
sok_norm:
fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
sok_norm2:
fmov.l (%sp)+,%fpcr # restore user fpcr
addi.w &0x3fff,%d0 # turn src amt into exp value
swap %d0 # put exponent in high word
clr.l -(%sp) # insert new exponent
mov.l &0x80000000,-(%sp) # insert new high mantissa
mov.l %d0,-(%sp) # insert new lo mantissa
sok_norm_cont:
fmov.l %fpcr,%d0 # d0 needs fpcr for t_catch2
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x (%sp)+,%fp0 # do the multiply
bra t_catch2 # catch any exceptions
#
# Source is outside of 2^14 range. Test the sign and branch
# to the appropriate exception handler.
#
src_out:
mov.l (%sp)+,%d0 # restore ctrl bits
exg %a0,%a1 # swap src,dst ptrs
tst.b SRC_EX(%a1) # is src negative?
bmi t_unfl # yes; underflow
bra t_ovfl_sc # no; overflow
#
# The source input is below 1, so we check for denormalized numbers
# and set unfl.
#
src_small:
tst.b DST_HI(%a1) # is dst denormalized?
bpl.b ssmall_done # yes
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr # no; load control bits
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x DST(%a1),%fp0 # simply return dest
bra t_catch2
ssmall_done:
mov.l (%sp)+,%d0 # load control bits into d1
mov.l %a1,%a0 # pass ptr to dst
bra t_resdnrm
#########################################################################
# smod(): computes the fp MOD of the input values X,Y. #
# srem(): computes the fp (IEEE) REM of the input values X,Y. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input X #
# a1 = pointer to extended precision input Y #
# d0 = round precision,mode #
# #
# The input operands X and Y can be either normalized or #
# denormalized. #
# #
# OUTPUT ************************************************************** #
# fp0 = FREM(X,Y) or FMOD(X,Y) #
# #
# ALGORITHM *********************************************************** #
# #
# Step 1. Save and strip signs of X and Y: signX := sign(X), #
# signY := sign(Y), X := |X|, Y := |Y|, #
# signQ := signX EOR signY. Record whether MOD or REM #
# is requested. #
# #
# Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
# If (L < 0) then #
# R := X, go to Step 4. #
# else #
# R := 2^(-L)X, j := L. #
# endif #
# #
# Step 3. Perform MOD(X,Y) #
# 3.1 If R = Y, go to Step 9. #
# 3.2 If R > Y, then { R := R - Y, Q := Q + 1} #
# 3.3 If j = 0, go to Step 4. #
# 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to #
# Step 3.1. #
# #
# Step 4. At this point, R = X - QY = MOD(X,Y). Set #
# Last_Subtract := false (used in Step 7 below). If #
# MOD is requested, go to Step 6. #
# #
# Step 5. R = MOD(X,Y), but REM(X,Y) is requested. #
# 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to #
# Step 6. #
# 5.2 If R > Y/2, then { set Last_Subtract := true, #
# Q := Q + 1, Y := signY*Y }. Go to Step 6. #
# 5.3 This is the tricky case of R = Y/2. If Q is odd, #
# then { Q := Q + 1, signX := -signX }. #
# #
# Step 6. R := signX*R. #
# #
# Step 7. If Last_Subtract = true, R := R - Y. #
# #
# Step 8. Return signQ, last 7 bits of Q, and R as required. #
# #
# Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus, #
# X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1), #
# R := 0. Return signQ, last 7 bits of Q, and R. #
# #
#########################################################################
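#
# Behavioral sketch only (the routine itself runs the bit-by-bit restoring
# loop described in Steps 2-9 above on the raw mantissas): the results
# correspond to the C library operations below, with the quotient byte
# additionally holding the sign and the low seven bits of the quotient.
#
#	#include <math.h>
#
#	/* MOD truncates the quotient toward zero (Steps 3-4);          */
#	/* REM rounds the quotient to nearest, ties to even (Step 5).   */
#	double mod_sketch(double x, double y) { return fmod(x, y); }
#	double rem_sketch(double x, double y) { return remainder(x, y); }
#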
set Mod_Flag,L_SCR3
set Sc_Flag,L_SCR3+1
set SignY,L_SCR2
set SignX,L_SCR2+2
set SignQ,L_SCR3+2
set Y,FP_SCR0
set Y_Hi,Y+4
set Y_Lo,Y+8
set R,FP_SCR1
set R_Hi,R+4
set R_Lo,R+8
Scale:
long 0x00010000,0x80000000,0x00000000,0x00000000
global smod
smod:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp) # save ctrl bits
clr.b Mod_Flag(%a6)
bra.b Mod_Rem
global srem
srem:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp) # save ctrl bits
mov.b &0x1,Mod_Flag(%a6)
Mod_Rem:
#..Save sign of X and Y
movm.l &0x3f00,-(%sp) # save data registers
mov.w SRC_EX(%a0),%d3
mov.w %d3,SignY(%a6)
and.l &0x00007FFF,%d3 # Y := |Y|
#
mov.l SRC_HI(%a0),%d4
mov.l SRC_LO(%a0),%d5 # (D3,D4,D5) is |Y|
tst.l %d3
bne.b Y_Normal
mov.l &0x00003FFE,%d3 # $3FFD + 1
tst.l %d4
bne.b HiY_not0
HiY_0:
mov.l %d5,%d4
clr.l %d5
sub.l &32,%d3
clr.l %d6
bfffo %d4{&0:&32},%d6
lsl.l %d6,%d4
sub.l %d6,%d3 # (D3,D4,D5) is normalized
# ...with bias $7FFD
bra.b Chk_X
HiY_not0:
clr.l %d6
bfffo %d4{&0:&32},%d6
sub.l %d6,%d3
lsl.l %d6,%d4
mov.l %d5,%d7 # a copy of D5
lsl.l %d6,%d5
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d4 # (D3,D4,D5) normalized
# ...with bias $7FFD
bra.b Chk_X
Y_Normal:
add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
# ...with bias $7FFD
Chk_X:
mov.w DST_EX(%a1),%d0
mov.w %d0,SignX(%a6)
mov.w SignY(%a6),%d1
eor.l %d0,%d1
and.l &0x00008000,%d1
mov.w %d1,SignQ(%a6) # sign(Q) obtained
and.l &0x00007FFF,%d0
mov.l DST_HI(%a1),%d1
mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
tst.l %d0
bne.b X_Normal
mov.l &0x00003FFE,%d0
tst.l %d1
bne.b HiX_not0
HiX_0:
mov.l %d2,%d1
clr.l %d2
sub.l &32,%d0
clr.l %d6
bfffo %d1{&0:&32},%d6
lsl.l %d6,%d1
sub.l %d6,%d0 # (D0,D1,D2) is normalized
# ...with bias $7FFD
bra.b Init
HiX_not0:
clr.l %d6
bfffo %d1{&0:&32},%d6
sub.l %d6,%d0
lsl.l %d6,%d1
mov.l %d2,%d7 # a copy of D2
lsl.l %d6,%d2
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d1 # (D0,D1,D2) normalized
# ...with bias $7FFD
bra.b Init
X_Normal:
add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
# ...with bias $7FFD
Init:
#
mov.l %d3,L_SCR1(%a6) # save biased exp(Y)
mov.l %d0,-(%sp) # save biased exp(X)
sub.l %d3,%d0 # L := expo(X)-expo(Y)
clr.l %d6 # D6 := carry <- 0
clr.l %d3 # D3 is Q
mov.l &0,%a1 # A1 is k; j+k=L, Q=0
#..(Carry,D1,D2) is R
tst.l %d0
bge.b Mod_Loop_pre
#..expo(X) < expo(Y). Thus X = mod(X,Y)
#
mov.l (%sp)+,%d0 # restore d0
bra.w Get_Mod
Mod_Loop_pre:
addq.l &0x4,%sp # erase exp(X)
#..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
Mod_Loop:
tst.l %d6 # test carry bit
bgt.b R_GT_Y
#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
cmp.l %d1,%d4 # compare hi(R) and hi(Y)
bne.b R_NE_Y
cmp.l %d2,%d5 # compare lo(R) and lo(Y)
bne.b R_NE_Y
#..At this point, R = Y
bra.w Rem_is_0
R_NE_Y:
#..use the borrow of the previous compare
bcs.b R_LT_Y # borrow is set iff R < Y
R_GT_Y:
#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
sub.l %d5,%d2 # lo(R) - lo(Y)
subx.l %d4,%d1 # hi(R) - hi(Y)
clr.l %d6 # clear carry
addq.l &1,%d3 # Q := Q + 1
R_LT_Y:
#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
tst.l %d0 # see if j = 0.
beq.b PostLoop
add.l %d3,%d3 # Q := 2Q
add.l %d2,%d2 # lo(R) = 2lo(R)
roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
scs %d6 # set Carry if 2(R) overflows
addq.l &1,%a1 # k := k+1
subq.l &1,%d0 # j := j - 1
#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
bra.b Mod_Loop
PostLoop:
#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
#..normalize R.
mov.l L_SCR1(%a6),%d0 # new biased expo of R
tst.l %d1
bne.b HiR_not0
HiR_0:
mov.l %d2,%d1
clr.l %d2
sub.l &32,%d0
clr.l %d6
bfffo %d1{&0:&32},%d6
lsl.l %d6,%d1
sub.l %d6,%d0 # (D0,D1,D2) is normalized
# ...with bias $7FFD
bra.b Get_Mod
HiR_not0:
clr.l %d6
bfffo %d1{&0:&32},%d6
bmi.b Get_Mod # already normalized
sub.l %d6,%d0
lsl.l %d6,%d1
mov.l %d2,%d7 # a copy of D2
lsl.l %d6,%d2
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d1 # (D0,D1,D2) normalized
#
Get_Mod:
cmp.l %d0,&0x000041FE
bge.b No_Scale
Do_Scale:
mov.w %d0,R(%a6)
mov.l %d1,R_Hi(%a6)
mov.l %d2,R_Lo(%a6)
mov.l L_SCR1(%a6),%d6
mov.w %d6,Y(%a6)
mov.l %d4,Y_Hi(%a6)
mov.l %d5,Y_Lo(%a6)
fmov.x R(%a6),%fp0 # no exception
mov.b &1,Sc_Flag(%a6)
bra.b ModOrRem
No_Scale:
mov.l %d1,R_Hi(%a6)
mov.l %d2,R_Lo(%a6)
sub.l &0x3FFE,%d0
mov.w %d0,R(%a6)
mov.l L_SCR1(%a6),%d6
sub.l &0x3FFE,%d6
mov.l %d6,L_SCR1(%a6)
fmov.x R(%a6),%fp0
mov.w %d6,Y(%a6)
mov.l %d4,Y_Hi(%a6)
mov.l %d5,Y_Lo(%a6)
clr.b Sc_Flag(%a6)
#
ModOrRem:
tst.b Mod_Flag(%a6)
beq.b Fix_Sign
mov.l L_SCR1(%a6),%d6 # new biased expo(Y)
subq.l &1,%d6 # biased expo(Y/2)
cmp.l %d0,%d6
blt.b Fix_Sign
bgt.b Last_Sub
cmp.l %d1,%d4
bne.b Not_EQ
cmp.l %d2,%d5
bne.b Not_EQ
bra.w Tie_Case
Not_EQ:
bcs.b Fix_Sign
Last_Sub:
#
fsub.x Y(%a6),%fp0 # no exceptions
addq.l &1,%d3 # Q := Q + 1
#
Fix_Sign:
#..Get sign of X
mov.w SignX(%a6),%d6
bge.b Get_Q
fneg.x %fp0
#..Get Q
#
Get_Q:
clr.l %d6
mov.w SignQ(%a6),%d6 # D6 is sign(Q)
mov.l &8,%d7
lsr.l %d7,%d6
and.l &0x0000007F,%d3 # 7 bits of Q
or.l %d6,%d3 # sign and bits of Q
# swap %d3
# fmov.l %fpsr,%d6
# and.l &0xFF00FFFF,%d6
# or.l %d3,%d6
# fmov.l %d6,%fpsr # put Q in fpsr
mov.b %d3,FPSR_QBYTE(%a6) # put Q in fpsr
#
Restore:
movm.l (%sp)+,&0xfc # {%d2-%d7}
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
tst.b Sc_Flag(%a6)
beq.b Finish
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x Scale(%pc),%fp0 # may cause underflow
bra t_catch2
# the '040 package did this apparently to see if the dst operand for the
# preceding fmul was a denorm. but, it better not have been since the
# algorithm just got done playing with fp0 and expected no exceptions
# as a result. trust me...
# bra t_avoid_unsupp # check for denorm as a
# ;result of the scaling
Finish:
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x %fp0,%fp0 # capture exceptions & round
bra t_catch2
Rem_is_0:
#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
addq.l &1,%d3
cmp.l %d0,&8 # D0 is j
bge.b Q_Big
lsl.l %d0,%d3
bra.b Set_R_0
Q_Big:
clr.l %d3
Set_R_0:
fmov.s &0x00000000,%fp0
clr.b Sc_Flag(%a6)
bra.w Fix_Sign
Tie_Case:
#..Check parity of Q
mov.l %d3,%d6
and.l &0x00000001,%d6
tst.l %d6
beq.w Fix_Sign # Q is even
#..Q is odd, Q := Q + 1, signX := -signX
addq.l &1,%d3
mov.w SignX(%a6),%d6
eor.l &0x00008000,%d6
mov.w %d6,SignX(%a6)
bra.w Fix_Sign
#########################################################################
# XDEF **************************************************************** #
# tag(): return the optype of the input ext fp number #
# #
# This routine is used by the 060FPLSP. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# If it's an unnormalized zero, alter the operand and force it #
# to be a normal zero. #
# #
#########################################################################
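#
# A hedged C sketch of the classification below, written against a plain
# exponent/mantissa triple rather than the actual FTEMP_* layout (the helper
# and enum names are illustrative only):
#
#	enum optype { T_NORM, T_ZERO, T_INF, T_QNAN, T_DENORM, T_UNNORM };
#
#	enum optype tag_sketch(unsigned short exp, unsigned int hi, unsigned int lo)
#	{
#		exp &= 0x7fff;				/* strip sign bit      */
#		if (exp == 0x7fff)			/* max exponent        */
#			return ((hi & 0x7fffffff) | lo) ? T_QNAN : T_INF;
#		if (hi & 0x80000000)			/* j-bit set           */
#			return T_NORM;
#		if (exp != 0)				/* fixed by unnorm_fix */
#			return T_UNNORM;
#		return (hi | lo) ? T_DENORM : T_ZERO;
#	}
#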
global tag
tag:
mov.w FTEMP_EX(%a0), %d0 # extract exponent
andi.w &0x7fff, %d0 # strip off sign
cmpi.w %d0, &0x7fff # is (EXP == MAX)?
beq.b inf_or_nan_x
not_inf_or_nan_x:
btst &0x7,FTEMP_HI(%a0)
beq.b not_norm_x
is_norm_x:
mov.b &NORM, %d0
rts
not_norm_x:
tst.w %d0 # is exponent = 0?
bne.b is_unnorm_x
not_unnorm_x:
tst.l FTEMP_HI(%a0)
bne.b is_denorm_x
tst.l FTEMP_LO(%a0)
bne.b is_denorm_x
is_zero_x:
mov.b &ZERO, %d0
rts
is_denorm_x:
mov.b &DENORM, %d0
rts
is_unnorm_x:
bsr.l unnorm_fix # convert to norm,denorm,or zero
rts
is_unnorm_reg_x:
mov.b &UNNORM, %d0
rts
inf_or_nan_x:
tst.l FTEMP_LO(%a0)
bne.b is_nan_x
mov.l FTEMP_HI(%a0), %d0
and.l &0x7fffffff, %d0 # msb is a don't care!
bne.b is_nan_x
is_inf_x:
mov.b &INF, %d0
rts
is_nan_x:
mov.b &QNAN, %d0
rts
#############################################################
qnan: long 0x7fff0000, 0xffffffff, 0xffffffff
#########################################################################
# XDEF **************************************************************** #
# t_dz(): Handle 060FPLSP dz exception for "flogn" emulation. #
# t_dz2(): Handle 060FPLSP dz exception for "fatanh" emulation. #
# #
# These routines are used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand. #
# #
# OUTPUT ************************************************************** #
# fp0 = default DZ result. #
# #
# ALGORITHM *********************************************************** #
# Transcendental emulation for the 060FPLSP has detected that #
# a DZ exception should occur for the instruction. If DZ is disabled, #
# return the default result. #
# If DZ is enabled, the dst operand should be returned unscathed #
# in fp0 while fp1 is used to create a DZ exception so that the #
# operating system can log that such an event occurred. #
# #
#########################################################################
global t_dz
t_dz:
tst.b SRC_EX(%a0) # check sign for neg or pos
bpl.b dz_pinf # branch if pos sign
global t_dz2
t_dz2:
ori.l &dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
btst &dz_bit,FPCR_ENABLE(%a6)
bne.b dz_minf_ena
# dz is disabled. return a -INF.
fmov.s &0xff800000,%fp0 # return -INF
rts
# dz is enabled. create a dz exception so the user can record it
# but use fp1 instead. return the dst operand unscathed in fp0.
dz_minf_ena:
fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
fmov.l USER_FPCR(%a6),%fpcr
fmov.s &0xbf800000,%fp1 # load -1
fdiv.s &0x00000000,%fp1 # -1 / 0
rts
dz_pinf:
ori.l &dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
btst &dz_bit,FPCR_ENABLE(%a6)
bne.b dz_pinf_ena
# dz is disabled. return a +INF.
fmov.s &0x7f800000,%fp0 # return +INF
rts
# dz is enabled. create a dz exception so the user can record it
# but use fp1 instead. return the dst operand unscathed in fp0.
dz_pinf_ena:
fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
fmov.l USER_FPCR(%a6),%fpcr
fmov.s &0x3f800000,%fp1 # load +1
fdiv.s &0x00000000,%fp1 # +1 / 0
rts
#########################################################################
# XDEF **************************************************************** #
# t_operr(): Handle 060FPLSP OPERR exception during emulation. #
# #
# This routine is used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# fp1 = source operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# fp1 = unchanged #
# #
# ALGORITHM *********************************************************** #
# An operand error should occur as the result of transcendental #
# emulation in the 060FPLSP. If OPERR is disabled, just return a NAN #
# in fp0. If OPERR is enabled, return the dst operand unscathed in fp0 #
# and the source operand in fp1. Use fp2 to create an OPERR exception #
# so that the operating system can log the event. #
# #
#########################################################################
global t_operr
t_operr:
ori.l &opnan_mask,USER_FPSR(%a6) # set NAN/OPERR/AIOP
btst &operr_bit,FPCR_ENABLE(%a6)
bne.b operr_ena
# operr is disabled. return a QNAN in fp0
fmovm.x qnan(%pc),&0x80 # return QNAN
rts
# operr is enabled. create an operr exception so the user can record it
# but use fp2 instead. return the dst operand unscathed in fp0.
operr_ena:
fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x &0x04,-(%sp) # save fp2
fmov.s &0x7f800000,%fp2 # load +INF
fmul.s &0x00000000,%fp2 # +INF x 0
fmovm.x (%sp)+,&0x20 # restore fp2
rts
pls_huge:
long 0x7ffe0000,0xffffffff,0xffffffff
mns_huge:
long 0xfffe0000,0xffffffff,0xffffffff
pls_tiny:
long 0x00000000,0x80000000,0x00000000
mns_tiny:
long 0x80000000,0x80000000,0x00000000
#########################################################################
# XDEF **************************************************************** #
# t_unfl(): Handle 060FPLSP underflow exception during emulation. #
# t_unfl2(): Handle 060FPLSP underflow exception during #
# emulation. result always positive. #
# #
# This routine is used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default underflow result #
# #
# ALGORITHM *********************************************************** #
# An underflow should occur as the result of transcendental #
# emulation in the 060FPLSP. Create an underflow by using "fmul" #
# and two very small numbers of appropriate sign so the operating #
# system can log the event. #
# #
#########################################################################
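#
# The same trick in portable C, for illustration only: multiplying two
# values that are already at the bottom of the range raises a genuine
# underflow (and inexact) in the FPU, which an enabled exception handler
# can then observe and log.
#
#	#include <fenv.h>
#	#include <float.h>
#
#	int force_underflow(void)
#	{
#		volatile double t = DBL_MIN;		/* smallest normal */
#		feclearexcept(FE_ALL_EXCEPT);
#		t = t * DBL_MIN;			/* underflows to 0 */
#		return fetestexcept(FE_UNDERFLOW | FE_INEXACT) != 0;
#	}
#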
global t_unfl
t_unfl:
tst.b SRC_EX(%a0)
bpl.b unf_pos
global t_unfl2
t_unfl2:
ori.l &unfinx_mask+neg_mask,USER_FPSR(%a6) # set N/UNFL/INEX2/AUNFL/AINEX
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x mns_tiny(%pc),&0x80
fmul.x pls_tiny(%pc),%fp0
fmov.l %fpsr,%d0
rol.l &0x8,%d0
mov.b %d0,FPSR_CC(%a6)
rts
unf_pos:
ori.w &unfinx_mask,FPSR_EXCEPT(%a6) # set UNFL/INEX2/AUNFL/AINEX
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x pls_tiny(%pc),&0x80
fmul.x %fp0,%fp0
fmov.l %fpsr,%d0
rol.l &0x8,%d0
mov.b %d0,FPSR_CC(%a6)
rts
#########################################################################
# XDEF **************************************************************** #
# t_ovfl(): Handle 060FPLSP overflow exception during emulation. #
# (monadic) #
# t_ovfl2(): Handle 060FPLSP overflow exception during #
# emulation. result always positive. (dyadic) #
# t_ovfl_sc(): Handle 060FPLSP overflow exception during #
# emulation for "fscale". #
# #
# This routine is used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default underflow result #
# #
# ALGORITHM *********************************************************** #
# An overflow should occur as the result of transcendental #
# emulation in the 060FPLSP. Create an overflow by using "fmul" #
# and two very large numbers of appropriate sign so the operating #
# system can log the event. #
# For t_ovfl_sc() we take special care not to lose the INEX2 bit. #
# #
#########################################################################
global t_ovfl_sc
t_ovfl_sc:
ori.l &ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
mov.b %d0,%d1 # fetch rnd prec,mode
andi.b &0xc0,%d1 # extract prec
beq.w ovfl_work
# dst op is a DENORM. we have to normalize the mantissa to see if the
# result would be inexact for the given precision. make a copy of the
# dst so we don't screw up the version passed to us.
mov.w LOCAL_EX(%a0),FP_SCR0_EX(%a6)
mov.l LOCAL_HI(%a0),FP_SCR0_HI(%a6)
mov.l LOCAL_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass ptr to FP_SCR0
movm.l &0xc080,-(%sp) # save d0-d1/a0
bsr.l norm # normalize mantissa
movm.l (%sp)+,&0x0103 # restore d0-d1/a0
cmpi.b %d1,&0x40 # is precision sgl?
bne.b ovfl_sc_dbl # no; dbl
ovfl_sc_sgl:
tst.l LOCAL_LO(%a0) # is lo lw of sgl set?
bne.b ovfl_sc_inx # yes
tst.b 3+LOCAL_HI(%a0) # is lo byte of hi lw set?
bne.b ovfl_sc_inx # yes
bra.w ovfl_work # don't set INEX2
ovfl_sc_dbl:
mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
andi.l &0x7ff,%d1 # dbl mantissa set?
beq.w ovfl_work # no; don't set INEX2
ovfl_sc_inx:
ori.l &inex2_mask,USER_FPSR(%a6) # set INEX2
bra.b ovfl_work # continue
global t_ovfl
t_ovfl:
ori.w &ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
ovfl_work:
tst.b SRC_EX(%a0)
bpl.b ovfl_p
ovfl_m:
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x mns_huge(%pc),&0x80
fmul.x pls_huge(%pc),%fp0
fmov.l %fpsr,%d0
rol.l &0x8,%d0
ori.b &neg_mask,%d0
mov.b %d0,FPSR_CC(%a6)
rts
ovfl_p:
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x pls_huge(%pc),&0x80
fmul.x pls_huge(%pc),%fp0
fmov.l %fpsr,%d0
rol.l &0x8,%d0
mov.b %d0,FPSR_CC(%a6)
rts
global t_ovfl2
t_ovfl2:
ori.w &ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
fmov.l USER_FPCR(%a6),%fpcr
fmovm.x pls_huge(%pc),&0x80
fmul.x pls_huge(%pc),%fp0
fmov.l %fpsr,%d0
rol.l &0x8,%d0
mov.b %d0,FPSR_CC(%a6)
rts
#########################################################################
# XDEF **************************************************************** #
# t_catch(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during #
# emulation. #
# t_catch2(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during #
# emulation. #
# #
# These routines are used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# fp0 = default underflow or overflow result #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# If an overflow or underflow occurred during the last #
# instruction of transcendental 060FPLSP emulation, then it has already #
# occurred and has been logged. Now we need to see if an inexact #
# exception should occur. #
# #
#########################################################################
global t_catch2
t_catch2:
fmov.l %fpsr,%d0
or.l %d0,USER_FPSR(%a6)
bra.b inx2_work
global t_catch
t_catch:
fmov.l %fpsr,%d0
or.l %d0,USER_FPSR(%a6)
#########################################################################
# XDEF **************************************************************** #
# t_inx2(): Handle inexact 060FPLSP exception during emulation. #
# t_pinx2(): Handle inexact 060FPLSP exception for "+" results. #
# t_minx2(): Handle inexact 060FPLSP exception for "-" results. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# fp0 = default result #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# The last instruction of transcendental emulation for the #
# 060FPLSP should be inexact. So, if inexact is enabled, then we create #
# the event here by adding a large and very small number together #
# so that the operating system can log the event. #
# Must check, too, if the result was zero, in which case we just #
# set the FPSR bits and return. #
# #
#########################################################################
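#
# In C terms, the event created in inx2_work_ena below is analogous to the
# sketch here: adding a value far below one ulp of 1.0 cannot be represented
# exactly, so the add raises the inexact flag.
#
#	#include <fenv.h>
#	#include <float.h>
#
#	int force_inexact(void)
#	{
#		volatile double x = 1.0;
#		feclearexcept(FE_ALL_EXCEPT);
#		x = x + DBL_MIN;			/* rounds back to 1.0 */
#		return fetestexcept(FE_INEXACT) != 0;
#	}
#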
global t_inx2
t_inx2:
fblt.w t_minx2
fbeq.w inx2_zero
global t_pinx2
t_pinx2:
ori.w &inx2a_mask,FPSR_EXCEPT(%a6) # set INEX2/AINEX
bra.b inx2_work
global t_minx2
t_minx2:
ori.l &inx2a_mask+neg_mask,USER_FPSR(%a6)
inx2_work:
btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
bne.b inx2_work_ena # yes
rts
inx2_work_ena:
fmov.l USER_FPCR(%a6),%fpcr # insert user's exceptions
fmov.s &0x3f800000,%fp1 # load +1
fadd.x pls_tiny(%pc),%fp1 # cause exception
rts
inx2_zero:
mov.b &z_bmask,FPSR_CC(%a6)
ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX/AINEX
rts
#########################################################################
# XDEF **************************************************************** #
# t_extdnrm(): Handle DENORM inputs in 060FPLSP. #
# t_resdnrm(): Handle DENORM inputs in 060FPLSP for "fscale". #
# #
# This routine is used by the 060FPLSP package. #
# #
# XREF **************************************************************** #
# None. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# For all functions that have a denormalized input and that #
# f(x)=x, this is the entry point. #
# DENORM value is moved using "fmove" which triggers an exception #
# if enabled so the operating system can log the event. #
# #
#########################################################################
global t_extdnrm
t_extdnrm:
fmov.l USER_FPCR(%a6),%fpcr
fmov.x SRC_EX(%a0),%fp0
fmov.l %fpsr,%d0
ori.l &unfinx_mask,%d0
or.l %d0,USER_FPSR(%a6)
rts
global t_resdnrm
t_resdnrm:
fmov.l USER_FPCR(%a6),%fpcr
fmov.x SRC_EX(%a0),%fp0
fmov.l %fpsr,%d0
or.l %d0,USER_FPSR(%a6)
rts
##########################################
#
# sto_cos:
# This is used by fsincos library emulation. The correct
# values are already in fp0 and fp1 so we do nothing here.
#
global sto_cos
sto_cos:
rts
##########################################
#
# dst_qnan --- force result when destination is a NaN
#
global dst_qnan
dst_qnan:
fmov.x DST(%a1),%fp0
tst.b DST_EX(%a1)
bmi.b dst_qnan_m
dst_qnan_p:
mov.b &nan_bmask,FPSR_CC(%a6)
rts
dst_qnan_m:
mov.b &nan_bmask+neg_bmask,FPSR_CC(%a6)
rts
#
# src_qnan --- force result when source is a NaN
#
global src_qnan
src_qnan:
fmov.x SRC(%a0),%fp0
tst.b SRC_EX(%a0)
bmi.b src_qnan_m
src_qnan_p:
mov.b &nan_bmask,FPSR_CC(%a6)
rts
src_qnan_m:
mov.b &nan_bmask+neg_bmask,FPSR_CC(%a6)
rts
##########################################
#
# Native instruction support
#
# Some systems may need entry points even for 68060 native
# instructions. These routines are provided for
# convenience.
#
global _fadds_
_fadds_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.s 0x8(%sp),%fp0 # load sgl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fadd.s 0x8(%sp),%fp0 # fadd w/ sgl src
rts
global _faddd_
_faddd_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.d 0x8(%sp),%fp0 # load dbl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fadd.d 0xc(%sp),%fp0 # fadd w/ dbl src
rts
global _faddx_
_faddx_:
fmovm.x 0x4(%sp),&0x80 # load ext dst
fadd.x 0x10(%sp),%fp0 # fadd w/ ext src
rts
global _fsubs_
_fsubs_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.s 0x8(%sp),%fp0 # load sgl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fsub.s 0x8(%sp),%fp0 # fsub w/ sgl src
rts
global _fsubd_
_fsubd_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.d 0x8(%sp),%fp0 # load dbl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fsub.d 0xc(%sp),%fp0 # fsub w/ dbl src
rts
global _fsubx_
_fsubx_:
fmovm.x 0x4(%sp),&0x80 # load ext dst
fsub.x 0x10(%sp),%fp0 # fsub w/ ext src
rts
global _fmuls_
_fmuls_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.s 0x8(%sp),%fp0 # load sgl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fmul.s 0x8(%sp),%fp0 # fmul w/ sgl src
rts
global _fmuld_
_fmuld_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.d 0x8(%sp),%fp0 # load dbl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fmul.d 0xc(%sp),%fp0 # fmul w/ dbl src
rts
global _fmulx_
_fmulx_:
fmovm.x 0x4(%sp),&0x80 # load ext dst
fmul.x 0x10(%sp),%fp0 # fmul w/ ext src
rts
global _fdivs_
_fdivs_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.s 0x8(%sp),%fp0 # load sgl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fdiv.s 0x8(%sp),%fp0 # fdiv w/ sgl src
rts
global _fdivd_
_fdivd_:
fmov.l %fpcr,-(%sp) # save fpcr
fmov.l &0x00000000,%fpcr # clear fpcr for load
fmov.d 0x8(%sp),%fp0 # load dbl dst
fmov.l (%sp)+,%fpcr # restore fpcr
fdiv.d 0xc(%sp),%fp0 # fdiv w/ dbl src
rts
global _fdivx_
_fdivx_:
fmovm.x 0x4(%sp),&0x80 # load ext dst
fdiv.x 0x10(%sp),%fp0 # fdiv w/ ext src
rts
global _fabss_
_fabss_:
fabs.s 0x4(%sp),%fp0 # fabs w/ sgl src
rts
global _fabsd_
_fabsd_:
fabs.d 0x4(%sp),%fp0 # fabs w/ dbl src
rts
global _fabsx_
_fabsx_:
fabs.x 0x4(%sp),%fp0 # fabs w/ ext src
rts
global _fnegs_
_fnegs_:
fneg.s 0x4(%sp),%fp0 # fneg w/ sgl src
rts
global _fnegd_
_fnegd_:
fneg.d 0x4(%sp),%fp0 # fneg w/ dbl src
rts
global _fnegx_
_fnegx_:
fneg.x 0x4(%sp),%fp0 # fneg w/ ext src
rts
global _fsqrts_
_fsqrts_:
fsqrt.s 0x4(%sp),%fp0 # fsqrt w/ sgl src
rts
global _fsqrtd_
_fsqrtd_:
fsqrt.d 0x4(%sp),%fp0 # fsqrt w/ dbl src
rts
global _fsqrtx_
_fsqrtx_:
fsqrt.x 0x4(%sp),%fp0 # fsqrt w/ ext src
rts
global _fints_
_fints_:
fint.s 0x4(%sp),%fp0 # fint w/ sgl src
rts
global _fintd_
_fintd_:
fint.d 0x4(%sp),%fp0 # fint w/ dbl src
rts
global _fintx_
_fintx_:
fint.x 0x4(%sp),%fp0 # fint w/ ext src
rts
global _fintrzs_
_fintrzs_:
fintrz.s 0x4(%sp),%fp0 # fintrz w/ sgl src
rts
global _fintrzd_
_fintrzd_:
fintrz.d 0x4(%sp),%fp0 # fintrz w/ dbl src
rts
global _fintrzx_
_fintrzx_:
fintrz.x 0x4(%sp),%fp0 # fintrz w/ ext src
rts
########################################################################
#########################################################################
# src_zero(): Return signed zero according to sign of src operand. #
#########################################################################
global src_zero
src_zero:
tst.b SRC_EX(%a0) # get sign of src operand
bmi.b ld_mzero # if neg, load neg zero
#
# ld_pzero(): return a positive zero.
#
global ld_pzero
ld_pzero:
fmov.s &0x00000000,%fp0 # load +0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
# ld_mzero(): return a negative zero.
global ld_mzero
ld_mzero:
fmov.s &0x80000000,%fp0 # load -0
mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
rts
#########################################################################
# dst_zero(): Return signed zero according to sign of dst operand. #
#########################################################################
global dst_zero
dst_zero:
tst.b DST_EX(%a1) # get sign of dst operand
bmi.b ld_mzero # if neg, load neg zero
bra.b ld_pzero # load positive zero
#########################################################################
# src_inf(): Return signed inf according to sign of src operand. #
#########################################################################
global src_inf
src_inf:
tst.b SRC_EX(%a0) # get sign of src operand
bmi.b ld_minf # if negative branch
#
# ld_pinf(): return a positive infinity.
#
global ld_pinf
ld_pinf:
fmov.s &0x7f800000,%fp0 # load +INF
mov.b &inf_bmask,FPSR_CC(%a6) # set 'INF' ccode bit
rts
#
# ld_minf():return a negative infinity.
#
global ld_minf
ld_minf:
fmov.s &0xff800000,%fp0 # load -INF
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# dst_inf(): Return signed inf according to sign of dst operand. #
#########################################################################
global dst_inf
dst_inf:
tst.b DST_EX(%a1) # get sign of dst operand
bmi.b ld_minf # if negative branch
bra.b ld_pinf
global szr_inf
#################################################################
# szr_inf(): Return +ZERO for a negative src operand or #
# +INF for a positive src operand. #
# Routine used for fetox, ftwotox, and ftentox. #
#################################################################
szr_inf:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_pzero
bra.b ld_pinf
#########################################################################
# sopr_inf(): Return +INF for a positive src operand or #
# jump to operand error routine for a negative src operand. #
# Routine used for flogn, flognp1, flog10, and flog2. #
#########################################################################
global sopr_inf
sopr_inf:
tst.b SRC_EX(%a0) # check sign of source
bmi.w t_operr
bra.b ld_pinf
#################################################################
# setoxm1i(): Return minus one for a negative src operand or #
# positive infinity for a positive src operand. #
# Routine used for fetoxm1. #
#################################################################
global setoxm1i
setoxm1i:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mone
bra.b ld_pinf
#########################################################################
# src_one(): Return signed one according to sign of src operand. #
#########################################################################
global src_one
src_one:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mone
#
# ld_pone(): return positive one.
#
global ld_pone
ld_pone:
fmov.s &0x3f800000,%fp0 # load +1
clr.b FPSR_CC(%a6)
rts
#
# ld_mone(): return negative one.
#
global ld_mone
ld_mone:
fmov.s &0xbf800000,%fp0 # load -1
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235
mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235
#################################################################
# spi_2(): Return signed PI/2 according to sign of src operand. #
#################################################################
global spi_2
spi_2:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mpi2
#
# ld_ppi2(): return positive PI/2.
#
global ld_ppi2
ld_ppi2:
fmov.l %d0,%fpcr
fmov.x ppiby2(%pc),%fp0 # load +pi/2
bra.w t_pinx2 # set INEX2
#
# ld_mpi2(): return negative PI/2.
#
global ld_mpi2
ld_mpi2:
fmov.l %d0,%fpcr
fmov.x mpiby2(%pc),%fp0 # load -pi/2
bra.w t_minx2 # set INEX2
####################################################
# The following routines give support for fsincos. #
####################################################
#
# ssincosz(): When the src operand is ZERO, store a one in the
# cosine register and return a ZERO in fp0 w/ the same sign
# as the src operand.
#
global ssincosz
ssincosz:
fmov.s &0x3f800000,%fp1
tst.b SRC_EX(%a0) # test sign
bpl.b sincoszp
fmov.s &0x80000000,%fp0 # return sin result in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6)
rts
sincoszp:
fmov.s &0x00000000,%fp0 # return sin result in fp0
mov.b &z_bmask,FPSR_CC(%a6)
rts
#
# ssincosi(): When the src operand is INF, store a QNAN in the cosine
# register and jump to the operand error routine for negative
# src operands.
#
global ssincosi
ssincosi:
fmov.x qnan(%pc),%fp1 # load NAN
bra.w t_operr
#
# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
# register and branch to the src QNAN routine.
#
global ssincosqnan
ssincosqnan:
fmov.x LOCAL_EX(%a0),%fp1
bra.w src_qnan
########################################################################
global smod_sdnrm
global smod_snorm
smod_sdnrm:
smod_snorm:
mov.b DTAG(%a6),%d1
beq.l smod
cmpi.b %d1,&ZERO
beq.w smod_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l smod
bra.l dst_qnan
global smod_szero
smod_szero:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&ZERO
beq.l t_operr
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l t_operr
bra.l dst_qnan
global smod_sinf
smod_sinf:
mov.b DTAG(%a6),%d1
beq.l smod_fpn
cmpi.b %d1,&ZERO
beq.l smod_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l smod_fpn
bra.l dst_qnan
smod_zro:
srem_zro:
mov.b SRC_EX(%a0),%d1 # get src sign
mov.b DST_EX(%a1),%d0 # get dst sign
eor.b %d0,%d1 # get qbyte sign
andi.b &0x80,%d1
mov.b %d1,FPSR_QBYTE(%a6)
tst.b %d0
bpl.w ld_pzero
bra.w ld_mzero
smod_fpn:
srem_fpn:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp)
mov.b SRC_EX(%a0),%d1 # get src sign
mov.b DST_EX(%a1),%d0 # get dst sign
eor.b %d0,%d1 # get qbyte sign
andi.b &0x80,%d1
mov.b %d1,FPSR_QBYTE(%a6)
cmpi.b DTAG(%a6),&DENORM
bne.b smod_nrm
lea DST(%a1),%a0
mov.l (%sp)+,%d0
bra t_resdnrm
smod_nrm:
fmov.l (%sp)+,%fpcr
fmov.x DST(%a1),%fp0
tst.b DST_EX(%a1)
bmi.b smod_nrm_neg
rts
smod_nrm_neg:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' code
rts
#########################################################################
global srem_snorm
global srem_sdnrm
srem_sdnrm:
srem_snorm:
mov.b DTAG(%a6),%d1
beq.l srem
cmpi.b %d1,&ZERO
beq.w srem_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l srem
bra.l dst_qnan
global srem_szero
srem_szero:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&ZERO
beq.l t_operr
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l t_operr
bra.l dst_qnan
global srem_sinf
srem_sinf:
mov.b DTAG(%a6),%d1
beq.w srem_fpn
cmpi.b %d1,&ZERO
beq.w srem_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l srem_fpn
bra.l dst_qnan
#########################################################################
global sscale_snorm
global sscale_sdnrm
sscale_snorm:
sscale_sdnrm:
mov.b DTAG(%a6),%d1
beq.l sscale
cmpi.b %d1,&ZERO
beq.l dst_zero
cmpi.b %d1,&INF
beq.l dst_inf
cmpi.b %d1,&DENORM
beq.l sscale
bra.l dst_qnan
global sscale_szero
sscale_szero:
mov.b DTAG(%a6),%d1
beq.l sscale
cmpi.b %d1,&ZERO
beq.l dst_zero
cmpi.b %d1,&INF
beq.l dst_inf
cmpi.b %d1,&DENORM
beq.l sscale
bra.l dst_qnan
global sscale_sinf
sscale_sinf:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l t_operr
########################################################################
global sop_sqnan
sop_sqnan:
mov.b DTAG(%a6),%d1
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l src_qnan
#########################################################################
# norm(): normalize the mantissa of an extended precision input. the #
# input operand should not be normalized already. #
# #
# XDEF **************************************************************** #
# norm() #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# a0 = pointer fp extended precision operand to normalize #
# #
# OUTPUT ************************************************************** #
# d0 = number of bit positions the mantissa was shifted #
# a0 = the input operand's mantissa is normalized; the exponent #
# is unchanged. #
# #
#########################################################################
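#
# A C sketch of the same normalization on a single 64-bit mantissa (helper
# name illustrative; assumes the mantissa is non-zero, as the routine below
# also expects an unnormalized, non-zero input):
#
#	int norm_sketch(unsigned long long *mant)
#	{
#		int shift = 0;
#		while (!(*mant & 0x8000000000000000ULL)) {
#			*mant <<= 1;			/* shift j-bit into place */
#			shift++;
#		}
#		return shift;				/* caller fixes exponent  */
#	}
#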
global norm
norm:
mov.l %d2, -(%sp) # create some temp regs
mov.l %d3, -(%sp)
mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
bfffo %d0{&0:&32}, %d2 # how many places to shift?
beq.b norm_lo # hi(man) is all zeroes!
norm_hi:
lsl.l %d2, %d0 # left shift hi(man)
bfextu %d1{&0:%d2}, %d3 # extract lo bits
or.l %d3, %d0 # create hi(man)
lsl.l %d2, %d1 # create lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
norm_lo:
bfffo %d1{&0:&32}, %d2 # how many places to shift?
lsl.l %d2, %d1 # shift lo(man)
add.l &32, %d2 # add 32 to shft amount
mov.l %d1, FTEMP_HI(%a0) # store hi(man)
clr.l FTEMP_LO(%a0) # lo(man) is now zero
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
#########################################################################
# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
# - returns corresponding optype tag #
# #
# XDEF **************************************************************** #
# unnorm_fix() #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa #
# #
# INPUT *************************************************************** #
# a0 = pointer to unnormalized extended precision number #
# #
# OUTPUT ************************************************************** #
# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
# a0 = input operand has been converted to a norm, denorm, or #
# zero; both the exponent and mantissa are changed. #
# #
#########################################################################
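#
# Sketch of the same decision in C on a 64-bit mantissa (names are
# illustrative; T_* tags as in the tag_sketch comment earlier):
#
#	enum optype unnorm_fix_sketch(unsigned short *exp, unsigned long long *mant)
#	{
#		int e = *exp & 0x7fff, lead = 0;
#		unsigned long long m = *mant;
#
#		if (m == 0) {				/* really a zero        */
#			*exp &= 0x8000;
#			return T_ZERO;
#		}
#		while (!(m >> 63)) {			/* count leading zeroes */
#			m <<= 1;
#			lead++;
#		}
#		if (lead <= e) {			/* full shift fits      */
#			*mant = m;
#			*exp = (*exp & 0x8000) | (e - lead);
#			return T_NORM;
#		}
#		*mant <<= e;				/* shift until exp = 0  */
#		*exp &= 0x8000;
#		return T_DENORM;
#	}
#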
global unnorm_fix
unnorm_fix:
bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
bne.b unnorm_shift # hi(man) is not all zeroes
#
# hi(man) is all zeroes so see if any bits in lo(man) are set
#
unnorm_chk_lo:
bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
beq.w unnorm_zero # yes
add.w &32, %d0 # no; fix shift distance
#
# d0 = # shifts needed for complete normalization
#
unnorm_shift:
clr.l %d1 # clear top word
mov.w FTEMP_EX(%a0), %d1 # extract exponent
and.w &0x7fff, %d1 # strip off sgn
cmp.w %d0, %d1 # will denorm push exp < 0?
bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
#
# exponent would not go < 0. therefore, number stays normalized
#
sub.w %d0, %d1 # shift exponent value
mov.w FTEMP_EX(%a0), %d0 # load old exponent
and.w &0x8000, %d0 # save old sign
or.w %d0, %d1 # {sgn,new exp}
mov.w %d1, FTEMP_EX(%a0) # insert new exponent
bsr.l norm # normalize UNNORM
mov.b &NORM, %d0 # return new optype tag
rts
#
# exponent would go < 0, so only denormalize until exp = 0
#
unnorm_nrm_zero:
cmp.b %d1, &32 # is exp <= 32?
bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # extract new lo(man)
mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# only mantissa bits set are in lo(man)
#
unnorm_nrm_zero_lrg:
sub.w &32, %d1 # adjust shft amt by 32
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # left shift lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
clr.l FTEMP_LO(%a0) # lo(man) = 0
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# whole mantissa is zero so this UNNORM is actually a zero
#
unnorm_zero:
and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
mov.b &ZERO, %d0 # fix optype tag
rts
# arch/m68k/ifpsp060/src/pfpsp.S
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
# M68000 Hi-Performance Microprocessor Division
# M68060 Software Package
# Production Release P1.00 -- October 10, 1994
#
# M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
#
# THE SOFTWARE is provided on an "AS IS" basis and without warranty.
# To the maximum extent permitted by applicable law,
# MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
# and any warranty against infringement with regard to the SOFTWARE
# (INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
#
# To the maximum extent permitted by applicable law,
# IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
# (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
# BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
# ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
# Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
#
# You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
# so long as this entire notice is retained without alteration in any modified and/or
# redistributed versions, and that such modified versions are clearly identified as such.
# No licenses are granted by implication, estoppel or otherwise under any patents
# or trademarks of Motorola, Inc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# freal.s:
# This file is appended to the top of the 060FPSP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located
# after _060FPSP_TABLE.
# Also, subroutine stubs exist in this file (_fpsp_done for
# example) that are referenced by the FPSP package itself in order
# to call a given routine. The stub routine actually performs the
# callout. The FPSP code does a "bsr" to the stub routine. This
# extra layer of hierarchy adds a slight performance penalty but
# it makes the FPSP code easier to read and more maintainable.
#
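#
# Conceptually, the table that follows behaves like a set of callout hooks
# supplied by the operating system: each _real_* and _*mem_* stub below
# fetches its slot and jumps through it. A loose C analogy (the real table
# stores 32-bit offsets relative to _060FPSP_TABLE-0x80 rather than absolute
# pointers; all names here are illustrative only):
#
#	typedef void (*callout_t)(void);
#
#	struct fpsp_callouts {
#		callout_t bsun, snan, operr, ovfl, unfl, dz, inex, fline;
#		/* ... one slot per _off_* constant below ... */
#	};
#
#	static struct fpsp_callouts *callouts;	/* filled in by the OS */
#
#	static void real_ovfl(void)		/* like _real_ovfl     */
#	{
#		callouts->ovfl();
#	}
#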
set _off_bsun, 0x00
set _off_snan, 0x04
set _off_operr, 0x08
set _off_ovfl, 0x0c
set _off_unfl, 0x10
set _off_dz, 0x14
set _off_inex, 0x18
set _off_fline, 0x1c
set _off_fpu_dis, 0x20
set _off_trap, 0x24
set _off_trace, 0x28
set _off_access, 0x2c
set _off_done, 0x30
set _off_imr, 0x40
set _off_dmr, 0x44
set _off_dmw, 0x48
set _off_irw, 0x4c
set _off_irl, 0x50
set _off_drb, 0x54
set _off_drw, 0x58
set _off_drl, 0x5c
set _off_dwb, 0x60
set _off_dww, 0x64
set _off_dwl, 0x68
_060FPSP_TABLE:
###############################################################
# Here's the table of ENTRY POINTS for those linking the package.
bra.l _fpsp_snan
short 0x0000
bra.l _fpsp_operr
short 0x0000
bra.l _fpsp_ovfl
short 0x0000
bra.l _fpsp_unfl
short 0x0000
bra.l _fpsp_dz
short 0x0000
bra.l _fpsp_inex
short 0x0000
bra.l _fpsp_fline
short 0x0000
bra.l _fpsp_unsupp
short 0x0000
bra.l _fpsp_effadd
short 0x0000
space 56
###############################################################
global _fpsp_done
_fpsp_done:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_ovfl
_real_ovfl:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_unfl
_real_unfl:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_inex
_real_inex:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_bsun
_real_bsun:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_operr
_real_operr:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_snan
_real_snan:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_dz
_real_dz:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_fline
_real_fline:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_fpu_disabled
_real_fpu_disabled:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_trap
_real_trap:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_trace
_real_trace:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_access
_real_access:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#######################################
global _imem_read
_imem_read:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read
_dmem_read:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write
_dmem_write:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_word
_imem_read_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_long
_imem_read_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_byte
_dmem_read_byte:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_word
_dmem_read_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_long
_dmem_read_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_byte
_dmem_write_byte:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_word
_dmem_write_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_long
_dmem_write_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#
# This file contains a set of define statements for constants
# in order to promote readability within the corecode itself.
#
set LOCAL_SIZE, 192 # stack frame size(bytes)
set LV, -LOCAL_SIZE # stack offset
set EXC_SR, 0x4 # stack status register
set EXC_PC, 0x6 # stack pc
set EXC_VOFF, 0xa # stacked vector offset
set EXC_EA, 0xc # stacked <ea>
set EXC_FP, 0x0 # frame pointer
set EXC_AREGS, -68 # offset of all address regs
set EXC_DREGS, -100 # offset of all data regs
set EXC_FPREGS, -36 # offset of all fp regs
set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
set EXC_A5, EXC_AREGS+(5*4)
set EXC_A4, EXC_AREGS+(4*4)
set EXC_A3, EXC_AREGS+(3*4)
set EXC_A2, EXC_AREGS+(2*4)
set EXC_A1, EXC_AREGS+(1*4)
set EXC_A0, EXC_AREGS+(0*4)
set EXC_D7, EXC_DREGS+(7*4)
set EXC_D6, EXC_DREGS+(6*4)
set EXC_D5, EXC_DREGS+(5*4)
set EXC_D4, EXC_DREGS+(4*4)
set EXC_D3, EXC_DREGS+(3*4)
set EXC_D2, EXC_DREGS+(2*4)
set EXC_D1, EXC_DREGS+(1*4)
set EXC_D0, EXC_DREGS+(0*4)
set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
set FP_SCR1, LV+80 # fp scratch 1
set FP_SCR1_EX, FP_SCR1+0
set FP_SCR1_SGN, FP_SCR1+2
set FP_SCR1_HI, FP_SCR1+4
set FP_SCR1_LO, FP_SCR1+8
set FP_SCR0, LV+68 # fp scratch 0
set FP_SCR0_EX, FP_SCR0+0
set FP_SCR0_SGN, FP_SCR0+2
set FP_SCR0_HI, FP_SCR0+4
set FP_SCR0_LO, FP_SCR0+8
set FP_DST, LV+56 # fp destination operand
set FP_DST_EX, FP_DST+0
set FP_DST_SGN, FP_DST+2
set FP_DST_HI, FP_DST+4
set FP_DST_LO, FP_DST+8
set FP_SRC, LV+44 # fp source operand
set FP_SRC_EX, FP_SRC+0
set FP_SRC_SGN, FP_SRC+2
set FP_SRC_HI, FP_SRC+4
set FP_SRC_LO, FP_SRC+8
set USER_FPIAR, LV+40 # FP instr address register
set USER_FPSR, LV+36 # FP status register
set FPSR_CC, USER_FPSR+0 # FPSR condition codes
set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
set USER_FPCR, LV+32 # FP control register
set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
set L_SCR3, LV+28 # integer scratch 3
set L_SCR2, LV+24 # integer scratch 2
set L_SCR1, LV+20 # integer scratch 1
set STORE_FLG, LV+19 # flag: operand store (ie. not fcmp/ftst)
set EXC_TEMP2, LV+24 # temporary space
set EXC_TEMP, LV+16 # temporary space
set DTAG, LV+15 # destination operand type
set STAG, LV+14 # source operand type
set SPCOND_FLG, LV+10 # flag: special case (see below)
set EXC_CC, LV+8 # saved condition codes
set EXC_EXTWPTR, LV+4 # saved current PC (active)
set EXC_EXTWORD, LV+2 # saved extension word
set EXC_CMDREG, LV+2 # saved extension word
set EXC_OPWORD, LV+0 # saved operation word
################################
# Helpful macros
set FTEMP, 0 # offsets within an
set FTEMP_EX, 0 # extended precision
set FTEMP_SGN, 2 # value saved in memory.
set FTEMP_HI, 4
set FTEMP_LO, 8
set FTEMP_GRS, 12
set LOCAL, 0 # offsets within an
set LOCAL_EX, 0 # extended precision
set LOCAL_SGN, 2 # value saved in memory.
set LOCAL_HI, 4
set LOCAL_LO, 8
set LOCAL_GRS, 12
set DST, 0 # offsets within an
set DST_EX, 0 # extended precision
set DST_HI, 4 # value saved in memory.
set DST_LO, 8
set SRC, 0 # offsets within an
set SRC_EX, 0 # extended precision
set SRC_HI, 4 # value saved in memory.
set SRC_LO, 8
set SGL_LO, 0x3f81 # min sgl prec exponent
set SGL_HI, 0x407e # max sgl prec exponent
set DBL_LO, 0x3c01 # min dbl prec exponent
set DBL_HI, 0x43fe # max dbl prec exponent
set EXT_LO, 0x0 # min ext prec exponent
set EXT_HI, 0x7ffe # max ext prec exponent
set EXT_BIAS, 0x3fff # extended precision bias
set SGL_BIAS, 0x007f # single precision bias
set DBL_BIAS, 0x03ff # double precision bias
set NORM, 0x00 # operand type for STAG/DTAG
set ZERO, 0x01 # operand type for STAG/DTAG
set INF, 0x02 # operand type for STAG/DTAG
set QNAN, 0x03 # operand type for STAG/DTAG
set DENORM, 0x04 # operand type for STAG/DTAG
set SNAN, 0x05 # operand type for STAG/DTAG
set UNNORM, 0x06 # operand type for STAG/DTAG
##################
# FPSR/FPCR bits #
##################
set neg_bit, 0x3 # negative result
set z_bit, 0x2 # zero result
set inf_bit, 0x1 # infinite result
set nan_bit, 0x0 # NAN result
set q_sn_bit, 0x7 # sign bit of quotient byte
set bsun_bit, 7 # branch on unordered
set snan_bit, 6 # signalling NAN
set operr_bit, 5 # operand error
set ovfl_bit, 4 # overflow
set unfl_bit, 3 # underflow
set dz_bit, 2 # divide by zero
set inex2_bit, 1 # inexact result 2
set inex1_bit, 0 # inexact result 1
set aiop_bit, 7 # accrued inexact operation bit
set aovfl_bit, 6 # accrued overflow bit
set aunfl_bit, 5 # accrued underflow bit
set adz_bit, 4 # accrued dz bit
set ainex_bit, 3 # accrued inexact bit
#############################
# FPSR individual bit masks #
#############################
set neg_mask, 0x08000000 # negative bit mask (lw)
set inf_mask, 0x02000000 # infinity bit mask (lw)
set z_mask, 0x04000000 # zero bit mask (lw)
set nan_mask, 0x01000000 # nan bit mask (lw)
set neg_bmask, 0x08 # negative bit mask (byte)
set inf_bmask, 0x02 # infinity bit mask (byte)
set z_bmask, 0x04 # zero bit mask (byte)
set nan_bmask, 0x01 # nan bit mask (byte)
set bsun_mask, 0x00008000 # bsun exception mask
set snan_mask, 0x00004000 # snan exception mask
set operr_mask, 0x00002000 # operr exception mask
set ovfl_mask, 0x00001000 # overflow exception mask
set unfl_mask, 0x00000800 # underflow exception mask
set dz_mask, 0x00000400 # dz exception mask
set inex2_mask, 0x00000200 # inex2 exception mask
set inex1_mask, 0x00000100 # inex1 exception mask
set aiop_mask, 0x00000080 # accrued illegal operation
set aovfl_mask, 0x00000040 # accrued overflow
set aunfl_mask, 0x00000020 # accrued underflow
set adz_mask, 0x00000010 # accrued divide by zero
set ainex_mask, 0x00000008 # accrued inexact
######################################
# FPSR combinations used in the FPSP #
######################################
set dzinf_mask, inf_mask+dz_mask+adz_mask
set opnan_mask, nan_mask+operr_mask+aiop_mask
set nzi_mask, 0x01ffffff #clears N, Z, and I
set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
set inx1a_mask, inex1_mask+ainex_mask
set inx2a_mask, inex2_mask+ainex_mask
set snaniop_mask, nan_mask+snan_mask+aiop_mask
set snaniop2_mask, snan_mask+aiop_mask
set naniop_mask, nan_mask+aiop_mask
set neginf_mask, neg_mask+inf_mask
set infaiop_mask, inf_mask+aiop_mask
set negz_mask, neg_mask+z_mask
set opaop_mask, operr_mask+aiop_mask
set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
#########
# misc. #
#########
set rnd_stky_bit, 29 # stky bit pos in longword
set sign_bit, 0x7 # sign bit
set signan_bit, 0x6 # signalling nan bit
set sgl_thresh, 0x3f81 # minimum sgl exponent
set dbl_thresh, 0x3c01 # minimum dbl exponent
set x_mode, 0x0 # extended precision
set s_mode, 0x4 # single precision
set d_mode, 0x8 # double precision
set rn_mode, 0x0 # round-to-nearest
set rz_mode, 0x1 # round-to-zero
set rm_mode, 0x2 # round-to-minus-infinity
set rp_mode, 0x3 # round-to-plus-infinity
set mantissalen, 64 # length of mantissa in bits
set BYTE, 1 # len(byte) == 1 byte
set WORD, 2 # len(word) == 2 bytes
set LONG, 4 # len(longword) == 4 bytes
set BSUN_VEC, 0xc0 # bsun vector offset
set INEX_VEC, 0xc4 # inexact vector offset
set DZ_VEC, 0xc8 # dz vector offset
set UNFL_VEC, 0xcc # unfl vector offset
set OPERR_VEC, 0xd0 # operr vector offset
set OVFL_VEC, 0xd4 # ovfl vector offset
set SNAN_VEC, 0xd8 # snan vector offset
###########################
# SPecial CONDition FLaGs #
###########################
set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
set fbsun_flg, 0x02 # flag bit: bsun exception
set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
set mda7_flg, 0x08 # flag bit: -(a7) <ea>
set fmovm_flg, 0x40 # flag bit: fmovm instruction
set immed_flg, 0x80 # flag bit: &<data> <ea>
set ftrapcc_bit, 0x0
set fbsun_bit, 0x1
set mia7_bit, 0x2
set mda7_bit, 0x3
set immed_bit, 0x7
##################################
# TRANSCENDENTAL "LAST-OP" FLAGS #
##################################
set FMUL_OP, 0x0 # fmul instr performed last
set FDIV_OP, 0x1 # fdiv performed last
set FADD_OP, 0x2 # fadd performed last
set FMOV_OP, 0x3 # fmov performed last
#############
# CONSTANTS #
#############
T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
TWOBYPI:
long 0x3FE45F30,0x6DC9C883
#########################################################################
# XDEF **************************************************************** #
# _fpsp_ovfl(): 060FPSP entry point for FP Overflow exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Overflow exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
# _real_ovfl() - "callout" for Overflow exception enabled code #
# _real_inex() - "callout" for Inexact exception enabled code #
# _real_trace() - "callout" for Trace exception code #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Ovfl exception stack frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# Overflow Exception enabled: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# Overflow Exception disabled: #
# - The system stack is unchanged #
# - The "exception present" flag in the fsave frame is cleared #
# #
# ALGORITHM *********************************************************** #
# On the 060, if an FP overflow is present as the result of any #
# instruction, the 060 will take an overflow exception whether the #
# exception is enabled or disabled in the FPCR. For the disabled case, #
#	this handler emulates the instruction to determine what the correct	#
# default result should be for the operation. This default result is #
# then stored in either the FP regfile, data regfile, or memory. #
# Finally, the handler exits through the "callout" _fpsp_done() #
# denoting that no exceptional conditions exist within the machine. #
# If the exception is enabled, then this handler must create the #
#	exceptional operand and place it in the fsave state frame, and store	#
# the default result (only if the instruction is opclass 3). For #
# exceptions enabled, this handler must exit through the "callout" #
# _real_ovfl() so that the operating system enabled overflow handler #
# can handle this case. #
# Two other conditions exist. First, if overflow was disabled #
# but the inexact exception was enabled, this handler must exit #
# through the "callout" _real_inex() regardless of whether the result #
# was inexact. #
# Also, in the case of an opclass three instruction where #
# overflow was disabled and the trace exception was enabled, this #
# handler must exit through the "callout" _real_trace(). #
# #
#########################################################################
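#
# As a rough sketch only (this is not the exact instruction sequence that
# follows, just its decision flow in the C-like pseudocode style used
# elsewhere in this file):
#
#	emulate the instruction and store the default result;
#	if (FPCR.ovfl_enabled)
#		exit through _real_ovfl();	/* EXOP left in the fsave frame */
#	else if (FPCR.inex2_enabled)
#		exit through _real_inex();	/* even if the result was exact */
#	else
#		exit through _fpsp_done();	/* opclass 3 w/ trace: _real_trace() */
#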
global _fpsp_ovfl
_fpsp_ovfl:
#$# sub.l &24,%sp # make room for src/dst
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
bne.w fovfl_out
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
# since, I believe, only NORMs and DENORMs can come through here,
# maybe we can avoid the subroutine call.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bit five of the fp extension word separates the monadic and dyadic operations
# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
# will never take this exception.
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fovfl_extract # monadic
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fovfl_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fovfl_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
fovfl_extract:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
# maybe we can make these entry points ONLY the OVFL entry points of each routine.
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
# the operation has been emulated. the result is in fp0.
# the EXOP, if an exception occurred, is in fp1.
# we must save the default result regardless of whether
# traps are enabled or disabled.
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
# the exceptional possibilities we have left ourselves with are ONLY overflow
# and inexact. and, the inexact is such that overflow occurred and was disabled
# but inexact was enabled.
btst &ovfl_bit,FPCR_ENABLE(%a6)
bne.b fovfl_ovfl_on
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.b fovfl_inex_on
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
bra.l _fpsp_done
# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
# in fp1. now, simply jump to _real_ovfl()!
fovfl_ovfl_on:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.w &0xe005,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_ovfl
# overflow occurred but is disabled. meanwhile, inexact is enabled. Therefore,
# we must jump to real_inex().
fovfl_inex_on:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_inex
########################################################################
fovfl_out:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
# the src operand is definitely a NORM(!), so tag it as such
mov.b &NORM,STAG(%a6) # set src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout
btst &ovfl_bit,FPCR_ENABLE(%a6)
bne.w fovfl_ovfl_on
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.w fovfl_inex_on
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
btst &0x7,(%sp) # is trace on?
beq.l _fpsp_done # no
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
bra.l _real_trace
#########################################################################
# XDEF **************************************************************** #
# _fpsp_unfl(): 060FPSP entry point for FP Underflow exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Underflow exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
# _real_ovfl() - "callout" for Overflow exception enabled code #
# _real_inex() - "callout" for Inexact exception enabled code #
# _real_trace() - "callout" for Trace exception code #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Unfl exception stack frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# Underflow Exception enabled: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# Underflow Exception disabled: #
# - The system stack is unchanged #
# - The "exception present" flag in the fsave frame is cleared #
# #
# ALGORITHM *********************************************************** #
# On the 060, if an FP underflow is present as the result of any #
# instruction, the 060 will take an underflow exception whether the #
# exception is enabled or disabled in the FPCR. For the disabled case, #
#	this handler emulates the instruction to determine what the correct	#
# default result should be for the operation. This default result is #
# then stored in either the FP regfile, data regfile, or memory. #
# Finally, the handler exits through the "callout" _fpsp_done() #
# denoting that no exceptional conditions exist within the machine. #
# If the exception is enabled, then this handler must create the #
#	exceptional operand and place it in the fsave state frame, and store	#
# the default result (only if the instruction is opclass 3). For #
# exceptions enabled, this handler must exit through the "callout" #
#	_real_unfl() so that the operating system enabled underflow handler	#
# can handle this case. #
# Two other conditions exist. First, if underflow was disabled #
#	but the inexact exception was enabled and the result was inexact,	#
#	this handler must exit through the "callout" _real_inex().		#
# Also, in the case of an opclass three instruction where #
# underflow was disabled and the trace exception was enabled, this #
# handler must exit through the "callout" _real_trace(). #
# #
#########################################################################
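#
# As with the overflow handler above, a rough pseudocode sketch of the exit
# decision (assuming the default result has already been stored) is:
#
#	if (FPCR.unfl_enabled && FPSR.unfl)	/* skip bogus hw underflows */
#		exit through _real_unfl();
#	else if (FPCR.inex2_enabled && FPSR.inex2)
#		exit through _real_inex();
#	else
#		exit through _fpsp_done();	/* opclass 3 w/ trace: _real_trace() */
#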
global _fpsp_unfl
_fpsp_unfl:
#$# sub.l &24,%sp # make room for src/dst
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
bne.w funfl_out
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bit five of the fp ext word separates the monadic and dyadic operations
# that can pass through fpsp_unfl(). remember that fcmp, and ftst
# will never take this exception.
btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
beq.b funfl_extract # monadic
# now, what's left that's not dyadic is fsincos. we can distinguish it
# from all dyadics by its 0110xxx bit pattern
btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
bne.b funfl_extract # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b funfl_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
funfl_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
funfl_extract:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
andi.l &0x00ff01ff,USER_FPSR(%a6)
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
# maybe we can make these entry points ONLY the UNFL entry points of each routine.
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception. Since this is incorrect, we need to check
# if our emulation, after re-doing the operation, decided that
# no underflow was called for. We do these checks only in
# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
# special case will simply exit gracefully with the correct result.
# the exceptional possibilities we have left ourselves with are ONLY underflow
# and inexact. and, the inexact is such that underflow occurred and was disabled
# but inexact was enabled.
btst &unfl_bit,FPCR_ENABLE(%a6)
bne.b funfl_unfl_on
funfl_chkinex:
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.b funfl_inex_on
funfl_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
bra.l _fpsp_done
# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
# in fp1 (don't forget to save fp0). what to do now?
# well, we simply have to go to _real_unfl()!
funfl_unfl_on:
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception. Since this is incorrect, we check here to see
# if our emulation, after re-doing the operation, decided that
# no underflow was called for.
btst &unfl_bit,FPSR_EXCEPT(%a6)
beq.w funfl_chkinex
funfl_unfl_on2:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.w &0xe003,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_unfl
# underflow occurred but is disabled. meanwhile, inexact is enabled. Therefore,
# we must jump to real_inex().
funfl_inex_on:
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception.
# But, whether bogus or not, if inexact is enabled AND it occurred,
# then we have to branch to real_inex.
btst &inex2_bit,FPSR_EXCEPT(%a6)
beq.w funfl_exit
funfl_inex_on2:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_inex
#######################################################################
funfl_out:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
# the src operand is definitely a NORM(!), so tag it as such
mov.b &NORM,STAG(%a6) # set src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout
btst &unfl_bit,FPCR_ENABLE(%a6)
bne.w funfl_unfl_on2
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.w funfl_inex_on2
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
btst &0x7,(%sp) # is trace on?
beq.l _fpsp_done # no
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
bra.l _real_trace
#########################################################################
# XDEF **************************************************************** #
# _fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented #
# Data Type" exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Unimplemented Data Type exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_{word,long}() - read instruction word/longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# load_fpn1() - load src operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
# _real_inex() - "callout" to operating system inexact handler #
# _fpsp_done() - "callout" for exit; work all done #
# _real_trace() - "callout" for Trace enabled exception #
# funimp_skew() - adjust fsave src ops to "incorrect" value #
# _real_snan() - "callout" for SNAN exception #
# _real_operr() - "callout" for OPERR exception #
# _real_ovfl() - "callout" for OVFL exception #
# _real_unfl() - "callout" for UNFL exception #
# get_packed() - fetch packed operand from memory #
# #
# INPUT *************************************************************** #
# - The system stack contains the "Unimp Data Type" stk frame #
# - The fsave frame contains the ssrc op (for UNNORM/DENORM) #
# #
# OUTPUT ************************************************************** #
# If Inexact exception (opclass 3): #
# - The system stack is changed to an Inexact exception stk frame #
# If SNAN exception (opclass 3): #
# - The system stack is changed to an SNAN exception stk frame #
# If OPERR exception (opclass 3): #
# - The system stack is changed to an OPERR exception stk frame #
# If OVFL exception (opclass 3): #
# - The system stack is changed to an OVFL exception stk frame #
# If UNFL exception (opclass 3): #
# - The system stack is changed to an UNFL exception stack frame #
# If Trace exception enabled: #
# - The system stack is changed to a Trace exception stack frame #
# Else: (normal case) #
# - Correct result has been stored as appropriate #
# #
# ALGORITHM *********************************************************** #
# Two main instruction types can enter here: (1) DENORM or UNNORM #
# unimplemented data types. These can be either opclass 0,2 or 3 #
# instructions, and (2) PACKED unimplemented data format instructions #
# also of opclasses 0,2, or 3. #
# For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
# operand from the fsave state frame and the dst operand (if dyadic) #
# from the FP register file. The instruction is then emulated by #
# choosing an emulation routine from a table of routines indexed by #
# instruction type. Once the instruction has been emulated and result #
# saved, then we check to see if any enabled exceptions resulted from #
# instruction emulation. If none, then we exit through the "callout" #
# _fpsp_done(). If there is an enabled FP exception, then we insert #
# this exception into the FPU in the fsave state frame and then exit #
# through _fpsp_done(). #
# PACKED opclass 0 and 2 is similar in how the instruction is #
# emulated and exceptions handled. The differences occur in how the #
# handler loads the packed op (by calling get_packed() routine) and #
# by the fact that a Trace exception could be pending for PACKED ops. #
# If a Trace exception is pending, then the current exception stack #
# frame is changed to a Trace exception stack frame and an exit is #
# made through _real_trace(). #
# For UNNORM/DENORM opclass 3, the actual move out to memory is #
# performed by calling the routine fout(). If no exception should occur #
# as the result of emulation, then an exit either occurs through #
# _fpsp_done() or through _real_trace() if a Trace exception is pending #
# (a Trace stack frame must be created here, too). If an FP exception #
# should occur, then we must create an exception stack frame of that #
# type and jump to either _real_snan(), _real_operr(), _real_inex(), #
# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3 #
# emulation is performed in a similar manner. #
# #
#########################################################################
#
# (1) DENORM and UNNORM (unimplemented) data types:
#
# post-instruction
# *****************
# * EA *
# pre-instruction * *
# ***************** *****************
# * 0x0 * 0x0dc * * 0x3 * 0x0dc *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
#
# (2) PACKED format (unsupported) opclasses two and three:
# *****************
# * EA *
# * *
# *****************
# * 0x2 * 0x0dc *
# *****************
# * Next *
# * PC *
# *****************
# * SR *
# *****************
#
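#
# In outline (a sketch of the dispatch performed below; the labels are the
# ones defined later in this handler):
#
#	if (opclass 3)				/* fmove out */
#		goto fu_out;			/* packed splits off to fu_out_pack */
#	else if (opclass 2 && src format == packed)
#		goto fu_in_pack;
#	else					/* UNNORM/DENORM opclass 0,2 */
#		fix/tag the operands and emulate through tbl_unsupp;
#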
global _fpsp_unsupp
_fpsp_unsupp:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # save fp state
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
btst &0x5,EXC_SR(%a6) # user or supervisor mode?
bne.b fu_s
fu_u:
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # save on stack
bra.b fu_cont
# if the exception is an opclass zero or two unimplemented data type
# exception, then the a7' calculated here is wrong since it doesn't
# stack an ea. however, we don't need an a7' for this case anyways.
fu_s:
lea 0x4+EXC_EA(%a6),%a0 # load old a7'
mov.l %a0,EXC_A7(%a6) # save on stack
fu_cont:
# the FPIAR holds the "current PC" of the faulting instruction
# the FPIAR should be set correctly for ALL exceptions passing through
# this point.
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
############################
clr.b SPCOND_FLG(%a6) # clear special condition flag
# Separate opclass three (fpn-to-mem) ops since they have a different
# stack frame and protocol.
btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
bne.w fu_out # yes
# Separate packed opclass two instructions.
bfextu EXC_CMDREG(%a6){&0:&6},%d0
cmpi.b %d0,&0x13
beq.w fu_in_pack
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field
andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
# Opclass two w/ memory-to-fpn operation will have an incorrect extended
# precision format if the src format was single or double and the
# source data type was an INF, NAN, DENORM, or UNNORM
lea FP_SRC(%a6),%a0 # pass ptr to input
bsr.l fix_skewed_ops
# we don't know whether the src operand or the dst operand (or both) is the
# UNNORM or DENORM. call the function that tags the operand type. if the
# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2 # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2:
mov.b %d0,STAG(%a6) # save src optype tag
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
# bit five of the fp extension word separates the monadic and dyadic operations
# at this point
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fu_extract # monadic
cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
beq.b fu_extract # yes, so it's monadic, too
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
fu_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all dyadic ops
# OPERR : fsqrt(-NORM)
# OVFL : all except ftst,fcmp
# UNFL : all except ftst,fcmp
# DZ : fdiv
# INEX2 : all except ftst,fcmp
# INEX1 : none (packed doesn't go through here)
#
# we determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
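# in outline (a sketch of fu_in_ena below):
#	enabled_and_set = FPCR.ENABLE & FPSR.EXCEPT;
#	if (enabled_and_set)
#		service the highest-priority one; bfffo finds the highest set
#		bit, which is also the highest-priority exception since the
#		byte is ordered BSUN > SNAN > OPERR > OVFL > UNFL > DZ >
#		INEX2 > INEX1;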
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions set
bne.b fu_in_ena # some are enabled
fu_in_cont:
# fcmp and ftst do not store any result.
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
andi.b &0x38,%d0 # extract bits 3-5
cmpi.b %d0,&0x38 # is instr fcmp or ftst?
beq.b fu_in_exit # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l store_fpreg # store the result
fu_in_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
bra.l _fpsp_done
fu_in_ena:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_in_exc # there is at least one set
#
# No exceptions occurred that were also enabled. Now:
#
# if (OVFL && ovfl_disabled && inexact_enabled) {
# branch to _real_inex() (even if the result was exact!);
# } else {
# save the result in the proper fp reg (unless the op is fcmp or ftst);
# return;
# }
#
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.b fu_in_cont # no
fu_in_ovflchk:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.b fu_in_cont # no
bra.w fu_in_exc_ovfl # go insert overflow frame
#
# An exception occurred and that exception was enabled:
#
# shift enabled exception field into lo byte of d0;
# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
# /*
# * this is the case where we must call _real_inex() now or else
# * there will be no other way to pass it the exceptional operand
# */
# call _real_inex();
# } else {
# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
# }
#
fu_in_exc:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX? (6)
bne.b fu_in_exc_exit # no
# the enabled exception was inexact
btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
bne.w fu_in_exc_unfl # yes
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
bne.w fu_in_exc_ovfl # yes
# here, we insert the correct fsave status value into the fsave frame for the
# corresponding exception. the operand in the fsave frame should be the original
# src operand.
fu_in_exc_exit:
mov.l %d0,-(%sp) # save d0
bsr.l funimp_skew # skew sgl or dbl inputs
mov.l (%sp)+,%d0 # restore d0
mov.w (tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6
bra.l _fpsp_done
tbl_except:
short 0xe000,0xe006,0xe004,0xe005
short 0xe003,0xe002,0xe001,0xe001
fu_in_exc_unfl:
mov.w &0x4,%d0
bra.b fu_in_exc_exit
fu_in_exc_ovfl:
mov.w &0x03,%d0
bra.b fu_in_exc_exit
# If the input operand to this operation was opclass two and a single
# or double precision denorm, inf, or nan, the operand needs to be
# "corrected" in order to have the proper equivalent extended precision
# number.
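# For example (illustrative values only): a single precision DENORM arrives
# from the hardware with a biased exponent of 0x3f80. The code below clears
# the j-bit, normalizes the mantissa and, if the mantissa had to be shifted
# left by "shft" bits, makes the corrected extended precision exponent
# (0x3f81 - shft).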
global fix_skewed_ops
fix_skewed_ops:
bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
beq.b fso_sgl # yes
cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
beq.b fso_dbl # yes
rts # no
fso_sgl:
mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
beq.b fso_sgl_dnrm_zero # yes
cmpi.w %d0,&0x407f # no; is |exp| == $407f?
beq.b fso_infnan # yes
rts # no
fso_sgl_dnrm_zero:
andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
beq.b fso_zero # it's a skewed zero
fso_sgl_dnrm:
# here, we count on norm not to alter a0...
bsr.l norm # normalize mantissa
neg.w %d0 # -shft amt
addi.w &0x3f81,%d0 # adjust new exponent
andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
or.w %d0,LOCAL_EX(%a0) # insert new exponent
rts
fso_zero:
andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
rts
fso_infnan:
andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
rts
fso_dbl:
mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
beq.b fso_dbl_dnrm_zero # yes
cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
beq.b fso_infnan # yes
rts # no
fso_dbl_dnrm_zero:
andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
bne.b fso_dbl_dnrm # it's a skewed denorm
tst.l LOCAL_LO(%a0) # is it a zero?
beq.b fso_zero # yes
fso_dbl_dnrm:
# here, we count on norm not to alter a0...
bsr.l norm # normalize mantissa
neg.w %d0 # -shft amt
addi.w &0x3c01,%d0 # adjust new exponent
andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
or.w %d0,LOCAL_EX(%a0) # insert new exponent
rts
#################################################################
# fmove out took an unimplemented data type exception.
# the src operand is in FP_SRC. Call _fout() to write out the result and
# to determine which exceptions, if any, to take.
fu_out:
# Separate packed move outs from the UNNORM and DENORM move outs.
bfextu EXC_CMDREG(%a6){&3:&3},%d0
cmpi.b %d0,&0x3
beq.w fu_out_pack
cmpi.b %d0,&0x7
beq.w fu_out_pack
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field.
# fmove out doesn't affect ccodes.
and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
# call here. just figure out what it is...
mov.w FP_SRC_EX(%a6),%d0 # get exponent
andi.w &0x7fff,%d0 # strip sign
beq.b fu_out_denorm # it's a DENORM
lea FP_SRC(%a6),%a0
bsr.l unnorm_fix # yes; fix it
mov.b %d0,STAG(%a6)
bra.b fu_out_cont
fu_out_denorm:
mov.b &DENORM,STAG(%a6)
fu_out_cont:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
lea FP_SRC(%a6),%a0 # pass ptr to src operand
mov.l (%a6),EXC_A6(%a6) # in case a6 changes
bsr.l fout # call fmove out routine
# Exceptions in order of precedence:
# BSUN : none
# SNAN : none
# OPERR : fmove.{b,w,l} out of large UNNORM
# OVFL : fmove.{s,d}
# UNFL : fmove.{s,d,x}
# DZ : none
# INEX2 : all
# INEX1 : none (packed doesn't travel through here)
# determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_out_ena # some are enabled
fu_out_done:
mov.l EXC_A6(%a6),(%a6) # in case a6 changed
# on extended precision opclass three instructions using pre-decrement or
# post-increment addressing mode, the address register is not updated. if the
# address register was the stack pointer used from user mode, then let's update
# it here. if it was used from supervisor mode, then we have to handle this
# as a special case.
btst &0x5,EXC_SR(%a6)
bne.b fu_out_done_s
mov.l EXC_A7(%a6),%a0 # restore a7
mov.l %a0,%usp
fu_out_done_cont:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
btst &0x7,(%sp) # is trace on?
bne.b fu_out_trace # yes
bra.l _fpsp_done
# is the ea mode pre-decrement of the stack pointer from supervisor mode?
# ("fmov.x fpm,-(a7)") if so,
fu_out_done_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.b fu_out_done_cont
# the extended precision result is still in fp0. but, we need to save it
# somewhere on the stack until we can copy it to its final resting place.
# here, we're counting on the top of the stack to be the old place-holders
# for fp0/fp1 which have already been restored. that way, we can write
# over those destinations with the shifted stack frame.
fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
btst &0x7,(%sp)
bne.b fu_out_trace
bra.l _fpsp_done
fu_out_ena:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_out_exc # there is at least one set
# no enabled exceptions were set.
# if a disabled overflow occurred and inexact was enabled, then a branch to
# _real_inex() is made (even if the result was exact).
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.w fu_out_done # no
fu_out_ovflchk:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.w fu_out_done # no
bra.w fu_inex # yes
#
# The fp move out that took the "Unimplemented Data Type" exception was
# being traced. Since the stack frames are similar, get the "current" PC
# from FPIAR and put it in the trace stack frame then jump to _real_trace().
#
# UNSUPP FRAME TRACE FRAME
# ***************** *****************
# * EA * * Current *
# * * * PC *
# ***************** *****************
# * 0x3 * 0x0dc * * 0x2 * 0x024 *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
#
fu_out_trace:
mov.w &0x2024,0x6(%sp)
fmov.l %fpiar,0x8(%sp)
bra.l _real_trace
# an exception occurred and that exception was enabled.
fu_out_exc:
subi.l &24,%d0 # fix offset to be 0-8
# we don't mess with the existing fsave frame. just re-insert it and
# jump to the "_real_{}()" handler...
mov.w (tbl_fu_out.b,%pc,%d0.w*2),%d0
jmp (tbl_fu_out.b,%pc,%d0.w*1)
swbeg &0x8
tbl_fu_out:
short tbl_fu_out - tbl_fu_out # BSUN can't happen
short tbl_fu_out - tbl_fu_out # SNAN can't happen
short fu_operr - tbl_fu_out # OPERR
short fu_ovfl - tbl_fu_out # OVFL
short fu_unfl - tbl_fu_out # UNFL
short tbl_fu_out - tbl_fu_out # DZ can't happen
short fu_inex - tbl_fu_out # INEX2
short tbl_fu_out - tbl_fu_out # INEX1 won't make it here
# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
# frestore it.
fu_snan:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
mov.w &0xe006,2+FP_SRC(%a6)
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_snan
fu_operr:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
mov.w &0xe004,2+FP_SRC(%a6)
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_operr
fu_ovfl:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
mov.w &0xe005,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_ovfl
# underflow can happen for extended precision. extended precision opclass
# three instruction exceptions don't update the stack pointer. so, if the
# exception occurred from user mode, then simply update a7 and exit normally.
# if the exception occurred from supervisor mode, check if the <ea> mode was
# -(a7); if so, the exception stack frame must be shifted "down".
fu_unfl:
mov.l EXC_A6(%a6),(%a6) # restore a6
btst &0x5,EXC_SR(%a6)
bne.w fu_unfl_s
mov.l EXC_A7(%a6),%a0 # restore a7 whether we need
mov.l %a0,%usp # to or not...
fu_unfl_cont:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
mov.w &0xe003,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_unfl
fu_unfl_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
bne.b fu_unfl_cont
# the extended precision result is still in fp0. but, we need to save it
# somewhere on the stack until we can copy it to its final resting place
# (where the exc frame is currently). make sure it's not at the top of the
# frame or it will get overwritten when the exc stack frame is shifted "down".
fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
mov.w &0xe003,2+FP_DST(%a6)
frestore FP_DST(%a6) # restore EXOP
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_unfl
# fmove in and out enter here.
fu_inex:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_inex
#########################################################################
#########################################################################
fu_in_pack:
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field
andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bsr.l get_packed # fetch packed src operand
lea FP_SRC(%a6),%a0 # pass ptr to src
bsr.l set_tag_x # set src optype tag
mov.b %d0,STAG(%a6) # save src optype tag
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
# bit five of the fp extension word separates the monadic and dyadic operations
# at this point
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fu_extract_p # monadic
cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
beq.b fu_extract_p # yes, so it's monadic, too
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_done_p # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_done_p:
mov.b %d0,DTAG(%a6) # save dst optype tag
fu_extract_p:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all dyadic ops
# OPERR : fsqrt(-NORM)
# OVFL : all except ftst,fcmp
# UNFL : all except ftst,fcmp
# DZ : fdiv
# INEX2 : all except ftst,fcmp
# INEX1 : all
#
# we determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_in_ena_p # some are enabled
fu_in_cont_p:
# fcmp and ftst do not store any result.
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
andi.b &0x38,%d0 # extract bits 3-5
cmpi.b %d0,&0x38 # is instr fcmp or ftst?
beq.b fu_in_exit_p # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l store_fpreg # store the result
fu_in_exit_p:
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.w fu_in_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_in_exit_cont_p:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
# the exception occurred in supervisor mode. check to see if the
# addressing mode was (a7)+. if so, we'll need to shift the
# stack frame "up".
fu_in_exit_s_p:
btst &mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
beq.b fu_in_exit_cont_p # no
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
# shift the stack frame "up". we don't really care about the <ea> field.
mov.l 0x4(%sp),0x10(%sp)
mov.l 0x0(%sp),0xc(%sp)
add.l &0xc,%sp
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
fu_in_ena_p:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled & set
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_in_exc_p # at least one was set
#
# No exceptions occurred that were also enabled. Now:
#
# if (OVFL && ovfl_disabled && inexact_enabled) {
# branch to _real_inex() (even if the result was exact!);
# } else {
# save the result in the proper fp reg (unless the op is fcmp or ftst);
# return;
# }
#
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.w fu_in_cont_p # no
fu_in_ovflchk_p:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.w fu_in_cont_p # no
bra.w fu_in_exc_ovfl_p # do _real_inex() now
#
# An exception occurred and that exception was enabled:
#
# shift enabled exception field into lo byte of d0;
# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
# /*
# * this is the case where we must call _real_inex() now or else
# * there will be no other way to pass it the exceptional operand
# */
# call _real_inex();
# } else {
# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
# }
#
fu_in_exc_p:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
blt.b fu_in_exc_exit_p # no
# the enabled exception was inexact
btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
bne.w fu_in_exc_unfl_p # yes
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
bne.w fu_in_exc_ovfl_p # yes
# here, we insert the correct fsave status value into the fsave frame for the
# corresponding exception. the operand in the fsave frame should be the original
# src operand.
# as a reminder for future predicted pain and agony, we are passing in fsave the
# "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
# this is INCORRECT for enabled SNAN which would give to the user the skewed SNAN!!!
fu_in_exc_exit_p:
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.w fu_in_exc_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_in_exc_exit_cont_p:
mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6
btst &0x7,(%sp) # is trace enabled?
bne.w fu_trace_p # yes
bra.l _fpsp_done
tbl_except_p:
short 0xe000,0xe006,0xe004,0xe005
short 0xe003,0xe002,0xe001,0xe001
fu_in_exc_ovfl_p:
mov.w &0x3,%d0
bra.w fu_in_exc_exit_p
fu_in_exc_unfl_p:
mov.w &0x4,%d0
bra.w fu_in_exc_exit_p
fu_in_exc_exit_s_p:
btst &mia7_bit,SPCOND_FLG(%a6)
beq.b fu_in_exc_exit_cont_p
mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6 # unravel stack frame
# shift stack frame "up". who cares about <ea> field.
mov.l 0x4(%sp),0x10(%sp)
mov.l 0x0(%sp),0xc(%sp)
add.l &0xc,%sp
btst &0x7,(%sp) # is trace on?
bne.b fu_trace_p # yes
bra.l _fpsp_done # exit to os
#
# The opclass two PACKED instruction that took an "Unimplemented Data Type"
# exception was being traced. Make the "current" PC the FPIAR and put it in the
# trace stack frame then jump to _real_trace().
#
# UNSUPP FRAME TRACE FRAME
# ***************** *****************
# * EA * * Current *
# * * * PC *
# ***************** *****************
# * 0x2 * 0x0dc * * 0x2 * 0x024 *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
fu_trace_p:
mov.w &0x2024,0x6(%sp)
fmov.l %fpiar,0x8(%sp)
bra.l _real_trace
#########################################################
#########################################################
fu_out_pack:
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field.
# fmove out doesn't affect ccodes.
and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l load_fpn1
# unlike other opclass 3 unimplemented data type exceptions, packed must be
# able to detect all operand types.
lea FP_SRC(%a6),%a0
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_p # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_p:
mov.b %d0,STAG(%a6) # save src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
lea FP_SRC(%a6),%a0 # pass ptr to src operand
mov.l (%a6),EXC_A6(%a6) # in case a6 changes
bsr.l fout # call fmove out routine
# Exceptions in order of precedence:
# BSUN : no
# SNAN : yes
# OPERR : if ((k_factor > +17) || (dec. exp exceeds 3 digits))
# OVFL : no
# UNFL : no
# DZ : no
# INEX2 : yes
# INEX1 : no
# determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_out_ena_p # some are enabled
fu_out_exit_p:
mov.l EXC_A6(%a6),(%a6) # restore a6
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.b fu_out_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_out_exit_cont_p:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
# the exception occurred in supervisor mode. check to see if the
# addressing mode was -(a7). if so, we'll need to shift the
# stack frame "down".
fu_out_exit_s_p:
btst &mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
beq.b fu_out_exit_cont_p # no
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
btst &0x7,(%sp)
bne.w fu_trace_p
bra.l _fpsp_done
fu_out_ena_p:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
beq.w fu_out_exit_p
mov.l EXC_A6(%a6),(%a6) # restore a6
# an exception occurred and that exception was enabled.
# the only exceptions possible on packed move out are INEX, OPERR, and SNAN.
fu_out_exc_p:
cmpi.b %d0,&0x1a
bgt.w fu_inex_p2
beq.w fu_operr_p
fu_snan_p:
btst &0x5,EXC_SR(%a6)
bne.b fu_snan_s_p
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_snan
fu_snan_s_p:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_snan
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_snan
fu_operr_p:
btst &0x5,EXC_SR(%a6)
bne.w fu_operr_p_s
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_operr
fu_operr_p_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_operr
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_operr
fu_inex_p2:
btst &0x5,EXC_SR(%a6)
bne.w fu_inex_s_p2
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_inex
fu_inex_s_p2:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_inex
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_inex
#########################################################################
#
# if we're stuffing a source operand back into an fsave frame then we
# have to make sure that for single or double source operands that the
# format stuffed is as weird as the hardware usually makes it.
#
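# For instance (a sketch based on funimp_skew_sgl below): a single precision
# DENORM whose corrected extended exponent lies in [1,0x3f80] gets its
# mantissa shifted back right, its j-bit set, and its exponent field forced
# back to the "skewed" value 0x3f80 -- i.e. back to the odd format the
# hardware itself would have left in the fsave frame.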
global funimp_skew
funimp_skew:
bfextu EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
cmpi.b %d0,&0x1 # was src sgl?
beq.b funimp_skew_sgl # yes
cmpi.b %d0,&0x5 # was src dbl?
beq.b funimp_skew_dbl # yes
rts
funimp_skew_sgl:
mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
andi.w &0x7fff,%d0 # strip sign
beq.b funimp_skew_sgl_not
cmpi.w %d0,&0x3f80
bgt.b funimp_skew_sgl_not
neg.w %d0 # make exponent negative
addi.w &0x3f81,%d0 # find amt to shift
mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
lsr.l %d0,%d1 # shift it
bset &31,%d1 # set j-bit
mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
funimp_skew_sgl_not:
rts
funimp_skew_dbl:
mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
andi.w &0x7fff,%d0 # strip sign
beq.b funimp_skew_dbl_not
cmpi.w %d0,&0x3c00
bgt.b funimp_skew_dbl_not
tst.b FP_SRC_EX(%a6) # make "internal format"
smi.b 0x2+FP_SRC(%a6)
mov.w %d0,FP_SRC_EX(%a6) # insert exponent with cleared sign
clr.l %d0 # clear g,r,s
lea FP_SRC(%a6),%a0 # pass ptr to src op
mov.w &0x3c01,%d1 # pass denorm threshold
bsr.l dnrm_lp # denorm it
mov.w &0x3c00,%d0 # new exponent
tst.b 0x2+FP_SRC(%a6) # is sign set?
beq.b fss_dbl_denorm_done # no
bset &15,%d0 # set sign
fss_dbl_denorm_done:
bset &0x7,FP_SRC_HI(%a6) # set j-bit
mov.w %d0,FP_SRC_EX(%a6) # insert new exponent
funimp_skew_dbl_not:
rts
#########################################################################
global _mem_write2
_mem_write2:
btst &0x5,EXC_SR(%a6)
beq.l _dmem_write
mov.l 0x0(%a0),FP_DST_EX(%a6)
mov.l 0x4(%a0),FP_DST_HI(%a6)
mov.l 0x8(%a0),FP_DST_LO(%a6)
clr.l %d1
rts
#########################################################################
# XDEF **************************************************************** #
# _fpsp_effadd(): 060FPSP entry point for FP "Unimplemented #
# effective address" exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Unimplemented Effective Address exception in an operating #
# system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
#	tbl_unsupp - address of table of emulation routines for opclass 0,2	#
# decbin() - convert packed data to FP binary data #
# _real_fpu_disabled() - "callout" for "FPU disabled" exception #
# _real_access() - "callout" for access error exception #
# _mem_read() - read extended immediate operand from memory #
# _fpsp_done() - "callout" for exit; work all done #
# _real_trace() - "callout" for Trace enabled exception #
# fmovm_dynamic() - emulate dynamic fmovm instruction #
# fmovm_ctrl() - emulate fmovm control instruction #
# #
# INPUT *************************************************************** #
# - The system stack contains the "Unimplemented <ea>" stk frame #
# #
# OUTPUT ************************************************************** #
# If access error: #
# - The system stack is changed to an access error stack frame #
# If FPU disabled: #
# - The system stack is changed to an FPU disabled stack frame #
# If Trace exception enabled: #
# - The system stack is changed to a Trace exception stack frame #
# Else: (normal case) #
# - None (correct result has been stored as appropriate) #
# #
# ALGORITHM *********************************************************** #
# This exception handles 3 types of operations: #
# (1) FP Instructions using extended precision or packed immediate #
# addressing mode. #
# (2) The "fmovm.x" instruction w/ dynamic register specification. #
# (3) The "fmovm.l" instruction w/ 2 or 3 control registers. #
# #
# For immediate data operations, the data is read in w/ a #
# _mem_read() "callout", converted to FP binary (if packed), and used #
# as the source operand to the instruction specified by the instruction #
#	word. If no FP exception should be reported as a result of the		#
# emulation, then the result is stored to the destination register and #
# the handler exits through _fpsp_done(). If an enabled exc has been #
# signalled as a result of emulation, then an fsave state frame #
# corresponding to the FP exception type must be entered into the 060 #
# FPU before exiting. In either the enabled or disabled cases, we #
# must also check if a Trace exception is pending, in which case, we #
# must create a Trace exception stack frame from the current exception #
# stack frame. If no Trace is pending, we simply exit through #
# _fpsp_done(). #
# For "fmovm.x", call the routine fmovm_dynamic() which will #
# decode and emulate the instruction. No FP exceptions can be pending #
# as a result of this operation emulation. A Trace exception can be #
# pending, though, which means the current stack frame must be changed #
# to a Trace stack frame and an exit made through _real_trace(). #
# For the case of "fmovm.x Dn,-(a7)", where the offending instruction #
# was executed from supervisor mode, this handler must store the FP #
# register file values to the system stack by itself since #
# fmovm_dynamic() can't handle this. A normal exit is made through #
#	_fpsp_done().							#
# For "fmovm.l", fmovm_ctrl() is used to emulate the instruction. #
# Again, a Trace exception may be pending and an exit made through #
# _real_trace(). Else, a normal exit is made through _fpsp_done(). #
# #
# Before any of the above is attempted, it must be checked to #
# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken #
# before the "FPU disabled" exception, but the "FPU disabled" exception #
# has higher priority, we check the disabled bit in the PCR. If set, #
# then we must create an 8 word "FPU disabled" exception stack frame #
# from the current 4 word exception stack frame. This includes #
# reproducing the effective address of the instruction to put on the #
# new stack frame. #
# #
# In the process of all emulation work, if a _mem_read() #
# "callout" returns a failing result indicating an access error, then #
# we must create an access error stack frame from the current stack #
# frame. This information includes a faulting address and a fault- #
# status-longword. These are created within this handler. #
# #
#########################################################################
global _fpsp_effadd
_fpsp_effadd:
# This exception type takes priority over the "Line F Emulator"
# exception. Therefore, the FPU could be disabled when entering here.
# So, we must check to see if it's disabled and handle that case separately.
mov.l %d0,-(%sp) # save d0
movc %pcr,%d0 # load proc cr
btst &0x1,%d0 # is FPU disabled?
bne.w iea_disabled # yes
mov.l (%sp)+,%d0 # restore d0
link %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# PC of instruction that took the exception is the PC in the frame
mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
#########################################################################
tst.w %d0 # is operation fmovem?
bmi.w iea_fmovm # yes
#
# here, we will have:
# fabs fdabs fsabs facos fmod
# fadd fdadd fsadd fasin frem
# fcmp fatan fscale
# fdiv fddiv fsdiv fatanh fsin
# fint fcos fsincos
# fintrz fcosh fsinh
# fmove fdmove fsmove fetox ftan
# fmul fdmul fsmul fetoxm1 ftanh
# fneg fdneg fsneg fgetexp ftentox
# fsgldiv fgetman ftwotox
# fsglmul flog10
# fsqrt flog2
# fsub fdsub fssub flogn
# ftst flognp1
# which can all use f<op>.{x,p}
# so, now it's immediate data extended precision AND PACKED FORMAT!
#
iea_op:
andi.l &0x00ff00ff,USER_FPSR(%a6)
btst &0xa,%d0 # is src fmt x or p?
bne.b iea_op_pack # packed
mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
lea FP_SRC(%a6),%a1 # pass: ptr to super addr
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _imem_read # read extended immediate
tst.l %d1 # did ifetch fail?
bne.w iea_iacc # yes
bra.b iea_op_setsrc
iea_op_pack:
mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
lea FP_SRC(%a6),%a1 # pass: ptr to super dst
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _imem_read # read packed operand
tst.l %d1 # did ifetch fail?
bne.w iea_iacc # yes
# The packed operand is an INF or a NAN if the exponent field is all ones.
bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
cmpi.w %d0,&0x7fff # INF or NAN?
beq.b iea_op_setsrc # operand is an INF or NAN
# The packed operand is a zero if the mantissa is all zero, else it's
# a normal packed op.
mov.b 3+FP_SRC(%a6),%d0 # get byte 4
andi.b &0x0f,%d0 # clear all but last nybble
bne.b iea_op_gp_not_spec # not a zero
tst.l FP_SRC_HI(%a6) # is lw 2 zero?
bne.b iea_op_gp_not_spec # not a zero
tst.l FP_SRC_LO(%a6) # is lw 3 zero?
beq.b iea_op_setsrc # operand is a ZERO
iea_op_gp_not_spec:
lea FP_SRC(%a6),%a0 # pass: ptr to packed op
bsr.l decbin # convert to extended
fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
iea_op_setsrc:
addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
# FP_SRC now holds the src operand.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # could be ANYTHING!!!
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b iea_op_getdst # no
bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
mov.b %d0,STAG(%a6) # set new optype tag
iea_op_getdst:
clr.b STORE_FLG(%a6) # clear "store result" boolean
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b iea_op_extract # monadic
btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
bne.b iea_op_spec # yes
iea_op_loaddst:
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
bsr.l load_fpn2 # load dst operand
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
mov.b %d0,DTAG(%a6) # could be ANYTHING!!!
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b iea_op_extract # no
bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
mov.b %d0,DTAG(%a6) # set new optype tag
bra.b iea_op_extract
# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
iea_op_spec:
btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
beq.b iea_op_extract # yes
# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
# store a result. then, only fcmp will branch back and pick up a dst operand.
st STORE_FLG(%a6) # don't store a final result
btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
beq.b iea_op_loaddst # yes
iea_op_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass: rnd mode,prec
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
fmov.l &0x0,%fpcr
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
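# two-level pc-relative dispatch: fetch the emulation routine's offset
# from tbl_unsupp (indexed by the extension bits scaled by 4), then jump
# pc-relative by that offset.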
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all operations
# OPERR : all reg-reg or mem-reg operations that can normally operr
# OVFL : same as OPERR
# UNFL : same as OPERR
# DZ : same as OPERR
# INEX2 : same as OPERR
# INEX1 : all packed immediate operations
#
# we determine the highest priority exception (if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.b iea_op_ena # some are enabled
# now, we save the result, unless, of course, the operation was ftst or fcmp.
# these don't save results.
iea_op_save:
tst.b STORE_FLG(%a6) # does this op store a result?
bne.b iea_op_exit1 # exit with no frestore
iea_op_store:
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
bsr.l store_fpreg # store the result
iea_op_exit1:
mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel the frame
btst &0x7,(%sp) # is trace on?
bne.w iea_op_trace # yes
bra.l _fpsp_done # exit to os
iea_op_ena:
	and.b		FPSR_EXCEPT(%a6),%d0	# keep only ones enabled and set
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b iea_op_exc # at least one was set
# no exception occurred. now, did a disabled, exact overflow occur with inexact
# enabled? if so, then we have to stuff an overflow frame into the FPU.
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
beq.b iea_op_save
iea_op_ovfl:
btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
beq.b iea_op_store # no
bra.b iea_op_exc_ovfl # yes
# an enabled exception occurred. we have to insert the exception type back into
# the machine.
iea_op_exc:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX?
bne.b iea_op_exc_force # no
# the enabled exception was inexact. so, if it occurs with an overflow
# or underflow that was disabled, then we have to force an overflow or
# underflow frame.
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
bne.b iea_op_exc_ovfl # yes
btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
bne.b iea_op_exc_unfl # yes
iea_op_exc_force:
mov.w (tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
bra.b iea_op_exit2 # exit with frestore
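# fsave status words written into the frame (cf. the "set fsave status"
# idiom used elsewhere in this handler) so that the frestore at
# iea_op_exit2 leaves the corresponding exception pending; indexed by
# priority: BSUN, SNAN, OPERR, OVFL, UNFL, DZ, INEX2, INEX1.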
tbl_iea_except:
short 0xe002, 0xe006, 0xe004, 0xe005
short 0xe003, 0xe002, 0xe001, 0xe001
iea_op_exc_ovfl:
mov.w &0xe005,2+FP_SRC(%a6)
bra.b iea_op_exit2
iea_op_exc_unfl:
mov.w &0xe003,2+FP_SRC(%a6)
iea_op_exit2:
mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore exceptional state
unlk %a6 # unravel the frame
btst &0x7,(%sp) # is trace on?
bne.b iea_op_trace # yes
bra.l _fpsp_done # exit to os
#
# The opclass two instruction that took an "Unimplemented Effective Address"
# exception was being traced. Make the "current" PC the FPIAR and put it in
# the trace stack frame then jump to _real_trace().
#
# UNIMP EA FRAME TRACE FRAME
# ***************** *****************
# * 0x0 * 0x0f0 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x024 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# *****************
# * SR *
# *****************
iea_op_trace:
mov.l (%sp),-(%sp) # shift stack frame "down"
mov.w 0x8(%sp),0x4(%sp)
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
bra.l _real_trace
#########################################################################
iea_fmovm:
btst &14,%d0 # ctrl or data reg
beq.w iea_fmovm_ctrl
iea_fmovm_data:
btst &0x5,EXC_SR(%a6) # user or supervisor mode
bne.b iea_fmovm_data_s
iea_fmovm_data_u:
mov.l %usp,%a0
mov.l %a0,EXC_A7(%a6) # store current a7
bsr.l fmovm_dynamic # do dynamic fmovm
mov.l EXC_A7(%a6),%a0 # load possibly new a7
mov.l %a0,%usp # update usp
bra.w iea_fmovm_exit
iea_fmovm_data_s:
clr.b SPCOND_FLG(%a6)
lea 0x2+EXC_VOFF(%a6),%a0
mov.l %a0,EXC_A7(%a6)
bsr.l fmovm_dynamic # do dynamic fmovm
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.w iea_fmovm_data_predec
cmpi.b SPCOND_FLG(%a6),&mia7_flg
bne.w iea_fmovm_exit
# right now, d0 = the size.
# the data has been fetched from the supervisor stack, but we have not
# incremented the stack pointer by the appropriate number of bytes.
# do it here.
iea_fmovm_data_postinc:
btst &0x7,EXC_SR(%a6)
bne.b iea_fmovm_data_pi_trace
mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
mov.l EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
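# stash the address of the rebuilt frame in the slot that %sp will point
# to after the unlk below; "mov.l (%sp)+,%sp" then pops it straight into
# the stack pointer so execution continues on the rebuilt frame.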
lea (EXC_SR,%a6,%d0),%a0
mov.l %a0,EXC_SR(%a6)
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp)+,%sp
bra.l _fpsp_done
iea_fmovm_data_pi_trace:
mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
lea (EXC_SR-0x4,%a6,%d0),%a0
mov.l %a0,EXC_SR(%a6)
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp)+,%sp
bra.l _real_trace
# right now, d0 = the size and d1 = the strg.
iea_fmovm_data_predec:
mov.b %d1,EXC_VOFF(%a6) # store strg
mov.b %d0,0x1+EXC_VOFF(%a6) # store size
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),-(%sp) # make a copy of a6
mov.l %d0,-(%sp) # save d0
mov.l %d1,-(%sp) # save d1
mov.l EXC_EXTWPTR(%a6),-(%sp) # make a copy of Next PC
clr.l %d0
mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
neg.l %d0 # get negative of size
btst &0x7,EXC_SR(%a6) # is trace enabled?
beq.b iea_fmovm_data_p2
mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
pea (%a6,%d0) # create final sp
bra.b iea_fmovm_data_p3
iea_fmovm_data_p2:
mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
mov.l (%sp)+,(EXC_PC,%a6,%d0)
mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
pea (0x4,%a6,%d0) # create final sp
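# walk the register-select mask (strg) from its msb; for each register
# selected, write that FP register to the save area that follows the
# rebuilt frame and advance the destination by 12 bytes (one extended
# precision operand).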
iea_fmovm_data_p3:
clr.l %d1
mov.b EXC_VOFF(%a6),%d1 # fetch strg
tst.b %d1
bpl.b fm_1
fmovm.x &0x80,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_1:
lsl.b &0x1,%d1
bpl.b fm_2
fmovm.x &0x40,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_2:
lsl.b &0x1,%d1
bpl.b fm_3
fmovm.x &0x20,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_3:
lsl.b &0x1,%d1
bpl.b fm_4
fmovm.x &0x10,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_4:
lsl.b &0x1,%d1
bpl.b fm_5
fmovm.x &0x08,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_5:
lsl.b &0x1,%d1
bpl.b fm_6
fmovm.x &0x04,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_6:
lsl.b &0x1,%d1
bpl.b fm_7
fmovm.x &0x02,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_7:
lsl.b &0x1,%d1
bpl.b fm_end
fmovm.x &0x01,(0x4+0x8,%a6,%d0)
fm_end:
mov.l 0x4(%sp),%d1
mov.l 0x8(%sp),%d0
mov.l 0xc(%sp),%a6
mov.l (%sp)+,%sp
btst &0x7,(%sp) # is trace enabled?
beq.l _fpsp_done
bra.l _real_trace
#########################################################################
iea_fmovm_ctrl:
bsr.l fmovm_ctrl # load ctrl regs
iea_fmovm_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
btst &0x7,EXC_SR(%a6) # is trace on?
bne.b iea_fmovm_trace # yes
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
unlk %a6 # unravel the frame
bra.l _fpsp_done # exit to os
#
# The control reg instruction that took an "Unimplemented Effective Address"
# exception was being traced. The "Current PC" for the trace frame is the
# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
# After fixing the stack frame, jump to _real_trace().
#
# UNIMP EA FRAME TRACE FRAME
# ***************** *****************
# * 0x0 * 0x0f0 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x024 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# *****************
# * SR *
# *****************
# this ain't a pretty solution, but it works:
# -restore a6 (not with unlk)
# -shift stack frame down over where old a6 used to be
# -add LOCAL_SIZE to stack pointer
iea_fmovm_trace:
mov.l (%a6),%a6 # restore frame pointer
mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
add.l &LOCAL_SIZE,%sp # clear stack frame
bra.l _real_trace
#########################################################################
# The FPU is disabled and so we should really have taken the "Line
# F Emulator" exception. So, here we create an 8-word stack frame
# from our 4-word stack frame. This means we must calculate the length
# of the faulting instruction to get the "next PC". This is trivial for
# immediate operands but requires some extra work for fmovm dynamic
# which can use most addressing modes.
iea_disabled:
mov.l (%sp)+,%d0 # restore d0
link %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
# PC of instruction that took the exception is the PC in the frame
mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
tst.w %d0 # is instr fmovm?
bmi.b iea_dis_fmovm # yes
# instruction is using an extended precision immediate operand. Therefore,
# the total instruction length is 16 bytes.
iea_dis_immed:
mov.l &0x10,%d0 # 16 bytes of instruction
bra.b iea_dis_cont
iea_dis_fmovm:
btst &0xe,%d0 # is instr fmovm ctrl
bne.b iea_dis_fmovm_data # no
# the instruction is a fmovm.l with 2 or 3 registers.
bfextu %d0{&19:&3},%d1
mov.l &0xc,%d0
cmpi.b %d1,&0x7 # move all regs?
bne.b iea_dis_cont
addq.l &0x4,%d0
bra.b iea_dis_cont
# the instruction is an fmovm.x dynamic which can use many addressing
# modes and thus can have several different total instruction lengths.
# call fmovm_calc_ea which will go through the ea calc process and,
# as a by-product, will tell us how long the instruction is.
iea_dis_fmovm_data:
clr.l %d0
bsr.l fmovm_calc_ea
mov.l EXC_EXTWPTR(%a6),%d0
sub.l EXC_PC(%a6),%d0
iea_dis_cont:
mov.w %d0,EXC_VOFF(%a6) # store stack shift value
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
# here, we actually create the 8-word frame from the 4-word frame,
# with the "next PC" as additional info.
# the <ea> field is left undefined.
subq.l &0x8,%sp # make room for new stack
mov.l %d0,-(%sp) # save d0
mov.w 0xc(%sp),0x4(%sp) # move SR
mov.l 0xe(%sp),0x6(%sp) # move Current PC
clr.l %d0
mov.w 0x12(%sp),%d0
mov.l 0x6(%sp),0x10(%sp) # move Current PC
add.l %d0,0x6(%sp) # make Next PC
mov.w &0x402c,0xa(%sp) # insert offset,frame format
mov.l (%sp)+,%d0 # restore d0
bra.l _real_fpu_disabled
##########
iea_iacc:
movc %pcr,%d0
btst &0x1,%d0
bne.b iea_iacc_cont
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
iea_iacc_cont:
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
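# fabricate a format 0x4 access error frame in place of the current one:
# grow the frame by 8 bytes, copy SR and PC, insert the frame word
# (format 0x4, vector offset 0x008), then stack the fault address and a
# fault status longword for _real_access().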
subq.w &0x8,%sp # make stack frame bigger
mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
mov.w &0x4008,0x6(%sp) # store voff
mov.l 0x2(%sp),0x8(%sp) # store ea
mov.l &0x09428001,0xc(%sp) # store fslw
iea_acc_done:
btst &0x5,(%sp) # user or supervisor mode?
beq.b iea_acc_done2 # user
bset &0x2,0xd(%sp) # set supervisor TM bit
iea_acc_done2:
bra.l _real_access
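# entered when a data-space access failed during emulation: a0 holds the
# fault address and d0 the fault status info; rebuild the frame as a
# format 0x4 access error frame and join the common exit above.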
iea_dacc:
lea -LOCAL_SIZE(%a6),%sp
movc %pcr,%d1
btst &0x1,%d1
bne.b iea_dacc_cont
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
fmovm.l LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
iea_dacc_cont:
mov.l (%a6),%a6
mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
add.w &LOCAL_SIZE-0x4,%sp
bra.b iea_acc_done
#########################################################################
# XDEF **************************************************************** #
# _fpsp_operr(): 060FPSP entry point for FP Operr exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Operand Error exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# _real_operr() - "callout" to operating system operr handler #
# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
# facc_out_{b,w,l}() - store to memory took access error (opcl 3) #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Operr exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# No access error: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP Operr exception is enabled, the goal #
# is to get to the handler specified at _real_operr(). But, on the 060, #
# for opclass zero and two instruction taking this exception, the #
# input operand in the fsave frame may be incorrect for some cases #
# and needs to be corrected. This handler calls fix_skewed_ops() to #
# do just this and then exits through _real_operr(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# operr result out to memory or data register file as it should. #
# This code must emulate the move out before finally exiting through #
#	_real_operr(). The move out, if to memory, is performed using	#
# _mem_write() "callout" routines that may return a failing result. #
# In this special case, the handler must exit through facc_out() #
# which creates an access error stack frame from the current operr #
# stack frame. #
# #
#########################################################################
global _fpsp_operr
_fpsp_operr:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.b foperr_out # fmove out
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source infinity or
# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
# cause an operr so we don't need to check for them here.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
foperr_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_operr
########################################################################
#
# the hardware does not save the default result to memory on enabled
# operand error exceptions. we do this here before passing control to
# the user operand error handler.
#
# byte, word, and long destination format operations can pass
# through here. we simply need to test the sign of the src
# operand and save the appropriate minimum or maximum integer value
# to the effective address as pointed to by the stacked effective address.
#
# although packed opclass three operations can take operand error
# exceptions, they won't pass through here since they are caught
# first by the unsupported data format exception handler. that handler
# sends them directly to _real_operr() if necessary.
#
foperr_out:
mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
andi.w &0x7fff,%d1
cmpi.w %d1,&0x7fff
bne.b foperr_out_not_qnan
# the operand is either an infinity or a QNAN.
tst.l FP_SRC_LO(%a6)
bne.b foperr_out_qnan
mov.l FP_SRC_HI(%a6),%d1
andi.l &0x7fffffff,%d1
beq.b foperr_out_not_qnan
foperr_out_qnan:
mov.l FP_SRC_HI(%a6),L_SCR1(%a6)
bra.b foperr_out_jmp
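# a non-NAN source stores the largest positive or negative integer as the
# default result: 0x7fffffff, rolled to 0x80000000 by the addq below when
# the source sign is set. the byte/word paths pick up the most significant
# byte/word of this longword.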
foperr_out_not_qnan:
mov.l &0x7fffffff,%d1
tst.b FP_SRC_EX(%a6)
bpl.b foperr_out_not_qnan2
addq.l &0x1,%d1
foperr_out_not_qnan2:
mov.l %d1,L_SCR1(%a6)
foperr_out_jmp:
bfextu %d0{&19:&3},%d0 # extract dst format field
mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
mov.w (tbl_operr.b,%pc,%d0.w*2),%a0
jmp (tbl_operr.b,%pc,%a0)
tbl_operr:
short foperr_out_l - tbl_operr # long word integer
short tbl_operr - tbl_operr # sgl prec shouldn't happen
short tbl_operr - tbl_operr # ext prec shouldn't happen
short foperr_exit - tbl_operr # packed won't enter here
short foperr_out_w - tbl_operr # word integer
short tbl_operr - tbl_operr # dbl prec shouldn't happen
short foperr_out_b - tbl_operr # byte integer
short tbl_operr - tbl_operr # packed won't enter here
foperr_out_b:
mov.b L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_b_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_byte # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
bra.w foperr_exit
foperr_out_b_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_b # store result to regfile
bra.w foperr_exit
foperr_out_w:
mov.w L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_w_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_word # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
bra.w foperr_exit
foperr_out_w_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_w # store result to regfile
bra.w foperr_exit
foperr_out_l:
mov.l L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_l_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w foperr_exit
foperr_out_l_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w foperr_exit
#########################################################################
# XDEF **************************************************************** #
# _fpsp_snan(): 060FPSP entry point for FP SNAN exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Signalling NAN exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# _real_snan() - "callout" to operating system SNAN handler #
# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
# facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3) #
# _calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea> #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP SNAN exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# No access error: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP SNAN exception is enabled, the goal #
# is to get to the handler specified at _real_snan(). But, on the 060, #
# for opclass zero and two instructions taking this exception, the #
# input operand in the fsave frame may be incorrect for some cases #
# and needs to be corrected. This handler calls fix_skewed_ops() to #
# do just this and then exits through _real_snan(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# SNAN result out to memory or data register file as it should. #
# This code must emulate the move out before finally exiting through #
# _real_snan(). The move out, if to memory, is performed using #
# _mem_write() "callout" routines that may return a failing result. #
# In this special case, the handler must exit through facc_out() #
# which creates an access error stack frame from the current SNAN #
# stack frame. #
# For the case of an extended precision opclass 3 instruction, #
# if the effective addressing mode was -() or ()+, then the address #
# register must get updated by calling _calc_ea_fout(). If the <ea> #
# was -(a7) from supervisor mode, then the exception frame currently #
# on the system stack must be carefully moved "down" to make room #
# for the operand being moved. #
# #
#########################################################################
global _fpsp_snan
_fpsp_snan:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.w fsnan_out # fmove out
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source infinity or
# denorm operand in the sgl or dbl format. NANs also become skewed and must be
# fixed here.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
fsnan_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_snan
########################################################################
#
# the hardware does not save the default result to memory on enabled
# snan exceptions. we do this here before passing control to
# the user snan handler.
#
# byte, word, long, and packed destination format operations can pass
# through here. since packed format operations already were handled by
# fpsp_unsupp(), then we need to do nothing else for them here.
# for byte, word, and long, we simply need to test the sign of the src
# operand and save the appropriate minimum or maximum integer value
# to the effective address as pointed to by the stacked effective address.
#
fsnan_out:
bfextu %d0{&19:&3},%d0 # extract dst format field
mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
mov.w (tbl_snan.b,%pc,%d0.w*2),%a0
jmp (tbl_snan.b,%pc,%a0)
tbl_snan:
short fsnan_out_l - tbl_snan # long word integer
	short		fsnan_out_s	-	tbl_snan # single precision real
	short		fsnan_out_x	-	tbl_snan # extended precision real
short tbl_snan - tbl_snan # packed needs no help
short fsnan_out_w - tbl_snan # word integer
	short		fsnan_out_d	-	tbl_snan # double precision real
short fsnan_out_b - tbl_snan # byte integer
short tbl_snan - tbl_snan # packed needs no help
fsnan_out_b:
mov.b FP_SRC_HI(%a6),%d0 # load upper byte of SNAN
bset &6,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_b_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_byte # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
bra.w fsnan_exit
fsnan_out_b_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_b # store result to regfile
bra.w fsnan_exit
fsnan_out_w:
mov.w FP_SRC_HI(%a6),%d0 # load upper word of SNAN
bset &14,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_w_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_word # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
bra.w fsnan_exit
fsnan_out_w_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_w # store result to regfile
bra.w fsnan_exit
fsnan_out_l:
mov.l FP_SRC_HI(%a6),%d0 # load upper longword of SNAN
bset &30,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_l_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w fsnan_exit
fsnan_out_l_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w fsnan_exit
fsnan_out_s:
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_d_dn # yes
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
mov.l FP_SRC_HI(%a6),%d1 # load mantissa
lsr.l &0x8,%d1 # shift mantissa for sgl
or.l %d1,%d0 # create sgl SNAN
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w fsnan_exit
fsnan_out_d_dn:
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
mov.l %d1,-(%sp)
mov.l FP_SRC_HI(%a6),%d1 # load mantissa
lsr.l &0x8,%d1 # shift mantissa for sgl
or.l %d1,%d0 # create sgl SNAN
mov.l (%sp)+,%d1
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w fsnan_exit
fsnan_out_d:
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
mov.l %d0,FP_SCR0_EX(%a6) # store to temp space
mov.l &11,%d0 # load shift amt
lsr.l %d0,%d1
or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
andi.l &0x000007ff,%d1
ror.l %d0,%d1
mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
lsr.l %d0,%d1
or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l EXC_EA(%a6),%a1 # pass: dst addr
movq.l &0x8,%d0 # pass: size of 8 bytes
bsr.l _dmem_write # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
bra.w fsnan_exit
# for extended precision, if the addressing mode is pre-decrement or
# post-increment, then the address register did not get updated.
# in addition, for pre-decrement, the stacked <ea> is incorrect.
fsnan_out_x:
clr.b SPCOND_FLG(%a6) # clear special case flag
mov.w FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
clr.w 2+FP_SCR0(%a6)
mov.l FP_SRC_HI(%a6),%d0
bset &30,%d0
mov.l %d0,FP_SCR0_HI(%a6)
mov.l FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
btst &0x5,EXC_SR(%a6) # supervisor mode exception?
bne.b fsnan_out_x_s # yes
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # save on stack for calc_ea()
mov.l (%a6),EXC_A6(%a6)
bsr.l _calc_ea_fout # find the correct ea,update An
mov.l %a0,%a1
mov.l %a0,EXC_EA(%a6) # stack correct <ea>
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp # restore user stack pointer
mov.l EXC_A6(%a6),(%a6)
fsnan_out_x_save:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
movq.l &0xc,%d0 # pass: size of extended
bsr.l _dmem_write # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_x # yes
bra.w fsnan_exit
fsnan_out_x_s:
mov.l (%a6),EXC_A6(%a6)
bsr.l _calc_ea_fout # find the correct ea,update An
mov.l %a0,%a1
mov.l %a0,EXC_EA(%a6) # stack correct <ea>
mov.l EXC_A6(%a6),(%a6)
cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
bne.b fsnan_out_x_save # no
# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
mov.l EXC_A6(%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
mov.l LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
mov.l LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_snan
#########################################################################
# XDEF **************************************************************** #
# _fpsp_inex(): 060FPSP entry point for FP Inexact exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Inexact exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# smovcr() - emulate an "fmovcr" instruction #
# fout() - emulate an opclass 3 instruction #
#	tbl_unsupp - address of table of emulation routines for opclass 0,2	#
# _real_inex() - "callout" to operating system inexact handler #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Inexact exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP Inexact exception is enabled, the goal #
# is to get to the handler specified at _real_inex(). But, on the 060, #
# for opclass zero and two instruction taking this exception, the #
# hardware doesn't store the correct result to the destination FP #
# register as did the '040 and '881/2. This handler must emulate the #
# instruction in order to get this value and then store it to the #
# correct register before calling _real_inex(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# inexact result out to memory or data register file as it should. #
# This code must emulate the move out by calling fout() before finally #
# exiting through _real_inex(). #
# #
#########################################################################
global _fpsp_inex
_fpsp_inex:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.w finex_out # fmove out
# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
# longword integer directly into the upper longword of the mantissa along
# w/ an exponent value of 0x401e. we convert this to extended precision here.
bfextu %d0{&19:&3},%d0 # fetch instr size
bne.b finex_cont # instr size is not long
cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
bne.b finex_cont # no
fmov.l &0x0,%fpcr
fmov.l FP_SRC_HI(%a6),%fp0 # load integer src
fmov.x %fp0,FP_SRC(%a6) # store integer as extended precision
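# re-insert the fsave status word (0xe001, the pending-inexact value used
# in tbl_iea_except above) since the fmov.x just overwrote that field of
# the frame.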
mov.w &0xe001,0x2+FP_SRC(%a6)
finex_cont:
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
# Here, we zero the ccode and exception byte field since we're going to
# emulate the whole instruction. Notice, though, that we don't kill the
# INEX1 bit. This is because a packed op has long since been converted
# to extended before arriving here. Therefore, we need to retain the
# INEX1 bit from when the operand was first converted.
	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
cmpi.b %d1,&0x17 # is op an fmovecr?
beq.w finex_fmovcr # yes
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bits four and five of the fp extension word separate the monadic and dyadic
# operations that can pass through fpsp_inex(). remember that fcmp and ftst
# will never take this exception, but fsincos will.
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b finex_extract # monadic
btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
bne.b finex_extract # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b finex_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
finex_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
finex_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
# the operation has been emulated. the result is in fp0.
finex_save:
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
finex_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_inex
finex_fmovcr:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.l &0x0000007f,%d1 # pass rom offset
bsr.l smovcr
bra.b finex_save
########################################################################
#
# the hardware does not save the default result to memory on enabled
# inexact exceptions. we do this here before passing control to
# the user inexact handler.
#
# byte, word, and long destination format operations can pass
# through here. so can double and single precision.
# although packed opclass three operations can take inexact
# exceptions, they won't pass through here since they are caught
# first by the unsupported data format exception handler. that handler
# sends them directly to _real_inex() if necessary.
#
finex_out:
mov.b &NORM,STAG(%a6) # src is a NORM
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout # store the default result
bra.b finex_exit
#########################################################################
# XDEF **************************************************************** #
# _fpsp_dz(): 060FPSP entry point for FP DZ exception. #
# #
# This handler should be the first code executed upon taking #
# the FP DZ exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword from memory #
# fix_skewed_ops() - adjust fsave operand #
# _real_dz() - "callout" exit point from FP DZ handler #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP DZ exception stack. #
# - The fsave frame contains the source operand. #
# #
# OUTPUT ************************************************************** #
# - The system stack contains the FP DZ exception stack. #
# - The fsave frame contains the adjusted source operand. #
# #
# ALGORITHM *********************************************************** #
# In a system where the DZ exception is enabled, the goal is to #
# get to the handler specified at _real_dz(). But, on the 060, when the #
# exception is taken, the input operand in the fsave state frame may #
# be incorrect for some cases and need to be adjusted. So, this package #
# adjusts the operand using fix_skewed_ops() and then branches to #
# _real_dz(). #
# #
#########################################################################
global _fpsp_dz
_fpsp_dz:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source zero
# in the sgl or dbl format.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
fdz_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_dz
#########################################################################
# XDEF **************************************************************** #
# _fpsp_fline(): 060FPSP entry point for "Line F emulator" #
# exception when the "reduced" version of the #
# FPSP is implemented that does not emulate #
# FP unimplemented instructions. #
# #
# This handler should be the first code executed upon taking a #
# "Line F Emulator" exception in an operating system integrating #
# the reduced version of 060FPSP. #
# #
# XREF **************************************************************** #
# _real_fpu_disabled() - Handle "FPU disabled" exceptions #
# _real_fline() - Handle all other cases (treated equally) #
# #
# INPUT *************************************************************** #
# - The system stack contains a "Line F Emulator" exception #
# stack frame. #
# #
# OUTPUT ************************************************************** #
# - The system stack is unchanged. #
# #
# ALGORITHM *********************************************************** #
# When a "Line F Emulator" exception occurs in a system where #
# "FPU Unimplemented" instructions will not be emulated, the exception #
#	can occur because the FPU is disabled or the instruction is to be	#
#	classified as "Line F". This module determines which case exists and	#
# calls the appropriate "callout". #
# #
#########################################################################
global _fpsp_fline
_fpsp_fline:
# check to see if the FPU is disabled. if so, jump to the OS entry
# point for that condition.
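# a frame word of 0x402c (format 0x4, vector offset 0x02c) marks the
# "FPU disabled" flavor of the Line F frame (the same frame word the
# iea_disabled path above inserts); anything else is a true Line F.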
cmpi.w 0x6(%sp),&0x402c
beq.l _real_fpu_disabled
bra.l _real_fline
#########################################################################
# XDEF **************************************************************** #
# _dcalc_ea(): calc correct <ea> from <ea> stacked on exception #
# #
# XREF **************************************************************** #
# inc_areg() - increment an address register #
# dec_areg() - decrement an address register #
# #
# INPUT *************************************************************** #
# d0 = number of bytes to adjust <ea> by #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# "Dummy" CALCulate Effective Address: #
# The stacked <ea> for FP unimplemented instructions and opclass #
# two packed instructions is correct with the exception of... #
# #
# 1) -(An) : The register is not updated regardless of size. #
# Also, for extended precision and packed, the #
# stacked <ea> value is 8 bytes too big #
# 2) (An)+ : The register is not updated. #
# 3) #<data> : The upper longword of the immediate operand is #
#		      stacked; b, w, l, and s sizes are completely	#
#		      stacked, while d, x, and p are not.		#
# #
#########################################################################
global _dcalc_ea
_dcalc_ea:
mov.l %d0, %a0 # move # bytes to %a0
mov.b 1+EXC_OPWORD(%a6), %d0 # fetch opcode word
mov.l %d0, %d1 # make a copy
andi.w &0x38, %d0 # extract mode field
andi.l &0x7, %d1 # extract reg field
cmpi.b %d0,&0x18 # is mode (An)+ ?
beq.b dcea_pi # yes
cmpi.b %d0,&0x20 # is mode -(An) ?
beq.b dcea_pd # yes
or.w %d1,%d0 # concat mode,reg
cmpi.b %d0,&0x3c # is mode #<data>?
beq.b dcea_imm # yes
mov.l EXC_EA(%a6),%a0 # return <ea>
rts
# need to set immediate data flag here since we'll need to do
# an imem_read to fetch this later.
dcea_imm:
mov.b &immed_flg,SPCOND_FLG(%a6)
	lea		([USER_FPIAR,%a6],0x4),%a0 # return <ea> of immediate data
rts
# here, the <ea> is stacked correctly. however, we must update the
# address register...
dcea_pi:
mov.l %a0,%d0 # pass amt to inc by
bsr.l inc_areg # inc addr register
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
rts
# the <ea> is stacked correctly for all but extended and packed which
# the <ea>s are 8 bytes too large.
# it would make no sense to have a pre-decrement to a7 in supervisor
# mode so we don't even worry about this tricky case here : )
dcea_pd:
mov.l %a0,%d0 # pass amt to dec by
bsr.l dec_areg # dec addr register
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
cmpi.b %d0,&0xc # is opsize ext or packed?
beq.b dcea_pd2 # yes
rts
dcea_pd2:
sub.l &0x8,%a0 # correct <ea>
mov.l %a0,EXC_EA(%a6) # put correct <ea> on stack
rts
#########################################################################
# XDEF **************************************************************** #
# _calc_ea_fout(): calculate correct stacked <ea> for extended #
# and packed data opclass 3 operations. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# a0 = return correct effective address #
# #
# ALGORITHM *********************************************************** #
# For opclass 3 extended and packed data operations, the <ea> #
# stacked for the exception is incorrect for -(an) and (an)+ addressing #
# modes. Also, while we're at it, the index register itself must get #
# updated. #
# So, for -(an), we must subtract 8 off of the stacked <ea> value #
# and return that value as the correct <ea> and store that value in An. #
# For (an)+, the stacked <ea> is correct but we must adjust An by +12. #
# #
#########################################################################
# This calc_ea is currently used to retrieve the correct <ea>
# for fmove outs of type extended and packed.
global _calc_ea_fout
_calc_ea_fout:
mov.b 1+EXC_OPWORD(%a6),%d0 # fetch opcode word
mov.l %d0,%d1 # make a copy
andi.w &0x38,%d0 # extract mode field
andi.l &0x7,%d1 # extract reg field
cmpi.b %d0,&0x18 # is mode (An)+ ?
beq.b ceaf_pi # yes
cmpi.b %d0,&0x20 # is mode -(An) ?
beq.w ceaf_pd # yes
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
rts
# (An)+ : extended and packed fmove out
# : stacked <ea> is correct
# : "An" not updated
ceaf_pi:
mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
mov.l EXC_EA(%a6),%a0
jmp (tbl_ceaf_pi.b,%pc,%d1.w*1)
swbeg &0x8
tbl_ceaf_pi:
short ceaf_pi0 - tbl_ceaf_pi
short ceaf_pi1 - tbl_ceaf_pi
short ceaf_pi2 - tbl_ceaf_pi
short ceaf_pi3 - tbl_ceaf_pi
short ceaf_pi4 - tbl_ceaf_pi
short ceaf_pi5 - tbl_ceaf_pi
short ceaf_pi6 - tbl_ceaf_pi
short ceaf_pi7 - tbl_ceaf_pi
ceaf_pi0:
addi.l &0xc,EXC_DREGS+0x8(%a6)
rts
ceaf_pi1:
addi.l &0xc,EXC_DREGS+0xc(%a6)
rts
ceaf_pi2:
add.l &0xc,%a2
rts
ceaf_pi3:
add.l &0xc,%a3
rts
ceaf_pi4:
add.l &0xc,%a4
rts
ceaf_pi5:
add.l &0xc,%a5
rts
ceaf_pi6:
addi.l &0xc,EXC_A6(%a6)
rts
ceaf_pi7:
mov.b &mia7_flg,SPCOND_FLG(%a6)
addi.l &0xc,EXC_A7(%a6)
rts
# -(An) : extended and packed fmove out
# : stacked <ea> = actual <ea> + 8
# : "An" not updated
ceaf_pd:
mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
mov.l EXC_EA(%a6),%a0
sub.l &0x8,%a0
sub.l &0x8,EXC_EA(%a6)
jmp (tbl_ceaf_pd.b,%pc,%d1.w*1)
swbeg &0x8
tbl_ceaf_pd:
short ceaf_pd0 - tbl_ceaf_pd
short ceaf_pd1 - tbl_ceaf_pd
short ceaf_pd2 - tbl_ceaf_pd
short ceaf_pd3 - tbl_ceaf_pd
short ceaf_pd4 - tbl_ceaf_pd
short ceaf_pd5 - tbl_ceaf_pd
short ceaf_pd6 - tbl_ceaf_pd
short ceaf_pd7 - tbl_ceaf_pd
ceaf_pd0:
mov.l %a0,EXC_DREGS+0x8(%a6)
rts
ceaf_pd1:
mov.l %a0,EXC_DREGS+0xc(%a6)
rts
ceaf_pd2:
mov.l %a0,%a2
rts
ceaf_pd3:
mov.l %a0,%a3
rts
ceaf_pd4:
mov.l %a0,%a4
rts
ceaf_pd5:
mov.l %a0,%a5
rts
ceaf_pd6:
mov.l %a0,EXC_A6(%a6)
rts
ceaf_pd7:
mov.l %a0,EXC_A7(%a6)
mov.b &mda7_flg,SPCOND_FLG(%a6)
rts
#
# This table holds the offsets of the emulation routines for each individual
# math operation relative to the address of this table. Included are
# routines like fadd/fmul/fabs. The transcendentals ARE NOT. This is because
# this table is for the version of the 060FPSP without transcendentals.
# The location within the table is determined by the extension bits of the
# operation longword.
#
swbeg &109
tbl_unsupp:
long fin - tbl_unsupp # 00: fmove
long fint - tbl_unsupp # 01: fint
long tbl_unsupp - tbl_unsupp # 02: fsinh
long fintrz - tbl_unsupp # 03: fintrz
long fsqrt - tbl_unsupp # 04: fsqrt
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 06: flognp1
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 08: fetoxm1
long tbl_unsupp - tbl_unsupp # 09: ftanh
long tbl_unsupp - tbl_unsupp # 0a: fatan
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 0c: fasin
long tbl_unsupp - tbl_unsupp # 0d: fatanh
long tbl_unsupp - tbl_unsupp # 0e: fsin
long tbl_unsupp - tbl_unsupp # 0f: ftan
long tbl_unsupp - tbl_unsupp # 10: fetox
long tbl_unsupp - tbl_unsupp # 11: ftwotox
long tbl_unsupp - tbl_unsupp # 12: ftentox
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 14: flogn
long tbl_unsupp - tbl_unsupp # 15: flog10
long tbl_unsupp - tbl_unsupp # 16: flog2
long tbl_unsupp - tbl_unsupp
long fabs - tbl_unsupp # 18: fabs
long tbl_unsupp - tbl_unsupp # 19: fcosh
long fneg - tbl_unsupp # 1a: fneg
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 1c: facos
long tbl_unsupp - tbl_unsupp # 1d: fcos
long tbl_unsupp - tbl_unsupp # 1e: fgetexp
long tbl_unsupp - tbl_unsupp # 1f: fgetman
long fdiv - tbl_unsupp # 20: fdiv
long tbl_unsupp - tbl_unsupp # 21: fmod
long fadd - tbl_unsupp # 22: fadd
long fmul - tbl_unsupp # 23: fmul
long fsgldiv - tbl_unsupp # 24: fsgldiv
long tbl_unsupp - tbl_unsupp # 25: frem
long tbl_unsupp - tbl_unsupp # 26: fscale
long fsglmul - tbl_unsupp # 27: fsglmul
long fsub - tbl_unsupp # 28: fsub
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp # 30: fsincos
long tbl_unsupp - tbl_unsupp # 31: fsincos
long tbl_unsupp - tbl_unsupp # 32: fsincos
long tbl_unsupp - tbl_unsupp # 33: fsincos
long tbl_unsupp - tbl_unsupp # 34: fsincos
long tbl_unsupp - tbl_unsupp # 35: fsincos
long tbl_unsupp - tbl_unsupp # 36: fsincos
long tbl_unsupp - tbl_unsupp # 37: fsincos
long fcmp - tbl_unsupp # 38: fcmp
long tbl_unsupp - tbl_unsupp
long ftst - tbl_unsupp # 3a: ftst
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fsin - tbl_unsupp # 40: fsmove
long fssqrt - tbl_unsupp # 41: fssqrt
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fdin - tbl_unsupp # 44: fdmove
long fdsqrt - tbl_unsupp # 45: fdsqrt
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fsabs - tbl_unsupp # 58: fsabs
long tbl_unsupp - tbl_unsupp
long fsneg - tbl_unsupp # 5a: fsneg
long tbl_unsupp - tbl_unsupp
long fdabs - tbl_unsupp # 5c: fdabs
long tbl_unsupp - tbl_unsupp
long fdneg - tbl_unsupp # 5e: fdneg
long tbl_unsupp - tbl_unsupp
long fsdiv - tbl_unsupp # 60: fsdiv
long tbl_unsupp - tbl_unsupp
long fsadd - tbl_unsupp # 62: fsadd
long fsmul - tbl_unsupp # 63: fsmul
long fddiv - tbl_unsupp # 64: fddiv
long tbl_unsupp - tbl_unsupp
long fdadd - tbl_unsupp # 66: fdadd
long fdmul - tbl_unsupp # 67: fdmul
long fssub - tbl_unsupp # 68: fssub
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fdsub - tbl_unsupp # 6c: fdsub
#################################################
# Add this here so non-fp modules can compile.
# (smovcr is called from fpsp_inex.)
global smovcr
smovcr:
bra.b smovcr
#########################################################################
# XDEF **************************************************************** #
# fmovm_dynamic(): emulate "fmovm" dynamic instruction #
# #
# XREF **************************************************************** #
# fetch_dreg() - fetch data register #
# {i,d,}mem_read() - fetch data from memory #
# _mem_write() - write data to memory #
# iea_iacc() - instruction memory access error occurred #
# iea_dacc() - data memory access error occurred #
# restore() - restore An index regs if access error occurred #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If instr is "fmovm Dn,-(A7)" from supervisor mode, #
# d0 = size of dump #
# d1 = Dn #
# Else if instruction access error, #
# d0 = FSLW #
# Else if data access error, #
# d0 = FSLW #
# a0 = address of fault #
# Else #
# none. #
# #
# ALGORITHM *********************************************************** #
# The effective address must be calculated since this is entered #
# from an "Unimplemented Effective Address" exception handler. So, we #
# have our own fmovm_calc_ea() routine here. If an access error is flagged #
# by a _{i,d,}mem_read() call, we must exit through the special #
# handler. #
# The data register is determined and its value loaded to get the #
# string of FP registers affected. This value is used as an index into #
# a lookup table such that we can determine the number of bytes #
# involved. #
# If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used #
# to read in all FP values. Again, _mem_read() may fail and require a #
# special exit. #
# If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used #
# to write all FP values. _mem_write() may also fail. #
# If the instruction is "fmovm.x DN,-(a7)" from supervisor mode, #
# then we return the size of the dump and the string to the caller #
# so that the move can occur outside of this routine. This special #
# case is required so that moves to the system stack are handled #
# correctly. #
# #
# DYNAMIC: #
# fmovm.x dn, <ea> #
# fmovm.x <ea>, dn #
# #
# <WORD 1> <WORD2> #
# 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
# #
# & = (0): predecrement addressing mode #
# (1): postincrement or control addressing mode #
# @ = (0): move listed regs from memory to the FPU #
# (1): move listed regs from the FPU to memory #
# $$$ : index of data register holding reg select mask #
# #
# NOTES: #
# If the data register holds a zero, then the #
# instruction is a nop. #
# #
#########################################################################
global fmovm_dynamic
fmovm_dynamic:
# extract the data register in which the bit string resides...
mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
andi.w &0x70,%d1 # extract reg bits
lsr.b &0x4,%d1 # shift into lo bits
# fetch the bit string into d0...
bsr.l fetch_dreg # fetch reg string
andi.l &0x000000ff,%d0 # keep only lo byte
mov.l %d0,-(%sp) # save strg
mov.b (tbl_fmovm_size.w,%pc,%d0),%d0
mov.l %d0,-(%sp) # save size
bsr.l fmovm_calc_ea # calculate <ea>
mov.l (%sp)+,%d0 # restore size
mov.l (%sp)+,%d1 # restore strg
# if the bit string is a zero, then the operation is a no-op
# but, make sure that we've calculated ea and advanced the opword pointer
beq.w fmovm_data_done
# separate move ins from move outs...
btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
beq.w fmovm_data_in # it's a move in
#############
# MOVE OUT: #
#############
fmovm_data_out:
btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
bne.w fmovm_out_ctrl # control
############################
fmovm_out_predec:
# for predecrement mode, the bit string is the opposite of both control
# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
# here, we convert it to be just like the others...
mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
btst &0x5,EXC_SR(%a6) # user or supervisor mode?
beq.b fmovm_out_ctrl # user
fmovm_out_predec_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
bne.b fmovm_out_ctrl
# the operation was unfortunately an: fmovm.x dn,-(sp)
# called from supervisor mode.
# we're also passing "size" and "strg" back to the calling routine
rts
############################
fmovm_out_ctrl:
mov.l %a0,%a1 # move <ea> to a1
sub.l %d0,%sp # subtract size of dump
lea (%sp),%a0
tst.b %d1 # should FP0 be moved?
bpl.b fmovm_out_ctrl_fp1 # no
mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
mov.l 0x4+EXC_FP0(%a6),(%a0)+
mov.l 0x8+EXC_FP0(%a6),(%a0)+
fmovm_out_ctrl_fp1:
lsl.b &0x1,%d1 # should FP1 be moved?
bpl.b fmovm_out_ctrl_fp2 # no
mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
mov.l 0x4+EXC_FP1(%a6),(%a0)+
mov.l 0x8+EXC_FP1(%a6),(%a0)+
fmovm_out_ctrl_fp2:
lsl.b &0x1,%d1 # should FP2 be moved?
bpl.b fmovm_out_ctrl_fp3 # no
fmovm.x &0x20,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp3:
lsl.b &0x1,%d1 # should FP3 be moved?
bpl.b fmovm_out_ctrl_fp4 # no
fmovm.x &0x10,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp4:
lsl.b &0x1,%d1 # should FP4 be moved?
bpl.b fmovm_out_ctrl_fp5 # no
fmovm.x &0x08,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp5:
lsl.b &0x1,%d1 # should FP5 be moved?
bpl.b fmovm_out_ctrl_fp6 # no
fmovm.x &0x04,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp6:
lsl.b &0x1,%d1 # should FP6 be moved?
bpl.b fmovm_out_ctrl_fp7 # no
fmovm.x &0x02,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp7:
lsl.b &0x1,%d1 # should FP7 be moved?
bpl.b fmovm_out_ctrl_done # no
fmovm.x &0x01,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_done:
mov.l %a1,L_SCR1(%a6)
lea (%sp),%a0 # pass: supervisor src
mov.l %d0,-(%sp) # save size
bsr.l _dmem_write # copy data to user mem
mov.l (%sp)+,%d0
add.l %d0,%sp # clear fpreg data from stack
tst.l %d1 # did dstore err?
bne.w fmovm_out_err # yes
rts
############
# MOVE IN: #
############
fmovm_data_in:
mov.l %a0,L_SCR1(%a6)
sub.l %d0,%sp # make room for fpregs
lea (%sp),%a1
mov.l %d1,-(%sp) # save bit string for later
mov.l %d0,-(%sp) # save # of bytes
bsr.l _dmem_read # copy data from user mem
mov.l (%sp)+,%d0 # retrieve # of bytes
tst.l %d1 # did dfetch fail?
bne.w fmovm_in_err # yes
mov.l (%sp)+,%d1 # load bit string
lea (%sp),%a0 # addr of stack
tst.b %d1 # should FP0 be moved?
bpl.b fmovm_data_in_fp1 # no
mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
mov.l (%a0)+,0x4+EXC_FP0(%a6)
mov.l (%a0)+,0x8+EXC_FP0(%a6)
fmovm_data_in_fp1:
lsl.b &0x1,%d1 # should FP1 be moved?
bpl.b fmovm_data_in_fp2 # no
mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
mov.l (%a0)+,0x4+EXC_FP1(%a6)
mov.l (%a0)+,0x8+EXC_FP1(%a6)
fmovm_data_in_fp2:
lsl.b &0x1,%d1 # should FP2 be moved?
bpl.b fmovm_data_in_fp3 # no
fmovm.x (%a0)+,&0x20 # yes
fmovm_data_in_fp3:
lsl.b &0x1,%d1 # should FP3 be moved?
bpl.b fmovm_data_in_fp4 # no
fmovm.x (%a0)+,&0x10 # yes
fmovm_data_in_fp4:
lsl.b &0x1,%d1 # should FP4 be moved?
bpl.b fmovm_data_in_fp5 # no
fmovm.x (%a0)+,&0x08 # yes
fmovm_data_in_fp5:
lsl.b &0x1,%d1 # should FP5 be moved?
bpl.b fmovm_data_in_fp6 # no
fmovm.x (%a0)+,&0x04 # yes
fmovm_data_in_fp6:
lsl.b &0x1,%d1 # should FP6 be moved?
bpl.b fmovm_data_in_fp7 # no
fmovm.x (%a0)+,&0x02 # yes
fmovm_data_in_fp7:
lsl.b &0x1,%d1 # should FP7 be moved?
bpl.b fmovm_data_in_done # no
fmovm.x (%a0)+,&0x01 # yes
fmovm_data_in_done:
add.l %d0,%sp # remove fpregs from stack
rts
#####################################
fmovm_data_done:
rts
##############################################################################
#
# table indexed by the operation's bit string that gives the number
# of bytes that will be moved.
#
# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
#
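# for example, a register select mask of 0x83 has three bits set, so
# tbl_fmovm_size[0x83] = 3 * 12 = 0x24 bytes.
#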
tbl_fmovm_size:
byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
#
# table to convert a pre-decrement bit string into a post-increment
# or control bit string.
# ex: 0x00 ==> 0x00
# 0x01 ==> 0x80
# 0x02 ==> 0x40
# .
# .
# 0xfd ==> 0xbf
# 0xfe ==> 0x7f
# 0xff ==> 0xff
#
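# (each entry is simply the 8-bit reversal of its index, e.g. 0x05 ==> 0xa0)
#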
tbl_fmovm_convert:
byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
global fmovm_calc_ea
###############################################
# fmovm_calc_ea: calculate effective address #
###############################################
fmovm_calc_ea:
mov.l %d0,%a0 # move # bytes to a0
# currently, MODE and REG are taken from the EXC_OPWORD. this could be
# easily changed if they were inputs passed in registers.
mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
mov.w %d0,%d1 # make a copy
andi.w &0x3f,%d0 # extract mode and reg fields
andi.l &0x7,%d1 # extract reg field
# jump to the corresponding function for each {MODE,REG} pair.
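# for example, opword mode/reg bits of 0x1a (mode = 011 "(An)+", reg = 010)
# select entry 26 below, which dispatches to faddr_ind_p_a2.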
mov.w (tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
jmp (tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
swbeg &64
tbl_fea_mode:
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short faddr_ind_a0 - tbl_fea_mode
short faddr_ind_a1 - tbl_fea_mode
short faddr_ind_a2 - tbl_fea_mode
short faddr_ind_a3 - tbl_fea_mode
short faddr_ind_a4 - tbl_fea_mode
short faddr_ind_a5 - tbl_fea_mode
short faddr_ind_a6 - tbl_fea_mode
short faddr_ind_a7 - tbl_fea_mode
short faddr_ind_p_a0 - tbl_fea_mode
short faddr_ind_p_a1 - tbl_fea_mode
short faddr_ind_p_a2 - tbl_fea_mode
short faddr_ind_p_a3 - tbl_fea_mode
short faddr_ind_p_a4 - tbl_fea_mode
short faddr_ind_p_a5 - tbl_fea_mode
short faddr_ind_p_a6 - tbl_fea_mode
short faddr_ind_p_a7 - tbl_fea_mode
short faddr_ind_m_a0 - tbl_fea_mode
short faddr_ind_m_a1 - tbl_fea_mode
short faddr_ind_m_a2 - tbl_fea_mode
short faddr_ind_m_a3 - tbl_fea_mode
short faddr_ind_m_a4 - tbl_fea_mode
short faddr_ind_m_a5 - tbl_fea_mode
short faddr_ind_m_a6 - tbl_fea_mode
short faddr_ind_m_a7 - tbl_fea_mode
short faddr_ind_disp_a0 - tbl_fea_mode
short faddr_ind_disp_a1 - tbl_fea_mode
short faddr_ind_disp_a2 - tbl_fea_mode
short faddr_ind_disp_a3 - tbl_fea_mode
short faddr_ind_disp_a4 - tbl_fea_mode
short faddr_ind_disp_a5 - tbl_fea_mode
short faddr_ind_disp_a6 - tbl_fea_mode
short faddr_ind_disp_a7 - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short fabs_short - tbl_fea_mode
short fabs_long - tbl_fea_mode
short fpc_ind - tbl_fea_mode
short fpc_ind_ext - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
###################################
# Address register indirect: (An) #
###################################
faddr_ind_a0:
mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
rts
faddr_ind_a1:
mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
rts
faddr_ind_a2:
mov.l %a2,%a0 # Get current a2
rts
faddr_ind_a3:
mov.l %a3,%a0 # Get current a3
rts
faddr_ind_a4:
mov.l %a4,%a0 # Get current a4
rts
faddr_ind_a5:
mov.l %a5,%a0 # Get current a5
rts
faddr_ind_a6:
mov.l (%a6),%a0 # Get current a6
rts
faddr_ind_a7:
mov.l EXC_A7(%a6),%a0 # Get current a7
rts
#####################################################
# Address register indirect w/ postincrement: (An)+ #
#####################################################
faddr_ind_p_a0:
mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a1:
mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a2:
mov.l %a2,%d0 # Get current a2
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a2 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a3:
mov.l %a3,%d0 # Get current a3
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a3 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a4:
mov.l %a4,%d0 # Get current a4
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a4 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a5:
mov.l %a5,%d0 # Get current a5
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a5 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a6:
mov.l (%a6),%d0 # Get current a6
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a7:
mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l EXC_A7(%a6),%d0 # Get current a7
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_A7(%a6) # Save incr value
mov.l %d0,%a0
rts
####################################################
# Address register indirect w/ predecrement: -(An) #
####################################################
faddr_ind_m_a0:
mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a1:
mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a2:
mov.l %a2,%d0 # Get current a2
sub.l %a0,%d0 # Decrement
mov.l %d0,%a2 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a3:
mov.l %a3,%d0 # Get current a3
sub.l %a0,%d0 # Decrement
mov.l %d0,%a3 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a4:
mov.l %a4,%d0 # Get current a4
sub.l %a0,%d0 # Decrement
mov.l %d0,%a4 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a5:
mov.l %a5,%d0 # Get current a5
sub.l %a0,%d0 # Decrement
mov.l %d0,%a5 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a6:
mov.l (%a6),%d0 # Get current a6
sub.l %a0,%d0 # Decrement
mov.l %d0,(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a7:
mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l EXC_A7(%a6),%d0 # Get current a7
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A7(%a6) # Save decr value
mov.l %d0,%a0
rts
########################################################
# Address register indirect w/ displacement: (d16, An) #
########################################################
faddr_ind_disp_a0:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
rts
faddr_ind_disp_a1:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
rts
faddr_ind_disp_a2:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a2,%a0 # a2 + d16
rts
faddr_ind_disp_a3:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a3,%a0 # a3 + d16
rts
faddr_ind_disp_a4:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a4,%a0 # a4 + d16
rts
faddr_ind_disp_a5:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a5,%a0 # a5 + d16
rts
faddr_ind_disp_a6:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l (%a6),%a0 # a6 + d16
rts
faddr_ind_disp_a7:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A7(%a6),%a0 # a7 + d16
rts
########################################################################
# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
# " " " w/ " (base displacement): (bd, An, Xn) #
# Memory indirect postindexed: ([bd, An], Xn, od) #
# Memory indirect preindexed: ([bd, An, Xn], od) #
########################################################################
faddr_ind_ext:
addq.l &0x8,%d1
bsr.l fetch_dreg # fetch base areg
mov.l %d0,-(%sp)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch extword in d0
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l (%sp)+,%a0
btst &0x8,%d0
bne.w fcalc_mem_ind
mov.l %d0,L_SCR1(%a6) # hold opword
mov.l %d0,%d1
rol.w &0x4,%d1
andi.w &0xf,%d1 # extract index regno
# count on fetch_dreg() not to alter a0...
bsr.l fetch_dreg # fetch index
mov.l %d2,-(%sp) # save d2
mov.l L_SCR1(%a6),%d2 # fetch opword
btst &0xb,%d2 # is it word or long?
bne.b faii8_long
ext.l %d0 # sign extend word index
faii8_long:
mov.l %d2,%d1
rol.w &0x7,%d1
andi.l &0x3,%d1 # extract scale value
lsl.l %d1,%d0 # shift index by scale
extb.l %d2 # sign extend displacement
add.l %d2,%d0 # index + disp
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore old d2
rts
###########################
# Absolute short: (XXX).W #
###########################
fabs_short:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch short address
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # return <ea> in a0
rts
##########################
# Absolute long: (XXX).L #
##########################
fabs_long:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch long address
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,%a0 # return <ea> in a0
rts
#######################################################
# Program counter indirect w/ displacement: (d16, PC) #
#######################################################
fpc_ind:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch word displacement
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
# _imem_read_word() increased the extwptr by 2. need to adjust here.
subq.l &0x2,%a0 # adjust <ea>
rts
##########################################################
# PC indirect w/ index(8-bit displacement): (d8, PC, Xn) #
# " " w/ " (base displacement): (bd, PC, Xn) #
# PC memory indirect postindexed: ([bd, PC], Xn, od) #
# PC memory indirect preindexed: ([bd, PC, Xn], od) #
##########################################################
fpc_ind_ext:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch ext word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
subq.l &0x2,%a0 # adjust base
btst &0x8,%d0 # is disp only 8 bits?
bne.w fcalc_mem_ind # calc memory indirect
mov.l %d0,L_SCR1(%a6) # store opword
mov.l %d0,%d1 # make extword copy
rol.w &0x4,%d1 # rotate reg num into place
andi.w &0xf,%d1 # extract register number
# count on fetch_dreg() not to alter a0...
bsr.l fetch_dreg # fetch index
mov.l %d2,-(%sp) # save d2
mov.l L_SCR1(%a6),%d2 # fetch opword
btst &0xb,%d2 # is index word or long?
bne.b fpii8_long # long
ext.l %d0 # sign extend word index
fpii8_long:
mov.l %d2,%d1
rol.w &0x7,%d1 # rotate scale value into place
andi.l &0x3,%d1 # extract scale value
lsl.l %d1,%d0 # shift index by scale
extb.l %d2 # sign extend displacement
add.l %d2,%d0 # disp + index
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore temp register
rts
# d2 = index
# d3 = base
# d4 = od
# d5 = extword
fcalc_mem_ind:
btst &0x6,%d0 # is the index suppressed?
beq.b fcalc_index
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d5 # put extword in d5
mov.l %a0,%d3 # put base in d3
clr.l %d2 # yes, so index = 0
bra.b fbase_supp_ck
# index:
fcalc_index:
mov.l %d0,L_SCR1(%a6) # save d0 (opword)
bfextu %d0{&16:&4},%d1 # fetch dreg index
bsr.l fetch_dreg
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d2 # put index in d2
mov.l L_SCR1(%a6),%d5
mov.l %a0,%d3
btst &0xb,%d5 # is index word or long?
bne.b fno_ext
ext.l %d2
fno_ext:
bfextu %d5{&21:&2},%d0
lsl.l %d0,%d2
# base address (passed as parameter in d3):
# we clear the value here if it should actually be suppressed.
fbase_supp_ck:
btst &0x7,%d5 # is the bd suppressed?
beq.b fno_base_sup
clr.l %d3
# base displacement:
fno_base_sup:
bfextu %d5{&26:&2},%d0 # get bd size
# beq.l fmovm_error # if (size == 0) it's reserved
cmpi.b %d0,&0x2
blt.b fno_bd
beq.b fget_word_bd
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
bra.b fchk_ind
fget_word_bd:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
ext.l %d0 # sign extend bd
fchk_ind:
add.l %d0,%d3 # base += bd
# outer displacement:
fno_bd:
bfextu %d5{&30:&2},%d0 # is od suppressed?
beq.w faii_bd
cmpi.b %d0,&0x2
blt.b fnull_od
beq.b fword_od
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
bra.b fadd_them
fword_od:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
ext.l %d0 # sign extend od
bra.b fadd_them
fnull_od:
clr.l %d0
fadd_them:
mov.l %d0,%d4
btst &0x2,%d5 # pre or post indexing?
beq.b fpre_indexed
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # did dfetch fail?
bne.w fcea_err # yes
add.l %d2,%d0 # <ea> += index
add.l %d4,%d0 # <ea> += od
bra.b fdone_ea
fpre_indexed:
add.l %d2,%d3 # preindexing
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # did dfetch fail?
bne.w fcea_err # yes
add.l %d4,%d0 # ea += od
bra.b fdone_ea
faii_bd:
add.l %d2,%d3 # ea = (base + bd) + index
mov.l %d3,%d0
fdone_ea:
mov.l %d0,%a0
movm.l (%sp)+,&0x003c # restore d2-d5
rts
#########################################################
fcea_err:
mov.l %d3,%a0
movm.l (%sp)+,&0x003c # restore d2-d5
mov.w &0x0101,%d0
bra.l iea_dacc
fcea_iacc:
movm.l (%sp)+,&0x003c # restore d2-d5
bra.l iea_iacc
fmovm_out_err:
bsr.l restore
mov.w &0x00e1,%d0
bra.b fmovm_err
fmovm_in_err:
bsr.l restore
mov.w &0x0161,%d0
fmovm_err:
mov.l L_SCR1(%a6),%a0
bra.l iea_dacc
#########################################################################
# XDEF **************************************************************** #
# fmovm_ctrl(): emulate fmovm.l of control registers instr #
# #
# XREF **************************************************************** #
# _imem_read_long() - read longword from memory #
# iea_iacc() - _imem_read_long() failed; error recovery #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If _imem_read_long() doesn't fail: #
# USER_FPCR(a6) = new FPCR value #
# USER_FPSR(a6) = new FPSR value #
# USER_FPIAR(a6) = new FPIAR value #
# #
# ALGORITHM *********************************************************** #
# Decode the instruction type by looking at the extension word #
# in order to see how many control registers to fetch from memory. #
# Fetch them using _imem_read_long(). If this fetch fails, exit through #
# the special access error exit handler iea_iacc(). #
# #
# Instruction word decoding: #
# #
# fmovem.l #<data>, {FPIAR&|FPCR&|FPSR} #
# #
# WORD1 WORD2 #
# 1111 0010 00 111100 100$ $$00 0000 0000 #
# #
# $$$ (100): FPCR #
# (010): FPSR #
# (001): FPIAR #
# (000): FPIAR #
# #
#########################################################################
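# for example, reg select bits of 0x98 (FPCR & FPSR) take the fctrl_in_6
# path below and fetch two longwords from the instruction stream.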
global fmovm_ctrl
fmovm_ctrl:
mov.b EXC_EXTWORD(%a6),%d0 # fetch reg select bits
cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
beq.w fctrl_in_7 # yes
cmpi.b %d0,&0x98 # fpcr & fpsr ?
beq.w fctrl_in_6 # yes
cmpi.b %d0,&0x94 # fpcr & fpiar ?
beq.b fctrl_in_5 # yes
# fmovem.l #<data>, fpsr/fpiar
fctrl_in_3:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
rts
# fmovem.l #<data>, fpcr/fpiar
fctrl_in_5:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
rts
# fmovem.l #<data>, fpcr/fpsr
fctrl_in_6:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
rts
# fmovem.l #<data>, fpcr/fpsr/fpiar
fctrl_in_7:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to mem
rts
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# addsub_scaler2(): scale inputs to fadd/fsub such that no #
# OVFL/UNFL exceptions will result #
# #
# XREF **************************************************************** #
# norm() - normalize mantissa after adjusting exponent #
# #
# INPUT *************************************************************** #
# FP_SRC(a6) = fp op1(src) #
# FP_DST(a6) = fp op2(dst) #
# #
# OUTPUT ************************************************************** #
# FP_SRC(a6) = fp op1 scaled(src) #
# FP_DST(a6) = fp op2 scaled(dst) #
# d0 = scale amount #
# #
# ALGORITHM *********************************************************** #
# If the DST exponent is > the SRC exponent, set the DST exponent #
# equal to 0x3fff and scale the SRC exponent by the value that the #
# DST exponent was scaled by. If the SRC exponent is greater or equal, #
# do the opposite. Return this scale factor in d0. #
# If the two exponents differ by > the number of mantissa bits #
# plus two, then set the smallest exponent to a very small value as a #
# quick shortcut. #
# #
#########################################################################
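# Worked example (illustration only): with src exp = 0x4005 and dst exp =
# 0x4003, src exp >= dst exp, so the src exp is forced to 0x3fff, the dst
# exp becomes 0x3ffd (their difference of 2 is preserved), and d0 returns
# the scale factor 0x3fff - 0x4005 = -6 for rescaling the final result.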
global addsub_scaler2
addsub_scaler2:
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),%d0
mov.w DST_EX(%a1),%d1
mov.w %d0,FP_SCR0_EX(%a6)
mov.w %d1,FP_SCR1_EX(%a6)
andi.w &0x7fff,%d0
andi.w &0x7fff,%d1
mov.w %d0,L_SCR1(%a6) # store src exponent
mov.w %d1,2+L_SCR1(%a6) # store dst exponent
cmp.w %d0, %d1 # is src exp >= dst exp?
bge.l src_exp_ge2
# dst exp is > src exp; scale dst to exp = 0x3fff
dst_exp_gt2:
bsr.l scale_to_zero_dst
mov.l %d0,-(%sp) # save scale factor
cmpi.b STAG(%a6),&DENORM # is src denormalized?
bne.b cmpexp12
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the denorm; result is new exp
neg.w %d0 # new exp = -(shft val)
mov.w %d0,L_SCR1(%a6) # insert new exp
cmpexp12:
mov.w 2+L_SCR1(%a6),%d0
subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
cmp.w %d0,L_SCR1(%a6) # is difference >= len(mantissa)+2?
bge.b quick_scale12
mov.w L_SCR1(%a6),%d0
add.w 0x2(%sp),%d0 # scale src exponent by scale factor
mov.w FP_SCR0_EX(%a6),%d1
and.w &0x8000,%d1
or.w %d1,%d0 # concat {sgn,new exp}
mov.w %d0,FP_SCR0_EX(%a6) # insert new src exponent
mov.l (%sp)+,%d0 # return SCALE factor
rts
quick_scale12:
andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
mov.l (%sp)+,%d0 # return SCALE factor
rts
# src exp is >= dst exp; scale src to exp = 0x3fff
src_exp_ge2:
bsr.l scale_to_zero_src
mov.l %d0,-(%sp) # save scale factor
cmpi.b DTAG(%a6),&DENORM # is dst denormalized?
bne.b cmpexp22
lea FP_SCR1(%a6),%a0
bsr.l norm # normalize the denorm; result is new exp
neg.w %d0 # new exp = -(shft val)
mov.w %d0,2+L_SCR1(%a6) # insert new exp
cmpexp22:
mov.w L_SCR1(%a6),%d0
subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
cmp.w %d0,2+L_SCR1(%a6) # is difference >= len(mantissa)+2?
bge.b quick_scale22
mov.w 2+L_SCR1(%a6),%d0
add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
mov.w FP_SCR1_EX(%a6),%d1
andi.w &0x8000,%d1
or.w %d1,%d0 # concat {sgn,new exp}
mov.w %d0,FP_SCR1_EX(%a6) # insert new dst exponent
mov.l (%sp)+,%d0 # return SCALE factor
rts
quick_scale22:
andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
mov.l (%sp)+,%d0 # return SCALE factor
rts
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# scale_to_zero_src(): scale the exponent of extended precision #
# value at FP_SCR0(a6). #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR0(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# Set the exponent of the input operand to 0x3fff. Save the value #
# of the difference between the original and new exponent. Then, #
# normalize the operand if it was a DENORM. Add this normalization #
# value to the previous value. Return the result. #
# #
#########################################################################
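# for example, a normalized operand with biased exponent 0x4003 is rescaled
# to 0x3fff and the returned scale value is 0x3fff - 0x4003 = -4.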
global scale_to_zero_src
scale_to_zero_src:
mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
mov.w %d1,%d0 # make a copy
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,%d0 # extract operand's sgn
or.w &0x3fff,%d0 # insert new operand's exponent(=0)
mov.w %d0,FP_SCR0_EX(%a6) # insert biased exponent
cmpi.b STAG(%a6),&DENORM # is the operand a DENORM?
beq.b stzs_denorm # normalize the DENORM
stzs_norm:
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
rts
stzs_denorm:
lea FP_SCR0(%a6),%a0 # pass ptr to src op
bsr.l norm # normalize denorm
neg.l %d0 # new exponent = -(shft val)
mov.l %d0,%d1 # prepare for op_norm call
bra.b stzs_norm # finish scaling
###
#########################################################################
# XDEF **************************************************************** #
# scale_sqrt(): scale the input operand exponent so a subsequent #
# fsqrt operation won't take an exception. #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR0(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# If the input operand is a DENORM, normalize it. #
# If the exponent of the input operand is even, set the exponent #
# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
# exponent of the input operand is odd, set the exponent to 0x3fff and #
# return a scale factor of "(exp-0x3fff)/2". #
# #
#########################################################################
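# for example, an even biased exponent of 0x4002 is replaced by 0x3ffe and
# d0 returns (0x3ffe - 0x4002)/2 = -2; an odd exponent of 0x4001 is
# replaced by 0x3fff and d0 returns (0x3fff - 0x4001)/2 = -1.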
global scale_sqrt
scale_sqrt:
cmpi.b STAG(%a6),&DENORM # is the operand a DENORM?
beq.b ss_denorm # normalize the DENORM
mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
btst &0x0,%d1 # is exp even or odd?
beq.b ss_norm_even
ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_norm_even:
ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
mov.l &0x3ffe,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_denorm:
lea FP_SCR0(%a6),%a0 # pass ptr to src op
bsr.l norm # normalize denorm
btst &0x0,%d0 # is exp even or odd?
beq.b ss_denorm_even
ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
add.l &0x3fff,%d0
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_denorm_even:
ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
add.l &0x3ffe,%d0
asr.l &0x1,%d0 # divide scale factor by 2
rts
###
#########################################################################
# XDEF **************************************************************** #
# scale_to_zero_dst(): scale the exponent of extended precision #
# value at FP_SCR1(a6). #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR1(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR1(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# Set the exponent of the input operand to 0x3fff. Save the value #
# of the difference between the original and new exponent. Then, #
# normalize the operand if it was a DENORM. Add this normalization #
# value to the previous value. Return the result. #
# #
#########################################################################
global scale_to_zero_dst
scale_to_zero_dst:
mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
mov.w %d1,%d0 # make a copy
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,%d0 # extract operand's sgn
or.w &0x3fff,%d0 # insert new operand's exponent(=0)
mov.w %d0,FP_SCR1_EX(%a6) # insert biased exponent
cmpi.b DTAG(%a6),&DENORM # is the operand a DENORM?
beq.b stzd_denorm # normalize the DENORM
stzd_norm:
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
rts
stzd_denorm:
lea FP_SCR1(%a6),%a0 # pass ptr to dst op
bsr.l norm # normalize denorm
neg.l %d0 # new exponent = -(shft val)
mov.l %d0,%d1 # prepare for op_norm call
bra.b stzd_norm # finish scaling
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# res_qnan(): return default result w/ QNAN operand for dyadic #
# res_snan(): return default result w/ SNAN operand for dyadic #
# res_qnan_1op(): return dflt result w/ QNAN operand for monadic #
# res_snan_1op(): return dflt result w/ SNAN operand for monadic #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# FP_SRC(a6) = pointer to extended precision src operand #
# FP_DST(a6) = pointer to extended precision dst operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# If either operand (but not both operands) of an operation is a #
# nonsignalling NAN, then that NAN is returned as the result. If both #
# operands are nonsignalling NANs, then the destination operand #
# nonsignalling NAN is returned as the result. #
# If either operand to an operation is a signalling NAN (SNAN), #
# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap #
# enable bit is set in the FPCR, then the trap is taken and the #
# destination is not modified. If the SNAN trap enable bit is not set, #
# then the SNAN is converted to a nonsignalling NAN (by setting the #
# SNAN bit in the operand to one), and the operation continues as #
# described in the preceding paragraph, for nonsignalling NANs. #
# Make sure the appropriate FPSR bits are set before exiting. #
# #
#########################################################################
global res_qnan
global res_snan
res_qnan:
res_snan:
cmp.b DTAG(%a6), &SNAN # is the dst an SNAN?
beq.b dst_snan2
cmp.b DTAG(%a6), &QNAN # is the dst a QNAN?
beq.b dst_qnan2
src_nan:
cmp.b STAG(%a6), &QNAN
beq.b src_qnan2
global res_snan_1op
res_snan_1op:
src_snan2:
bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
lea FP_SRC(%a6), %a0
bra.b nan_comp
global res_qnan_1op
res_qnan_1op:
src_qnan2:
or.l &nan_mask, USER_FPSR(%a6)
lea FP_SRC(%a6), %a0
bra.b nan_comp
dst_snan2:
or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
bset &0x6, FP_DST_HI(%a6) # set SNAN bit
lea FP_DST(%a6), %a0
bra.b nan_comp
dst_qnan2:
lea FP_DST(%a6), %a0
cmp.b STAG(%a6), &SNAN
bne nan_done
or.l &aiop_mask+snan_mask, USER_FPSR(%a6)
nan_done:
or.l &nan_mask, USER_FPSR(%a6)
nan_comp:
btst &0x7, FTEMP_EX(%a0) # is NAN neg?
beq.b nan_not_neg
or.l &neg_mask, USER_FPSR(%a6)
nan_not_neg:
fmovm.x (%a0), &0x80
rts
#########################################################################
# XDEF **************************************************************** #
# res_operr(): return default result during operand error #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# fp0 = default operand error result #
# #
# ALGORITHM *********************************************************** #
# A nonsignalling NAN is returned as the default result when #
# an operand error occurs for the following cases: #
# #
# Multiply: (Infinity x Zero) #
# Divide : (Zero / Zero) || (Infinity / Infinity) #
# #
#########################################################################
global res_operr
res_operr:
or.l &nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
fmovm.x nan_return(%pc), &0x80
rts
nan_return:
long 0x7fff0000, 0xffffffff, 0xffffffff
#########################################################################
# XDEF **************************************************************** #
# _denorm(): denormalize an intermediate result #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = points to the operand to be denormalized #
# (in the internal extended format) #
# #
# d0 = rounding precision #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to the denormalized result #
# (in the internal extended format) #
# #
# d0 = guard,round,sticky #
# #
# ALGORITHM *********************************************************** #
# According to the exponent underflow threshold for the given #
# precision, shift the mantissa bits to the right in order to raise the #
# exponent of the operand to the threshold value. While shifting the #
# mantissa bits right, maintain the value of the guard, round, and #
# sticky bits. #
# other notes: #
# (1) _denorm() is called by the underflow routines #
# (2) _denorm() does NOT affect the status register #
# #
#########################################################################
#
# table of exponent threshold values for each precision
#
tbl_thresh:
short 0x0
short sgl_thresh
short dbl_thresh
global _denorm
_denorm:
#
# Load the exponent threshold for the precision selected and check
# to see if (threshold - exponent) is > 65 in which case we can
# simply calculate the sticky bit and zero the mantissa. otherwise
# we have to call the denormalization routine.
#
lsr.b &0x2, %d0 # shift prec to lo bits
mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
mov.w %d1, %d0 # copy d1 into d0
sub.w FTEMP_EX(%a0), %d0 # diff = threshold - exp
cmpi.w %d0, &66 # is diff > 65? (mant + g,r bits)
bpl.b denorm_set_stky # yes; just calc sticky
clr.l %d0 # clear g,r,s
btst &inex2_bit, FPSR_EXCEPT(%a6) # was INEX2 set?
beq.b denorm_call # no; don't change anything
bset &29, %d0 # yes; set sticky bit
denorm_call:
bsr.l dnrm_lp # denormalize the number
rts
#
# all bits would have been shifted off during the denorm so simply
# calculate if the sticky should be set and clear the entire mantissa.
#
denorm_set_stky:
mov.l &0x20000000, %d0 # set sticky bit in return value
mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
# #
# dnrm_lp(): denormalize mantissa until exponent reaches threshold #
# #
# INPUT: #
# %a0 : points to the operand to be denormalized #
# %d0{31:29} : initial guard,round,sticky #
# %d1{15:0} : denormalization threshold #
# OUTPUT: #
# %a0 : points to the denormalized operand #
# %d0{31:29} : final guard,round,sticky #
# #
# *** Local Equates *** #
set GRS, L_SCR2 # g,r,s temp storage
set FTEMP_LO2, L_SCR1 # FTEMP_LO copy
global dnrm_lp
dnrm_lp:
#
# make a copy of FTEMP_LO and place the g,r,s bits directly after it
# in memory so as to make the bitfield extraction for denormalization easier.
#
mov.l FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
mov.l %d0, GRS(%a6) # place g,r,s after it
#
# check to see how much less than the underflow threshold the operand
# exponent is.
#
mov.l %d1, %d0 # copy the denorm threshold
sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
ble.b dnrm_no_lp # d1 <= 0
cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
blt.b case_1 # yes
cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
blt.b case_2 # yes
bra.w case_3 # (d1 >= 64)
#
# No denormalization necessary
#
dnrm_no_lp:
mov.l GRS(%a6), %d0 # restore original g,r,s
rts
#
# case (0<d1<32)
#
# %d0 = denorm threshold
# %d1 = "n" = amt to shift
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# <-(n)-><-(32 - n)-><------(32)-------><------(32)------->
# ---------------------------------------------------------
# |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
# ---------------------------------------------------------
#
case_1:
mov.l %d2, -(%sp) # create temp storage
mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
mov.l &32, %d0
sub.w %d1, %d0 # %d0 = 32 - %d1
cmpi.w %d1, &29 # is shft amt >= 29
blt.b case1_extract # no; no fix needed
mov.b GRS(%a6), %d2
or.b %d2, 3+FTEMP_LO2(%a6)
case1_extract:
bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
bfextu FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
mov.l %d2, FTEMP_HI(%a0) # store new FTEMP_HI
mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
bftst %d0{&2:&30} # were bits shifted off?
beq.b case1_sticky_clear # no; go finish
bset &rnd_stky_bit, %d0 # yes; set sticky bit
case1_sticky_clear:
and.l &0xe0000000, %d0 # clear all but G,R,S
mov.l (%sp)+, %d2 # restore temp register
rts
#
# case (32<=d1<64)
#
# %d0 = denorm threshold
# %d1 = "n" = amt to shift
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
# \ \ \
# \ \ \
# \ \ -------------------
# \ -------------------- \
# ------------------- \ \
# \ \ \
# \ \ \
# \ \ \
# <-------(32)------><-(n)-><-(32 - n)-><------(32)------->
# ---------------------------------------------------------
# |0...............0|0....0| NEW_LO |grs |
# ---------------------------------------------------------
#
case_2:
mov.l %d2, -(%sp) # create temp storage
mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
subi.w &0x20, %d1 # %d1 now between 0 and 32
mov.l &0x20, %d0
sub.w %d1, %d0 # %d0 = 32 - %d1
# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
# the number of bits to check for the sticky detect.
# it only plays a role in shift amounts of 61-63.
mov.b GRS(%a6), %d2
or.b %d2, 3+FTEMP_LO2(%a6)
bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
bftst %d1{&2:&30} # were any bits shifted off?
bne.b case2_set_sticky # yes; set sticky bit
bftst FTEMP_LO2(%a6){%d0:&31} # were any bits shifted off?
bne.b case2_set_sticky # yes; set sticky bit
mov.l %d1, %d0 # move new G,R,S to %d0
bra.b case2_end
case2_set_sticky:
mov.l %d1, %d0 # move new G,R,S to %d0
bset &rnd_stky_bit, %d0 # set sticky bit
case2_end:
clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
mov.l %d2, FTEMP_LO(%a0) # store FTEMP_LO
and.l &0xe0000000, %d0 # clear all but G,R,S
mov.l (%sp)+,%d2 # restore temp register
rts
#
# case (d1>=64)
#
# %d0 = denorm threshold
# %d1 = amt to shift
#
case_3:
mov.w %d0, FTEMP_EX(%a0) # insert denorm threshold
cmpi.w %d1, &65 # is shift amt > 65?
blt.b case3_64 # no; it's == 64
beq.b case3_65 # no; it's == 65
#
# case (d1>65)
#
# Shift value is > 65 and out of range. All bits are shifted off.
# Return a zero mantissa with the sticky bit set
#
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
mov.l &0x20000000, %d0 # set sticky bit
rts
#
# case (d1 == 64)
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-------(32)------>
# \ \
# \ \
# \ \
# \ ------------------------------
# ------------------------------- \
# \ \
# \ \
# \ \
# <-------(32)------>
# ---------------------------------------------------------
# |0...............0|0................0|grs |
# ---------------------------------------------------------
#
case3_64:
mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
mov.l %d0, %d1 # make a copy
and.l &0xc0000000, %d0 # extract G,R
and.l &0x3fffffff, %d1 # extract other bits
bra.b case3_complete
#
# case (d1 == 65)
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-------(32)------>
# \ \
# \ \
# \ \
# \ ------------------------------
# -------------------------------- \
# \ \
# \ \
# \ \
# <-------(31)----->
# ---------------------------------------------------------
# |0...............0|0................0|0rs |
# ---------------------------------------------------------
#
case3_65:
mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
mov.l %d0, %d1 # make a copy
and.l &0x80000000, %d0 # extract R bit
lsr.l &0x1, %d0 # shift high bit into R bit
and.l &0x7fffffff, %d1 # extract other bits
case3_complete:
# last operation done was an "and" of the bits shifted off so the condition
# codes are already set so branch accordingly.
bne.b case3_set_sticky # yes; go set new sticky
tst.l FTEMP_LO(%a0) # were any bits shifted off?
bne.b case3_set_sticky # yes; go set new sticky
tst.b GRS(%a6) # were any bits shifted off?
bne.b case3_set_sticky # yes; go set new sticky
#
# no bits were shifted off so don't set the sticky bit.
# the guard and round bits (already in %d0) are returned unchanged;
# the entire mantissa is zero.
#
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
#
# some bits were shifted off so set the sticky bit.
# the entire mantissa is zero.
#
case3_set_sticky:
bset &rnd_stky_bit,%d0 # set new sticky bit
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
#########################################################################
# XDEF **************************************************************** #
# _round(): round result according to precision/mode #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = ptr to input operand in internal extended format #
# d1(hi) = contains rounding precision: #
# ext = $0000xxxx #
# sgl = $0004xxxx #
# dbl = $0008xxxx #
# d1(lo) = contains rounding mode: #
# RN = $xxxx0000 #
# RZ = $xxxx0001 #
# RM = $xxxx0002 #
# RP = $xxxx0003 #
# d0{31:29} = contains the g,r,s bits (extended) #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to rounded result #
# #
# ALGORITHM *********************************************************** #
# On return the value pointed to by a0 is correctly rounded, #
# a0 is preserved and the g-r-s bits in d0 are cleared. #
# The result is not typed - the tag field is invalid. The #
# result is still in the internal extended format. #
# #
# The INEX bit of USER_FPSR will be set if the rounded result was #
# inexact (i.e. if any of the g-r-s bits were set). #
# #
#########################################################################
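# for example, d1 = 0x00040001 requests single-precision rounding
# (sgl = $0004xxxx) in round-to-zero mode (RZ = $xxxx0001).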
global _round
_round:
#
# ext_grs() looks at the rounding precision and sets the appropriate
# G,R,S bits.
# If (G,R,S == 0) then result is exact and round is done, else set
# the inex flag in status reg and continue.
#
bsr.l ext_grs # extract G,R,S
tst.l %d0 # are G,R,S zero?
beq.w truncate # yes; round is complete
or.w &inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
#
# Use rounding mode as an index into a jump table for these modes.
# All of the following assumes grs != 0.
#
mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
jmp (tbl_mode.b,%pc,%a1) # jmp to rnd mode handler
tbl_mode:
short rnd_near - tbl_mode
short truncate - tbl_mode # RZ always truncates
short rnd_mnus - tbl_mode
short rnd_plus - tbl_mode
#################################################################
# ROUND PLUS INFINITY #
# #
# If sign of fp number = 0 (positive), then add 1 to l. #
#################################################################
rnd_plus:
tst.b FTEMP_SGN(%a0) # check for sign
	bmi.w		truncate		# if negative, truncate
mov.l &0xffffffff, %d0 # force g,r,s to be all f's
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
#################################################################
# ROUND MINUS INFINITY #
# #
# If sign of fp number = 1 (negative), then add 1 to l. #
#################################################################
rnd_mnus:
tst.b FTEMP_SGN(%a0) # check for sign
	bpl.w		truncate		# if positive, truncate
mov.l &0xffffffff, %d0 # force g,r,s to be all f's
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
#################################################################
# ROUND NEAREST #
# #
# If (g=1), then add 1 to l and if (r=s=0), then clear l #
# Note that this will round to even in case of a tie. #
#################################################################
rnd_near:
asl.l &0x1, %d0 # shift g-bit to c-bit
	bcc.w		truncate		# if (g=0), truncate
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
# *** LOCAL EQUATES ***
set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
#########################
# ADD SINGLE #
#########################
add_sgl:
add.l &ad_1_sgl, FTEMP_HI(%a0)
bcc.b scc_clr # no mantissa overflow
roxr.w FTEMP_HI(%a0) # shift v-bit back in
roxr.w FTEMP_HI+2(%a0) # shift v-bit back in
add.w &0x1, FTEMP_EX(%a0) # and incr exponent
scc_clr:
tst.l %d0 # test for rs = 0
bne.b sgl_done
and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
sgl_done:
and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
rts
#########################
# ADD EXTENDED #
#########################
add_ext:
addq.l &1,FTEMP_LO(%a0) # add 1 to l-bit
bcc.b xcc_clr # test for carry out
addq.l &1,FTEMP_HI(%a0) # propagate carry
bcc.b xcc_clr
roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_LO(%a0)
roxr.w FTEMP_LO+2(%a0)
add.w &0x1,FTEMP_EX(%a0) # and inc exp
xcc_clr:
tst.l %d0 # test rs = 0
bne.b add_ext_done
and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
add_ext_done:
rts
#########################
# ADD DOUBLE #
#########################
add_dbl:
add.l &ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
bcc.b dcc_clr # no carry
addq.l &0x1, FTEMP_HI(%a0) # propagate carry
bcc.b dcc_clr # no carry
roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_LO(%a0)
roxr.w FTEMP_LO+2(%a0)
addq.w &0x1, FTEMP_EX(%a0) # incr exponent
dcc_clr:
tst.l %d0 # test for rs = 0
bne.b dbl_done
and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
dbl_done:
and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
rts
###########################
# Truncate all other bits #
###########################
truncate:
swap %d1 # select rnd prec
cmpi.b %d1, &s_mode # is prec sgl?
beq.w sgl_done # yes
bgt.b dbl_done # no; it's dbl
rts # no; it's ext
#
# ext_grs(): extract guard, round and sticky bits according to
# rounding precision.
#
# INPUT
# d0 = extended precision g,r,s (in d0{31:29})
# d1 = {PREC,ROUND}
# OUTPUT
# d0{31:29} = guard, round, sticky
#
# The ext_grs routine extracts the guard/round/sticky bits according to the
# selected rounding precision. It is called by the round subroutine
# only. All registers except d0 are kept intact. d0 becomes an
# updated guard,round,sticky in d0{31:29}
#
# Notes: ext_grs uses the rounding PREC and therefore has to swap d1
# prior to use and restore d1 to its original value afterwards. this
# routine is tightly tied to the round routine and not meant to
# uphold standard subroutine calling practices.
#
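#
# a small C sketch of the sgl/dbl extraction done below (hi/lo are the two
# mantissa longwords, grs the incoming d0{31:29} bits; names illustrative):
#
#    #include <stdint.h>
#
#    static uint32_t ext_grs_sgl_c(uint32_t hi, uint32_t lo, uint32_t grs)
#    {
#        uint32_t out = ((hi >> 6) & 0x3) << 30; /* g,r = hi bits 7-6     */
#        if ((hi & 0x3f) || lo || grs)           /* anything to the right */
#            out |= 1u << 29;                    /* folds into sticky     */
#        return out;
#    }
#
#    static uint32_t ext_grs_dbl_c(uint32_t hi, uint32_t lo, uint32_t grs)
#    {
#        (void)hi;                               /* hi is inside dbl prec */
#        uint32_t out = ((lo >> 9) & 0x3) << 30; /* g,r = lo bits 10-9    */
#        if ((lo & 0x1ff) || grs)
#            out |= 1u << 29;
#        return out;
#    }
#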
ext_grs:
swap %d1 # have d1.w point to round precision
tst.b %d1 # is rnd prec = extended?
bne.b ext_grs_not_ext # no; go handle sgl or dbl
#
# %d0 actually already holds g,r,s since _round() had it before calling
# this function. so, as long as we don't disturb it, we are "returning" it.
#
ext_grs_ext:
swap %d1 # yes; return to correct positions
rts
ext_grs_not_ext:
movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
cmpi.b %d1, &s_mode # is rnd prec = sgl?
bne.b ext_grs_dbl # no; go handle dbl
#
# sgl:
# 96 64 40 32 0
# -----------------------------------------------------
# | EXP |XXXXXXX| |xx | |grs|
# -----------------------------------------------------
# <--(24)--->nn\ /
# ee ---------------------
# ww |
# v
# gr new sticky
#
ext_grs_sgl:
bfextu FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
mov.l &30, %d2 # of the sgl prec. limits
lsl.l %d2, %d3 # shift g-r bits to MSB of d3
mov.l FTEMP_HI(%a0), %d2 # get word 2 for s-bit test
and.l &0x0000003f, %d2 # s bit is the or of all other
bne.b ext_grs_st_stky # bits to the right of g-r
tst.l FTEMP_LO(%a0) # test lower mantissa
bne.b ext_grs_st_stky # if any are set, set sticky
tst.l %d0 # test original g,r,s
bne.b ext_grs_st_stky # if any are set, set sticky
bra.b ext_grs_end_sd # if words 3 and 4 are clr, exit
#
# dbl:
# 96 64 32 11 0
# -----------------------------------------------------
# | EXP |XXXXXXX| | |xx |grs|
# -----------------------------------------------------
# nn\ /
# ee -------
# ww |
# v
# gr new sticky
#
ext_grs_dbl:
bfextu FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
mov.l &30, %d2 # of the dbl prec. limits
lsl.l %d2, %d3 # shift g-r bits to the MSB of d3
mov.l FTEMP_LO(%a0), %d2 # get lower mantissa for s-bit test
and.l &0x000001ff, %d2 # s bit is the or-ing of all
bne.b ext_grs_st_stky # other bits to the right of g-r
	tst.l		%d0			# test the original g,r,s
bne.b ext_grs_st_stky # if any are set, set sticky
bra.b ext_grs_end_sd # if clear, exit
ext_grs_st_stky:
bset &rnd_stky_bit, %d3 # set sticky bit
ext_grs_end_sd:
mov.l %d3, %d0 # return grs to d0
movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
swap %d1 # restore d1 to original
rts
#########################################################################
# norm(): normalize the mantissa of an extended precision input. the #
# input operand should not be normalized already. #
# #
# XDEF **************************************************************** #
# norm() #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# a0 = pointer fp extended precision operand to normalize #
# #
# OUTPUT ************************************************************** #
# d0 = number of bit positions the mantissa was shifted #
# a0 = the input operand's mantissa is normalized; the exponent #
# is unchanged. #
# #
#########################################################################
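#
# conceptually (C sketch, illustrative name; a zero mantissa is not
# expected here since the caller supplies an unnormalized non-zero value):
#
#    #include <stdint.h>
#
#    static unsigned norm64(uint64_t *man)       /* returns shift count   */
#    {
#        unsigned cnt = 0;
#        while (!(*man & (1ULL << 63))) {        /* the asm uses bfffo to */
#            *man <<= 1;                         /* find this count in    */
#            cnt++;                              /* one step              */
#        }
#        return cnt;
#    }
#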
global norm
norm:
mov.l %d2, -(%sp) # create some temp regs
mov.l %d3, -(%sp)
mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
bfffo %d0{&0:&32}, %d2 # how many places to shift?
beq.b norm_lo # hi(man) is all zeroes!
norm_hi:
lsl.l %d2, %d0 # left shift hi(man)
bfextu %d1{&0:%d2}, %d3 # extract lo bits
or.l %d3, %d0 # create hi(man)
lsl.l %d2, %d1 # create lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
norm_lo:
bfffo %d1{&0:&32}, %d2 # how many places to shift?
lsl.l %d2, %d1 # shift lo(man)
add.l &32, %d2 # add 32 to shft amount
mov.l %d1, FTEMP_HI(%a0) # store hi(man)
clr.l FTEMP_LO(%a0) # lo(man) is now zero
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
#########################################################################
# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
# - returns corresponding optype tag #
# #
# XDEF **************************************************************** #
# unnorm_fix() #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa #
# #
# INPUT *************************************************************** #
# a0 = pointer to unnormalized extended precision number #
# #
# OUTPUT ************************************************************** #
# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
# a0 = input operand has been converted to a norm, denorm, or #
# zero; both the exponent and mantissa are changed. #
# #
#########################################################################
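#
# roughly, in C (illustrative names; the real code below splits the denorm
# shift into a <= 32 and a > 32 case and works on the 96-bit image):
#
#    #include <stdint.h>
#
#    enum { XNORM, XDENORM, XZERO };             /* stand-ins for the tags */
#
#    static int unnorm_fix_c(uint16_t *exp /* sign stripped */, uint64_t *man)
#    {
#        if (*man == 0) {                        /* really a zero          */
#            *exp = 0;
#            return XZERO;
#        }
#        unsigned shift = 0;                     /* distance to normalize  */
#        for (uint64_t m = *man; !(m & (1ULL << 63)); m <<= 1)
#            shift++;
#        if (shift <= *exp) {                    /* exponent stays >= 0    */
#            *exp -= shift;
#            *man <<= shift;
#            return XNORM;
#        }
#        *man <<= *exp;                          /* shift only until exp=0 */
#        *exp = 0;
#        return XDENORM;
#    }
#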
global unnorm_fix
unnorm_fix:
bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
bne.b unnorm_shift # hi(man) is not all zeroes
#
# hi(man) is all zeroes so see if any bits in lo(man) are set
#
unnorm_chk_lo:
bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
beq.w unnorm_zero # yes
add.w &32, %d0 # no; fix shift distance
#
# d0 = # shifts needed for complete normalization
#
unnorm_shift:
clr.l %d1 # clear top word
mov.w FTEMP_EX(%a0), %d1 # extract exponent
and.w &0x7fff, %d1 # strip off sgn
cmp.w %d0, %d1 # will denorm push exp < 0?
bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
#
# exponent would not go < 0. Therefore, number stays normalized
#
sub.w %d0, %d1 # shift exponent value
mov.w FTEMP_EX(%a0), %d0 # load old exponent
and.w &0x8000, %d0 # save old sign
or.w %d0, %d1 # {sgn,new exp}
mov.w %d1, FTEMP_EX(%a0) # insert new exponent
bsr.l norm # normalize UNNORM
mov.b &NORM, %d0 # return new optype tag
rts
#
# exponent would go < 0, so only denormalize until exp = 0
#
unnorm_nrm_zero:
cmp.b %d1, &32 # is exp <= 32?
bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # extract new lo(man)
mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# only mantissa bits set are in lo(man)
#
unnorm_nrm_zero_lrg:
sub.w &32, %d1 # adjust shft amt by 32
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # left shift lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
clr.l FTEMP_LO(%a0) # lo(man) = 0
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# whole mantissa is zero so this UNNORM is actually a zero
#
unnorm_zero:
and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
mov.b &ZERO, %d0 # fix optype tag
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_x(): return the optype of the input ext fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# If it's an unnormalized zero, alter the operand and force it #
# to be a normal zero. #
# #
#########################################################################
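#
# the test order below corresponds to this C sketch (exp is the 15-bit
# exponent field, j the explicit integer bit, frac the remaining 63
# mantissa bits; tag names and values are illustrative only):
#
#    #include <stdint.h>
#
#    enum { TNORM, TZERO, TINF, TQNAN, TSNAN, TDENORM, TUNNORM };
#
#    static int classify_x(unsigned exp, int j, uint64_t frac)
#    {
#        if (exp == 0x7fff) {                    /* j-bit is a don't care */
#            if (frac == 0)
#                return TINF;
#            return (frac >> 62) & 1 ? TQNAN : TSNAN;
#        }
#        if (j)
#            return TNORM;
#        if (exp != 0)                           /* (an all-zero mantissa */
#            return TUNNORM;                     /*  is forced to TZERO)  */
#        return frac ? TDENORM : TZERO;
#    }
#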
global set_tag_x
set_tag_x:
mov.w FTEMP_EX(%a0), %d0 # extract exponent
andi.w &0x7fff, %d0 # strip off sign
cmpi.w %d0, &0x7fff # is (EXP == MAX)?
beq.b inf_or_nan_x
not_inf_or_nan_x:
btst &0x7,FTEMP_HI(%a0)
beq.b not_norm_x
is_norm_x:
mov.b &NORM, %d0
rts
not_norm_x:
tst.w %d0 # is exponent = 0?
bne.b is_unnorm_x
not_unnorm_x:
tst.l FTEMP_HI(%a0)
bne.b is_denorm_x
tst.l FTEMP_LO(%a0)
bne.b is_denorm_x
is_zero_x:
mov.b &ZERO, %d0
rts
is_denorm_x:
mov.b &DENORM, %d0
rts
# we must now distinguish "unnormalized zeroes", which we
# must convert to true zeroes.
is_unnorm_x:
tst.l FTEMP_HI(%a0)
bne.b is_unnorm_reg_x
tst.l FTEMP_LO(%a0)
bne.b is_unnorm_reg_x
# it's an "unnormalized zero". let's convert it to an actual zero...
andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
mov.b &ZERO, %d0
rts
is_unnorm_reg_x:
mov.b &UNNORM, %d0
rts
inf_or_nan_x:
tst.l FTEMP_LO(%a0)
bne.b is_nan_x
mov.l FTEMP_HI(%a0), %d0
and.l &0x7fffffff, %d0 # msb is a don't care!
bne.b is_nan_x
is_inf_x:
mov.b &INF, %d0
rts
is_nan_x:
btst &0x6, FTEMP_HI(%a0)
beq.b is_snan_x
mov.b &QNAN, %d0
rts
is_snan_x:
mov.b &SNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_d(): return the optype of the input dbl fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = points to double precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# #
#########################################################################
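#
# the tests below on the raw 64-bit double image amount to this C sketch
# (tag names and values are illustrative only):
#
#    #include <stdint.h>
#
#    enum { TNORM, TZERO, TINF, TQNAN, TSNAN, TDENORM };
#
#    static int classify_d(uint64_t bits)
#    {
#        uint32_t exp  = (bits >> 52) & 0x7ff;
#        uint64_t frac =  bits & 0x000fffffffffffffULL;
#        if (exp == 0x7ff)
#            return frac ? (((frac >> 51) & 1) ? TQNAN : TSNAN) : TINF;
#        if (exp == 0)
#            return frac ? TDENORM : TZERO;
#        return TNORM;
#    }
#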
global set_tag_d
set_tag_d:
mov.l FTEMP(%a0), %d0
mov.l %d0, %d1
andi.l &0x7ff00000, %d0
beq.b zero_or_denorm_d
cmpi.l %d0, &0x7ff00000
beq.b inf_or_nan_d
is_norm_d:
mov.b &NORM, %d0
rts
zero_or_denorm_d:
and.l &0x000fffff, %d1
bne is_denorm_d
tst.l 4+FTEMP(%a0)
bne is_denorm_d
is_zero_d:
mov.b &ZERO, %d0
rts
is_denorm_d:
mov.b &DENORM, %d0
rts
inf_or_nan_d:
and.l &0x000fffff, %d1
bne is_nan_d
tst.l 4+FTEMP(%a0)
bne is_nan_d
is_inf_d:
mov.b &INF, %d0
rts
is_nan_d:
btst &19, %d1
bne is_qnan_d
is_snan_d:
mov.b &SNAN, %d0
rts
is_qnan_d:
mov.b &QNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_s(): return the optype of the input sgl fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to single precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# #
#########################################################################
global set_tag_s
set_tag_s:
mov.l FTEMP(%a0), %d0
mov.l %d0, %d1
andi.l &0x7f800000, %d0
beq.b zero_or_denorm_s
cmpi.l %d0, &0x7f800000
beq.b inf_or_nan_s
is_norm_s:
mov.b &NORM, %d0
rts
zero_or_denorm_s:
and.l &0x007fffff, %d1
bne is_denorm_s
is_zero_s:
mov.b &ZERO, %d0
rts
is_denorm_s:
mov.b &DENORM, %d0
rts
inf_or_nan_s:
and.l &0x007fffff, %d1
bne is_nan_s
is_inf_s:
mov.b &INF, %d0
rts
is_nan_s:
btst &22, %d1
bne is_qnan_s
is_snan_s:
mov.b &SNAN, %d0
rts
is_qnan_s:
mov.b &QNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# unf_res(): routine to produce default underflow result of a #
# scaled extended precision number; this is used by #
# fadd/fdiv/fmul/etc. emulation routines. #
# unf_res4(): same as above but for fsglmul/fsgldiv which use #
# single round prec and extended prec mode. #
# #
# XREF **************************************************************** #
# _denorm() - denormalize according to scale factor #
# _round() - round denormalized number according to rnd prec #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precison operand #
# d0 = scale factor #
# d1 = rounding precision/mode #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to default underflow result in extended precision #
# d0.b = result FPSR_cc which caller may or may not want to save #
# #
# ALGORITHM *********************************************************** #
# Convert the input operand to "internal format" which means the #
# exponent is extended to 16 bits and the sign is stored in the unused #
# portion of the extended precison operand. Denormalize the number #
# according to the scale factor passed in d0. Then, round the #
# denormalized result. #
# Set the FPSR_exc bits as appropriate but return the cc bits in #
# d0 in case the caller doesn't want to save them (as is the case for #
# fmove out). #
# unf_res4() for fsglmul/fsgldiv forces the denorm to extended #
# precision and the rounding mode to single. #
# #
#########################################################################
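#
# the denormalize step that _denorm() performs for this routine amounts to
# the C sketch below (the real code shifts word-at-a-time over the 96-bit
# image; names are illustrative):
#
#    #include <stdint.h>
#
#    static void denorm_sketch(uint64_t *man, int *exp, int threshold,
#                              int *g, int *r, int *s)
#    {
#        while (*exp < threshold) {
#            *s |= *r;                           /* old round joins sticky */
#            *r  = *g;
#            *g  = *man & 1;                     /* bit falling off man    */
#            *man >>= 1;
#            (*exp)++;
#        }
#    }
#
# _round() is then handed the resulting g,r,s bits to finish the job.
#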
global unf_res
unf_res:
mov.l %d1, -(%sp) # save rnd prec,mode on stack
btst &0x7, FTEMP_EX(%a0) # make "internal" format
sne FTEMP_SGN(%a0)
mov.w FTEMP_EX(%a0), %d1 # extract exponent
and.w &0x7fff, %d1
sub.w %d0, %d1
mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent
mov.l %a0, -(%sp) # save operand ptr during calls
mov.l 0x4(%sp),%d0 # pass rnd prec.
andi.w &0x00c0,%d0
lsr.w &0x4,%d0
bsr.l _denorm # denorm result
mov.l (%sp),%a0
mov.w 0x6(%sp),%d1 # load prec:mode into %d1
andi.w &0xc0,%d1 # extract rnd prec
lsr.w &0x4,%d1
swap %d1
mov.w 0x6(%sp),%d1
andi.w &0x30,%d1
lsr.w &0x4,%d1
bsr.l _round # round the denorm
mov.l (%sp)+, %a0
# result is now rounded properly. convert back to normal format
bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
beq.b unf_res_chkifzero # no; result is positive
bset &0x7, FTEMP_EX(%a0) # set result sgn
clr.b FTEMP_SGN(%a0) # clear temp sign
# the number may have become zero after rounding. set ccodes accordingly.
unf_res_chkifzero:
clr.l %d0
tst.l FTEMP_HI(%a0) # is value now a zero?
bne.b unf_res_cont # no
tst.l FTEMP_LO(%a0)
bne.b unf_res_cont # no
# bset &z_bit, FPSR_CC(%a6) # yes; set zero ccode bit
bset &z_bit, %d0 # yes; set zero ccode bit
unf_res_cont:
#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
btst &inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
beq.b unf_res_end # no
bset &aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
unf_res_end:
add.l &0x4, %sp # clear stack
rts
# unf_res() for fsglmul() and fsgldiv().
global unf_res4
unf_res4:
mov.l %d1,-(%sp) # save rnd prec,mode on stack
btst &0x7,FTEMP_EX(%a0) # make "internal" format
sne FTEMP_SGN(%a0)
mov.w FTEMP_EX(%a0),%d1 # extract exponent
and.w &0x7fff,%d1
sub.w %d0,%d1
mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent
mov.l %a0,-(%sp) # save operand ptr during calls
clr.l %d0 # force rnd prec = ext
bsr.l _denorm # denorm result
mov.l (%sp),%a0
mov.w &s_mode,%d1 # force rnd prec = sgl
swap %d1
mov.w 0x6(%sp),%d1 # load rnd mode
andi.w &0x30,%d1 # extract rnd prec
lsr.w &0x4,%d1
bsr.l _round # round the denorm
mov.l (%sp)+,%a0
# result is now rounded properly. convert back to normal format
bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
beq.b unf_res4_chkifzero # no; result is positive
bset &0x7,FTEMP_EX(%a0) # set result sgn
clr.b FTEMP_SGN(%a0) # clear temp sign
# the number may have become zero after rounding. set ccodes accordingly.
unf_res4_chkifzero:
clr.l %d0
tst.l FTEMP_HI(%a0) # is value now a zero?
bne.b unf_res4_cont # no
tst.l FTEMP_LO(%a0)
bne.b unf_res4_cont # no
# bset &z_bit,FPSR_CC(%a6) # yes; set zero ccode bit
bset &z_bit,%d0 # yes; set zero ccode bit
unf_res4_cont:
#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.b unf_res4_end # no
bset &aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
unf_res4_end:
add.l &0x4,%sp # clear stack
rts
#########################################################################
# XDEF **************************************************************** #
# ovf_res(): routine to produce the default overflow result of #
# an overflowing number. #
# ovf_res2(): same as above but the rnd mode/prec are passed #
# differently. #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# d1.b = '-1' => (-); '0' => (+) #
# ovf_res(): #
# d0 = rnd mode/prec #
# ovf_res2(): #
# hi(d0) = rnd prec #
# lo(d0) = rnd mode #
# #
# OUTPUT ************************************************************** #
# a0 = points to extended precision result #
# d0.b = condition code bits #
# #
# ALGORITHM *********************************************************** #
# The default overflow result can be determined by the sign of #
# the result and the rounding mode/prec in effect. These bits are #
# concatenated together to create an index into the default result #
# table. A pointer to the correct result is returned in a0. The #
# resulting condition codes are returned in d0 in case the caller #
# doesn't want FPSR_cc altered (as is the case for fmove out). #
# #
#########################################################################
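#
# the table lookup below boils down to this C sketch (the strings stand in
# for the packed extended-precision constants in tbl_ovfl_result):
#
#    enum rnd_mode { RN, RZ, RM, RP };
#
#    static const char *ovf_default(int neg, enum rnd_mode md)
#    {
#        if (md == RZ)                           /* always stays finite    */
#            return neg ? "-MAX" : "+MAX";
#        if (md == RM && !neg)                   /* can't round up to +inf */
#            return "+MAX";
#        if (md == RP && neg)                    /* can't round dn to -inf */
#            return "-MAX";
#        return neg ? "-INF" : "+INF";           /* RN, or rounding away   */
#    }
#
# where "MAX" is the largest finite number of the destination precision
# (ext, sgl, or dbl), exactly as listed in tbl_ovfl_result.
#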
global ovf_res
ovf_res:
andi.w &0x10,%d1 # keep result sign
lsr.b &0x4,%d0 # shift prec/mode
or.b %d0,%d1 # concat the two
mov.w %d1,%d0 # make a copy
lsl.b &0x1,%d1 # multiply d1 by 2
bra.b ovf_res_load
global ovf_res2
ovf_res2:
and.w &0x10, %d1 # keep result sign
or.b %d0, %d1 # insert rnd mode
swap %d0
or.b %d0, %d1 # insert rnd prec
mov.w %d1, %d0 # make a copy
lsl.b &0x1, %d1 # shift left by 1
#
# use the rounding mode, precision, and result sign as in index into the
# two tables below to fetch the default result and the result ccodes.
#
ovf_res_load:
mov.b (tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
rts
tbl_ovfl_cc:
byte 0x2, 0x0, 0x0, 0x2
byte 0x2, 0x0, 0x0, 0x2
byte 0x2, 0x0, 0x0, 0x2
byte 0x0, 0x0, 0x0, 0x0
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
tbl_ovfl_result:
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
#########################################################################
# XDEF **************************************************************** #
# fout(): move from fp register to memory or data register #
# #
# XREF **************************************************************** #
# _round() - needed to create EXOP for sgl/dbl precision #
# norm() - needed to create EXOP for extended precision #
# ovf_res() - create default overflow result for sgl/dbl precision#
# unf_res() - create default underflow result for sgl/dbl prec. #
# dst_dbl() - create rounded dbl precision result. #
# dst_sgl() - create rounded sgl precision result. #
# fetch_dreg() - fetch dynamic k-factor reg for packed. #
# bindec() - convert FP binary number to packed number. #
# _mem_write() - write data to memory. #
# _mem_write2() - write data to memory unless supv mode -(a7) exc.#
# _dmem_write_{byte,word,long}() - write data to memory. #
# store_dreg_{b,w,l}() - store data to data register file. #
# facc_out_{b,w,l,d,x}() - data access error occurred. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 : intermediate underflow or overflow result if #
# OVFL/UNFL occurred for a sgl or dbl operand #
# #
# ALGORITHM *********************************************************** #
# This routine is accessed by many handlers that need to do an #
# opclass three move of an operand out to memory. #
# Decode an fmove out (opclass 3) instruction to determine if #
# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data #
# register or memory. The algorithm uses a standard "fmove" to create #
# the rounded result. Also, since exceptions are disabled, this also #
#	creates the correct OPERR default result if appropriate.	#
# For sgl or dbl precision, overflow or underflow can occur. If #
#	either occurs and is enabled, the EXOP must be created.	#
# For extended precision, the stacked <ea> must be fixed along #
# w/ the address index register as appropriate w/ _calc_ea_fout(). If #
# the source is a denorm and if underflow is enabled, an EXOP must be #
# created. #
# For packed, the k-factor must be fetched from the instruction #
# word or a data register. The <ea> must be fixed as w/ extended #
# precision. Then, bindec() is called to create the appropriate #
# packed result. #
# If at any time an access error is flagged by one of the move- #
# to-memory routines, then a special exit must be made so that the #
# access error can be handled properly. #
# #
#########################################################################
global fout
fout:
bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
jmp (tbl_fout.b,%pc,%a1) # jump to routine
swbeg &0x8
tbl_fout:
short fout_long - tbl_fout
short fout_sgl - tbl_fout
short fout_ext - tbl_fout
short fout_pack - tbl_fout
short fout_word - tbl_fout
short fout_dbl - tbl_fout
short fout_byte - tbl_fout
short fout_pack - tbl_fout
#################################################################
# fmove.b out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_byte:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_byte_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_byte_norm:
fmov.l %d0,%fpcr # insert rnd prec,mode
fmov.b %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_byte_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_byte # write byte
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
rts
fout_byte_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_b
rts
fout_byte_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_byte_norm
#################################################################
# fmove.w out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_word:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_word_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_word_norm:
fmov.l %d0,%fpcr # insert rnd prec:mode
fmov.w %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_word_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_word # write word
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
rts
fout_word_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_w
rts
fout_word_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_word_norm
#################################################################
# fmove.l out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_long:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_long_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_long_norm:
fmov.l %d0,%fpcr # insert rnd prec:mode
fmov.l %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
fout_long_write:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_long_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
rts
fout_long_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
rts
fout_long_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_long_norm
#################################################################
# fmove.x out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
# The DENORM causes an Underflow exception.
fout_ext:
# we copy the extended precision result to FP_SCR0 so that the reserved
# 16-bit field gets zeroed. we do this since we promise not to disturb
# what's at SRC(a0).
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
clr.w 2+FP_SCR0_EX(%a6) # clear reserved field
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
fmovm.x SRC(%a0),&0x80 # return result
bsr.l _calc_ea_fout # fix stacked <ea>
mov.l %a0,%a1 # pass: dst addr
lea FP_SCR0(%a6),%a0 # pass: src addr
mov.l &0xc,%d0 # pass: opsize is 12 bytes
# we must not yet write the extended precision data to the stack
# in the pre-decrement case from supervisor mode or else we'll corrupt
# the stack frame. so, leave it in FP_SRC for now and deal with it later...
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.b fout_ext_a7
bsr.l _dmem_write # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
tst.b STAG(%a6) # is operand normalized?
bne.b fout_ext_denorm # no
rts
# the number is a DENORM. must set the underflow exception bit
fout_ext_denorm:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
mov.b FPCR_ENABLE(%a6),%d0
andi.b &0x0a,%d0 # is UNFL or INEX enabled?
bne.b fout_ext_exc # yes
rts
# we don't want to do the write if the exception occurred in supervisor mode
# so _mem_write2() handles this for us.
fout_ext_a7:
bsr.l _mem_write2 # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
tst.b STAG(%a6) # is operand normalized?
bne.b fout_ext_denorm # no
rts
fout_ext_exc:
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the mantissa
neg.w %d0 # new exp = -(shft amt)
andi.w &0x7fff,%d0
andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
fout_ext_err:
mov.l EXC_A6(%a6),(%a6) # fix stacked a6
bra.l facc_out_x
#########################################################################
# fmove.s out ###########################################################
#########################################################################
fout_sgl:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
#
# operand is a normalized number. first, we check to see if the move out
# would cause either an underflow or overflow. these cases are handled
# separately. otherwise, set the FPCR to the proper rounding mode and
# execute the move.
#
mov.w SRC_EX(%a0),%d0 # extract exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&SGL_HI # will operand overflow?
bgt.w fout_sgl_ovfl # yes; go handle OVFL
beq.w fout_sgl_may_ovfl # maybe; go handle possible OVFL
cmpi.w %d0,&SGL_LO # will operand underflow?
blt.w fout_sgl_unfl # yes; go handle underflow
#
# NORMs(in range) can be stored out by a simple "fmov.s"
# Unnormalized inputs can come through this point.
#
fout_sgl_exg:
fmovm.x SRC(%a0),&0x80 # fetch fop from stack
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmov.s %fp0,%d0 # store does convert and round
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
fout_sgl_exg_write:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_exg_write_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
rts
fout_sgl_exg_write_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
rts
#
# here, we know that the operand would UNFL if moved out to single prec,
# so, denorm and round and then use generic store single routine to
# write the value to memory.
#
fout_sgl_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l %a0,-(%sp)
clr.l %d0 # pass: S.F. = 0
cmpi.b STAG(%a6),&DENORM # fetch src optype tag
bne.b fout_sgl_unfl_cont # let DENORMs fall through
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the DENORM
fout_sgl_unfl_cont:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calc default underflow result
lea FP_SCR0(%a6),%a0 # pass: ptr to fop
bsr.l dst_sgl # convert to single prec
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_unfl_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.b fout_sgl_unfl_chkexc
fout_sgl_unfl_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
fout_sgl_unfl_chkexc:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_unfl # yes
addq.l &0x4,%sp
rts
#
# it's definitely an overflow so call ovf_res to get the correct answer
#
fout_sgl_ovfl:
tst.b 3+SRC_HI(%a0) # is result inexact?
bne.b fout_sgl_ovfl_inex2
tst.l SRC_LO(%a0) # is result inexact?
bne.b fout_sgl_ovfl_inex2
ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
bra.b fout_sgl_ovfl_cont
fout_sgl_ovfl_inex2:
ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
fout_sgl_ovfl_cont:
mov.l %a0,-(%sp)
# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
# overflow result. DON'T save the returned ccodes from ovf_res() since
# fmove out doesn't alter them.
tst.b SRC_EX(%a0) # is operand negative?
smi %d1 # set if so
mov.l L_SCR3(%a6),%d0 # pass: sgl prec,rnd mode
bsr.l ovf_res # calc OVFL result
fmovm.x (%a0),&0x80 # load default overflow result
fmov.s %fp0,%d0 # store to single
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_ovfl_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.b fout_sgl_ovfl_chkexc
fout_sgl_ovfl_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
fout_sgl_ovfl_chkexc:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_ovfl # yes
addq.l &0x4,%sp
rts
#
# move out MAY overflow:
# (1) force the exp to 0x3fff
# (2) do a move w/ appropriate rnd mode
# (3) if exp still equals zero, then insert original exponent
# for the correct result.
# if exp now equals one, then it overflowed so call ovf_res.
#
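#
# in other words (C sketch; the (float) cast stands in for the rounding
# done by the fmov.s below, and the caller's rounding mode is assumed to
# already be in effect):
#
#    #include <math.h>
#
#    static int sgl_would_ovfl(double scaled)    /* exp forced to 0x3fff,  */
#    {                                           /* so 1.0 <= |scaled| < 2 */
#        float r = (float)scaled;                /* round to single        */
#        return fabsf(r) >= 2.0f;                /* exp bumped -> overflow */
#    }
#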
fout_sgl_may_ovfl:
mov.w SRC_EX(%a0),%d1 # fetch current sign
andi.w &0x8000,%d1 # keep it,clear exp
ori.w &0x3fff,%d1 # insert exp = 0
mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # need absolute value
fcmp.b %fp0,&0x2 # did exponent increase?
fblt.w fout_sgl_exg # no; go finish NORM
bra.w fout_sgl_ovfl # yes; go handle overflow
################
fout_sd_exc_unfl:
mov.l (%sp)+,%a0
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
cmpi.b STAG(%a6),&DENORM # was src a DENORM?
bne.b fout_sd_exc_cont # no
lea FP_SCR0(%a6),%a0
bsr.l norm
neg.l %d0
andi.w &0x7fff,%d0
bfins %d0,FP_SCR0_EX(%a6){&1:&15}
bra.b fout_sd_exc_cont
fout_sd_exc:
fout_sd_exc_ovfl:
mov.l (%sp)+,%a0 # restore a0
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
fout_sd_exc_cont:
bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
sne.b 2+FP_SCR0_EX(%a6) # set internal sign bit
lea FP_SCR0(%a6),%a0 # pass: ptr to DENORM
mov.b 3+L_SCR3(%a6),%d1
lsr.b &0x4,%d1
andi.w &0x0c,%d1
swap %d1
mov.b 3+L_SCR3(%a6),%d1
lsr.b &0x4,%d1
andi.w &0x03,%d1
clr.l %d0 # pass: zero g,r,s
bsr.l _round # round the DENORM
tst.b 2+FP_SCR0_EX(%a6) # is EXOP negative?
beq.b fout_sd_exc_done # no
bset &0x7,FP_SCR0_EX(%a6) # yes
fout_sd_exc_done:
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#################################################################
# fmove.d out ###################################################
#################################################################
fout_dbl:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
#
# operand is a normalized number. first, we check to see if the move out
# would cause either an underflow or overflow. these cases are handled
# separately. otherwise, set the FPCR to the proper rounding mode and
# execute the move.
#
mov.w SRC_EX(%a0),%d0 # extract exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&DBL_HI # will operand overflow?
bgt.w fout_dbl_ovfl # yes; go handle OVFL
beq.w fout_dbl_may_ovfl # maybe; go handle possible OVFL
cmpi.w %d0,&DBL_LO # will operand underflow?
blt.w fout_dbl_unfl # yes; go handle underflow
#
# NORMs(in range) can be stored out by a simple "fmov.d"
# Unnormalized inputs can come through this point.
#
fout_dbl_exg:
fmovm.x SRC(%a0),&0x80 # fetch fop from stack
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmov.d %fp0,L_SCR1(%a6) # store does convert and round
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d0 # save FPSR
or.w %d0,2+USER_FPSR(%a6) # set possible inex2/ainex
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
rts # no; so we're finished
#
# here, we know that the operand would UNFL if moved out to double prec,
# so, denorm and round and then use generic store double routine to
# write the value to memory.
#
fout_dbl_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l %a0,-(%sp)
clr.l %d0 # pass: S.F. = 0
cmpi.b STAG(%a6),&DENORM # fetch src optype tag
bne.b fout_dbl_unfl_cont # let DENORMs fall through
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the DENORM
fout_dbl_unfl_cont:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calc default underflow result
lea FP_SCR0(%a6),%a0 # pass: ptr to fop
	bsr.l		dst_dbl			# convert to double prec
mov.l %d0,L_SCR1(%a6)
mov.l %d1,L_SCR2(%a6)
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_unfl # yes
addq.l &0x4,%sp
rts
#
# it's definitely an overflow so call ovf_res to get the correct answer
#
fout_dbl_ovfl:
mov.w 2+SRC_LO(%a0),%d0
andi.w &0x7ff,%d0
bne.b fout_dbl_ovfl_inex2
ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
bra.b fout_dbl_ovfl_cont
fout_dbl_ovfl_inex2:
ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
fout_dbl_ovfl_cont:
mov.l %a0,-(%sp)
# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
# overflow result. DON'T save the returned ccodes from ovf_res() since
# fmove out doesn't alter them.
tst.b SRC_EX(%a0) # is operand negative?
smi %d1 # set if so
mov.l L_SCR3(%a6),%d0 # pass: dbl prec,rnd mode
bsr.l ovf_res # calc OVFL result
fmovm.x (%a0),&0x80 # load default overflow result
fmov.d %fp0,L_SCR1(%a6) # store to double
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_ovfl # yes
addq.l &0x4,%sp
rts
#
# move out MAY overflow:
# (1) force the exp to 0x3fff
# (2) do a move w/ appropriate rnd mode
# (3) if exp still equals zero, then insert original exponent
# for the correct result.
# if exp now equals one, then it overflowed so call ovf_res.
#
fout_dbl_may_ovfl:
mov.w SRC_EX(%a0),%d1 # fetch current sign
andi.w &0x8000,%d1 # keep it,clear exp
ori.w &0x3fff,%d1 # insert exp = 0
mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # need absolute value
fcmp.b %fp0,&0x2 # did exponent increase?
fblt.w fout_dbl_exg # no; go finish NORM
bra.w fout_dbl_ovfl # yes; go handle overflow
#########################################################################
# XDEF **************************************************************** #
# dst_dbl(): create double precision value from extended prec. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to source operand in extended precision #
# #
# OUTPUT ************************************************************** #
# d0 = hi(double precision result) #
# d1 = lo(double precision result) #
# #
# ALGORITHM *********************************************************** #
# #
# Changes extended precision to double precision. #
# Note: no attempt is made to round the extended value to double. #
# dbl_sign = ext_sign #
# dbl_exp = ext_exp - $3fff(ext bias) + $7ff(dbl bias) #
# get rid of ext integer bit #
#	dbl_mant = ext_mant{62:11}					#
# #
# --------------- --------------- --------------- #
# extended -> |s| exp | |1| ms mant | | ls mant | #
# --------------- --------------- --------------- #
# 95 64 63 62 32 31 11 0 #
# | | #
# | | #
# | | #
# v v #
# --------------- --------------- #
# double -> |s|exp| mant | | mant | #
# --------------- --------------- #
# 63 51 32 31 0 #
# #
#########################################################################
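#
# packing it with C integer operations (illustrative; the exponent is
# assumed to already be in double range, as the callers guarantee):
#
#    #include <stdint.h>
#
#    static uint64_t dst_dbl_c(uint16_t sgn_exp, uint64_t man)
#    {
#        int exp = (sgn_exp & 0x7fff) - 0x3fff + 0x3ff;  /* re-bias        */
#        if (!(man >> 63))                       /* source is denormalized */
#            exp -= 1;                           /* denorm bias = bias - 1 */
#        return (uint64_t)(sgn_exp >> 15) << 63          /* sign           */
#             | ((uint64_t)exp & 0x7ff) << 52            /* exponent       */
#             | ((man << 1) >> 12);              /* drop j-bit, keep 52    */
#    }
#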
dst_dbl:
clr.l %d0 # clear d0
mov.w FTEMP_EX(%a0),%d0 # get exponent
subi.w &EXT_BIAS,%d0 # subtract extended precision bias
addi.w &DBL_BIAS,%d0 # add double precision bias
tst.b FTEMP_HI(%a0) # is number a denorm?
bmi.b dst_get_dupper # no
subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
dst_get_dupper:
swap %d0 # d0 now in upper word
lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
tst.b FTEMP_EX(%a0) # test sign
bpl.b dst_get_dman # if positive, go process mantissa
bset &0x1f,%d0 # if negative, set sign
dst_get_dman:
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
or.l %d1,%d0 # put these bits in ms word of double
mov.l %d0,L_SCR1(%a6) # put the new exp back on the stack
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
mov.l &21,%d0 # load shift count
lsl.l %d0,%d1 # put lower 11 bits in upper bits
mov.l %d1,L_SCR2(%a6) # build lower lword in memory
mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
mov.l L_SCR2(%a6),%d1
or.l %d0,%d1 # put them in double result
mov.l L_SCR1(%a6),%d0
rts
#########################################################################
# XDEF **************************************************************** #
# dst_sgl(): create single precision value from extended prec #
# #
# XREF **************************************************************** #
# #
# INPUT *************************************************************** #
# a0 = pointer to source operand in extended precision #
# #
# OUTPUT ************************************************************** #
# d0 = single precision result #
# #
# ALGORITHM *********************************************************** #
# #
# Changes extended precision to single precision. #
# sgl_sign = ext_sign #
# sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias) #
# get rid of ext integer bit #
#	sgl_mant = ext_mant{62:40}					#
# #
# --------------- --------------- --------------- #
# extended -> |s| exp | |1| ms mant | | ls mant | #
# --------------- --------------- --------------- #
# 95 64 63 62 40 32 31 12 0 #
# | | #
# | | #
# | | #
# v v #
# --------------- #
# single -> |s|exp| mant | #
# --------------- #
# 31 22 0 #
# #
#########################################################################
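#
# the single-precision analogue of the dst_dbl() sketch above:
#
#    #include <stdint.h>
#
#    static uint32_t dst_sgl_c(uint16_t sgn_exp, uint64_t man)
#    {
#        int exp = (sgn_exp & 0x7fff) - 0x3fff + 0x7f;   /* re-bias        */
#        if (!(man >> 63))                       /* source is denormalized */
#            exp -= 1;                           /* denorm bias = bias - 1 */
#        return (uint32_t)(sgn_exp >> 15) << 31          /* sign           */
#             | ((uint32_t)exp & 0xff) << 23             /* exponent       */
#             | (uint32_t)((man << 1) >> 41);    /* top 23 fraction bits   */
#    }
#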
dst_sgl:
clr.l %d0
mov.w FTEMP_EX(%a0),%d0 # get exponent
subi.w &EXT_BIAS,%d0 # subtract extended precision bias
addi.w &SGL_BIAS,%d0 # add single precision bias
tst.b FTEMP_HI(%a0) # is number a denorm?
bmi.b dst_get_supper # no
subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
dst_get_supper:
swap %d0 # put exp in upper word of d0
lsl.l &0x7,%d0 # shift it into single exp bits
tst.b FTEMP_EX(%a0) # test sign
bpl.b dst_get_sman # if positive, continue
bset &0x1f,%d0 # if negative, put in sign first
dst_get_sman:
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
lsr.l &0x8,%d1 # and put them flush right
or.l %d1,%d0 # put these bits in ms word of single
rts
##############################################################################
fout_pack:
bsr.l _calc_ea_fout # fetch the <ea>
mov.l %a0,-(%sp)
mov.b STAG(%a6),%d0 # fetch input type
bne.w fout_pack_not_norm # input is not NORM
fout_pack_norm:
btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
beq.b fout_pack_s # static
fout_pack_d:
mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
lsr.b &0x4,%d1
andi.w &0x7,%d1
bsr.l fetch_dreg # fetch Dn w/ k-factor
bra.b fout_pack_type
fout_pack_s:
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch static field
fout_pack_type:
bfexts %d0{&25:&7},%d0 # extract k-factor
mov.l %d0,-(%sp)
lea FP_SRC(%a6),%a0 # pass: ptr to input
# bindec is currently scrambling FP_SRC for denorm inputs.
# we'll have to change this, but for now, tough luck!!!
bsr.l bindec # convert xprec to packed
# andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
mov.l (%sp)+,%d0
tst.b 3+FP_SCR0_EX(%a6)
bne.b fout_pack_set
tst.l FP_SCR0_HI(%a6)
bne.b fout_pack_set
tst.l FP_SCR0_LO(%a6)
bne.b fout_pack_set
# add the extra condition that only if the k-factor was zero, too, should
# we zero the exponent
tst.l %d0
bne.b fout_pack_set
# "mantissa" is all zero which means that the answer is zero. but, the '040
# algorithm allows the exponent to be non-zero. the 881/2 do not. Therefore,
# if the mantissa is zero, I will zero the exponent, too.
# the question now is whether the exponent's sign bit is allowed to be non-zero
# for a zero, also...
andi.w &0xf000,FP_SCR0(%a6)
fout_pack_set:
lea FP_SCR0(%a6),%a0 # pass: src addr
fout_pack_write:
mov.l (%sp)+,%a1 # pass: dst addr
mov.l &0xc,%d0 # pass: opsize is 12 bytes
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.b fout_pack_a7
bsr.l _dmem_write # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
rts
# we don't want to do the write if the exception occurred in supervisor mode
# so _mem_write2() handles this for us.
fout_pack_a7:
bsr.l _mem_write2 # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
rts
fout_pack_not_norm:
cmpi.b %d0,&DENORM # is it a DENORM?
beq.w fout_pack_norm # yes
lea FP_SRC(%a6),%a0
clr.w 2+FP_SRC_EX(%a6)
cmpi.b %d0,&SNAN # is it an SNAN?
beq.b fout_pack_snan # yes
bra.b fout_pack_write # no
fout_pack_snan:
ori.w &snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
bset &0x6,FP_SRC_HI(%a6) # set snan bit
bra.b fout_pack_write
#########################################################################
# XDEF **************************************************************** #
# fmul(): emulates the fmul instruction #
# fsmul(): emulates the fsmul instruction #
# fdmul(): emulates the fdmul instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a multiply #
# instruction won't cause an exception. Use the regular fmul to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
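#
# the scaling idea, as a host-C analogy (frexp/ldexp play the role of
# scale_to_zero_src/dst and of the final exponent fix-up; special operands
# and the default-result/EXOP handling are left out):
#
#    #include <math.h>
#
#    static double fmul_sketch(double a, double b)
#    {
#        int ea, eb;
#        double ma = frexp(a, &ea);      /* a = ma * 2^ea, |ma| in [0.5,1) */
#        double mb = frexp(b, &eb);
#        double p  = ma * mb;            /* can neither ovfl nor unfl      */
#        return ldexp(p, ea + eb);       /* rescale; the asm instead tests */
#    }                                   /* the scale factor against the   */
#                                        /* tbl_fmul_ovfl/unfl tables below */
#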
align 0x10
tbl_fmul_ovfl:
long 0x3fff - 0x7ffe # ext_max
long 0x3fff - 0x407e # sgl_max
long 0x3fff - 0x43fe # dbl_max
tbl_fmul_unfl:
long 0x3fff + 0x0001 # ext_unfl
long 0x3fff - 0x3f80 # sgl_unfl
long 0x3fff - 0x3c00 # dbl_unfl
global fsmul
fsmul:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fmul
global fdmul
fdmul:
andi.b &0x30,%d0
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fmul
fmul:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fmul_not_norm # optimize on non-norm input
fmul_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale src exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
add.l %d0,(%sp) # SCALE_FACTOR = scale1 + scale2
mov.w 2+L_SCR3(%a6),%d1 # fetch precision
lsr.b &0x6,%d1 # shift to lo bits
mov.l (%sp)+,%d0 # load S.F.
cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
beq.w fmul_may_ovfl # result may rnd to overflow
blt.w fmul_ovfl # result will overflow
cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
beq.w fmul_may_unfl # result may rnd to no unfl
bgt.w fmul_unfl # result will underflow
#
# NORMAL:
# - the result of the multiply operation will neither overflow nor underflow.
# - do the multiply to the proper precision and rounding mode.
# - scale the result exponent using the scale factor. if both operands were
# normalized then we really don't need to go through this scaling. but for now,
# this will do.
#
fmul_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fmul_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
	sub.l		%d0,%d1			# subtract scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# OVERFLOW:
# - the result of the multiply operation is an overflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
# - if overflow or inexact is enabled, we need a multiply result rounded to
# extended precision. if the original operation was extended, then we have this
# result. if the original operation was single or double, we have to do another
# multiply using extended precision and the correct rounding mode. the result
# of this operation then has its exponent scaled by -0x6000 to create the
# exceptional operand.
#
fmul_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
# save setting this until now because this is where fmul_may_ovfl may jump in
fmul_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fmul_ovfl_ena # yes
# calculate the default result
fmul_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass rnd prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled; Create EXOP:
# - if precision is extended, then we have the EXOP. simply bias the exponent
# with an extra -0x6000. if the precision is single or double, we need to
# calculate a result rounded to extended precision.
#
fmul_ovfl_ena:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # test the rnd prec
bne.b fmul_ovfl_ena_sd # it's sgl or dbl
fmul_ovfl_ena_cont:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1 # clear sign bit
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fmul_ovfl_dis
fmul_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode only
fmov.l %d1,%fpcr # set FPCR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
bra.b fmul_ovfl_ena_cont
#
# may OVERFLOW:
# - the result of the multiply operation MAY overflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
#
fmul_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fmul_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fmul_normal_exit
#
# UNDERFLOW:
# - the result of the multiply operation is an underflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
# - if underflow or inexact is enabled, we need a multiply result rounded to
# extended precision. if the original operation was extended, then we have this
# result. if the original operation was single or double, we have to do another
# multiply using extended precision and the correct rounding mode. the result
# of this operation then has its exponent scaled by +0x6000 to create the
# exceptional operand.
#
fmul_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
# for fun, let's use only extended precision, round to zero. then, let
# the unf_res() routine figure out all the rest.
# this way we will still get the correct answer.
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fmul_unfl_ena # yes
fmul_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res2 may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL or INEX is enabled.
#
fmul_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fmul_unfl_ena_sd # no, sgl or dbl
# if the rnd mode is anything but RZ, then we have to re-do the above
# multiplication because we used RZ for all.
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmul_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp1 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fmul_unfl_dis
fmul_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fmul_unfl_ena_cont
# MAY UNDERFLOW:
# - use the correct rounding mode and precision. this code favors operations
# that do not underflow.
fmul_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| > 2.b?
fbgt.w fmul_normal_exit # no; no underflow occurred
fblt.w fmul_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 2. but,
# we don't know if the result was an underflow that rounded up to a 2 or
# a normalized number that rounded down to a 2. so, redo the entire operation
# using RZ as the rounding mode to see what the pre-rounded result is.
# this case should be relatively rare.
#
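# Illustrative note: round-to-zero never increases the magnitude, so after the
# RZ multiply below a pre-rounded value that was really below 2.0 stays below
# 2.0 (underflow), while a value of 2.0 or more really was at least 2.0 (no
# underflow); round-to-nearest could have rounded a just-under-2.0 value up to
# exactly 2.0 and hidden that distinction.
#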
fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp1 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x2 # is |result| < 2.b?
fbge.w fmul_normal_exit # no; no underflow occurred
bra.w fmul_unfl # yes, underflow occurred
################################################################################
#
# Multiply: inputs are not both normalized; what are they?
#
fmul_not_norm:
mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fmul_op.b,%pc,%d1.w)
swbeg &48
tbl_fmul_op:
short fmul_norm - tbl_fmul_op # NORM x NORM
short fmul_zero - tbl_fmul_op # NORM x ZERO
short fmul_inf_src - tbl_fmul_op # NORM x INF
short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
short fmul_norm - tbl_fmul_op # NORM x DENORM
short fmul_res_snan - tbl_fmul_op # NORM x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_zero - tbl_fmul_op # ZERO x NORM
short fmul_zero - tbl_fmul_op # ZERO x ZERO
short fmul_res_operr - tbl_fmul_op # ZERO x INF
short fmul_res_qnan - tbl_fmul_op # ZERO x QNAN
short fmul_zero - tbl_fmul_op # ZERO x DENORM
short fmul_res_snan - tbl_fmul_op # ZERO x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_inf_dst - tbl_fmul_op # INF x NORM
short fmul_res_operr - tbl_fmul_op # INF x ZERO
short fmul_inf_dst - tbl_fmul_op # INF x INF
short fmul_res_qnan - tbl_fmul_op # INF x QNAN
short fmul_inf_dst - tbl_fmul_op # INF x DENORM
short fmul_res_snan - tbl_fmul_op # INF x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_res_qnan - tbl_fmul_op # QNAN x NORM
short fmul_res_qnan - tbl_fmul_op # QNAN x ZERO
short fmul_res_qnan - tbl_fmul_op # QNAN x INF
short fmul_res_qnan - tbl_fmul_op # QNAN x QNAN
short fmul_res_qnan - tbl_fmul_op # QNAN x DENORM
short fmul_res_snan - tbl_fmul_op # QNAN x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_norm - tbl_fmul_op # DENORM x NORM
short fmul_zero - tbl_fmul_op # DENORM x ZERO
short fmul_inf_src - tbl_fmul_op # DENORM x INF
short fmul_res_qnan - tbl_fmul_op # DENORM x QNAN
short fmul_norm - tbl_fmul_op # DENORM x DENORM
short fmul_res_snan - tbl_fmul_op # DENORM x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_res_snan - tbl_fmul_op # SNAN x NORM
short fmul_res_snan - tbl_fmul_op # SNAN x ZERO
short fmul_res_snan - tbl_fmul_op # SNAN x INF
short fmul_res_snan - tbl_fmul_op # SNAN x QNAN
short fmul_res_snan - tbl_fmul_op # SNAN x DENORM
short fmul_res_snan - tbl_fmul_op # SNAN x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
fmul_res_operr:
bra.l res_operr
fmul_res_snan:
bra.l res_snan
fmul_res_qnan:
bra.l res_qnan
#
# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
#
global fmul_zero # global for fsglmul
fmul_zero:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_zero_p # result ZERO is pos.
fmul_zero_n:
fmov.s &0x80000000,%fp0 # load -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
rts
fmul_zero_p:
fmov.s &0x00000000,%fp0 # load +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
#
# Note: The j-bit for an infinity is a don't-care. However, to be
# strictly compatible w/ the 68881/882, we make sure to return an
# INF w/ the j-bit set if the input INF j-bit was set. Destination
# INFs take priority.
#
global fmul_inf_dst # global for fsglmul
fmul_inf_dst:
fmovm.x DST(%a1),&0x80 # return INF result in fp0
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_inf_dst_p # result INF is pos.
fmul_inf_dst_n:
fabs.x %fp0 # clear result sign
fneg.x %fp0 # set result sign
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
rts
fmul_inf_dst_p:
fabs.x %fp0 # clear result sign
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
global fmul_inf_src # global for fsglmul
fmul_inf_src:
fmovm.x SRC(%a0),&0x80 # return INF result in fp0
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_inf_dst_p # result INF is pos.
bra.b fmul_inf_dst_n
#########################################################################
# XDEF **************************************************************** #
# fin(): emulates the fmove instruction #
# fsin(): emulates the fsmove instruction #
# fdin(): emulates the fdmove instruction #
# #
# XREF **************************************************************** #
# norm() - normalize mantissa for EXOP on denorm #
# scale_to_zero_src() - scale src exponent to zero #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round prec/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Norms can be emulated w/ a regular fmove instruction. For #
# sgl/dbl, must scale exponent and perform an "fmove". Check to see #
# if the result would have overflowed/underflowed. If so, use unf_res() #
# or ovf_res() to return the default result. Also return EXOP if #
# exception is enabled. If no exception, return the default result. #
# Unnorms don't pass through here. #
# #
#########################################################################
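#
# Worked example (illustrative values only): for single-precision rounding the
# code below compares the scale factor (0x3fff - src exp) against
# 0x3fff-0x3f80 and 0x3fff-0x407e. A source exponent of 0x3f7f gives a factor
# of 0x80 >= 0x3fff-0x3f80, so the move-in underflows; an exponent of 0x4100
# gives -0x101 < 0x3fff-0x407e, so it overflows.
#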
global fsin
fsin:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fin
global fdin
fdin:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fin
fin:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1 # fetch src optype tag
bne.w fin_not_norm # optimize on non-norm input
#
# FP MOVE IN: NORMs and DENORMs ONLY!
#
fin_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fin_not_ext # no, so go handle dbl or sgl
#
# precision selected is extended. so...we cannot get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
tst.b SRC_EX(%a0) # is the operand negative?
bpl.b fin_norm_done # no
bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
fin_norm_done:
fmovm.x SRC(%a0),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance(no inexactness!)
#
fin_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fin_not_ext # no, so go handle dbl or sgl
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
tst.b SRC_EX(%a0) # is the operand negative?
bpl.b fin_denorm_done # no
bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
fin_denorm_done:
fmovm.x SRC(%a0),&0x80 # return result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fin_denorm_unfl_ena # yes
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
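# Worked example (illustrative): if norm() has to shift the mantissa left by
# 12 bits, the code below forms an EXOP exponent of -12 + 0x6000 = 0x5ff4, so
# the underflow handler can strip the 0x6000 bias again to recover the true
# negative exponent.
#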
fin_denorm_unfl_ena:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat new exp,old sign
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is to be rounded to single or double precision
#
fin_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fin_dbl
#
# operand is to be rounded to single precision
#
fin_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fin_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fin_sd_may_ovfl # maybe; go check
blt.w fin_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved into the fp reg file
#
fin_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform move
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fin_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exponent
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fin_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.w fin_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fin_sd_may_ovfl # maybe; go check
blt.w fin_sd_ovfl # yes; go handle overflow
bra.w fin_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fin_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
tst.b FP_SCR0_EX(%a6) # is operand negative?
bpl.b fin_sd_unfl_tst
bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
# if underflow or inexact is enabled, then go calculate the EXOP first.
fin_sd_unfl_tst:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fin_sd_unfl_ena # yes
fin_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow or inexact is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fin_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # subtract scale factor
andi.w &0x8000,%d2 # extract old sign
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR1_EX(%a6) # insert new exponent
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fin_sd_unfl_dis
#
# operand WILL overflow.
#
fin_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform move
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fin_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fin_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fin_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended(and don't alter the FPSR).
#
fin_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
sub.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fin_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fin_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform the move
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fin_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fin_sd_normal_exit
##########################################################################
#
# operand is not a NORM: check its optype and branch accordingly
#
fin_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fin_denorm
cmpi.b %d1,&SNAN # weed out SNANs
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNANs
beq.l res_qnan_1op
#
# do the fmove in; at this point, only possible ops are ZERO and INF.
# use fmov to determine ccodes.
# prec:mode should be zero at this point but it won't affect answer anyways.
#
fmov.x SRC(%a0),%fp0 # do fmove in
fmov.l %fpsr,%d0 # no exceptions possible
rol.l &0x8,%d0 # put ccodes in lo byte
mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
rts
#########################################################################
# XDEF **************************************************************** #
# fdiv(): emulates the fdiv instruction #
# fsdiv(): emulates the fsdiv instruction #
# fddiv(): emulates the fddiv instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 = rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a divide #
# instruction won't cause an exception. Use the regular fdiv to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
align 0x10
tbl_fdiv_unfl:
long 0x3fff - 0x0000 # ext_unfl
long 0x3fff - 0x3f81 # sgl_unfl
long 0x3fff - 0x3c01 # dbl_unfl
tbl_fdiv_ovfl:
long 0x3fff - 0x7ffe # ext overflow exponent
long 0x3fff - 0x407e # sgl overflow exponent
long 0x3fff - 0x43fe # dbl overflow exponent
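#
# Illustrative note: the scale factor computed in fdiv below is roughly
# (0x3fff - true quotient exponent), so a quotient that would reach the
# extended maximum exponent 0x7ffe produces a factor near 0x3fff - 0x7ffe,
# the first tbl_fdiv_ovfl entry; comparing against these tables is what
# selects the overflow/underflow paths.
#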
global fsdiv
fsdiv:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fdiv
global fddiv
fddiv:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fdiv
fdiv:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fdiv_not_norm # optimize on non-norm input
#
# DIVIDE: NORMs and DENORMs ONLY!
#
fdiv_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale src exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
neg.l (%sp) # SCALE FACTOR = scale1 - scale2
add.l %d0,(%sp)
mov.w 2+L_SCR3(%a6),%d1 # fetch precision
lsr.b &0x6,%d1 # shift to lo bits
mov.l (%sp)+,%d0 # load S.F.
cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
ble.w fdiv_may_ovfl # result may overflow
cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
beq.w fdiv_may_unfl # maybe
bgt.w fdiv_unfl # yes; go handle underflow
fdiv_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # perform divide
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fdiv_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
mov.l %d2,-(%sp) # store d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
tbl_fdiv_ovfl2:
long 0x7fff
long 0x407f
long 0x43ff
fdiv_no_ovfl:
mov.l (%sp)+,%d0 # restore scale factor
bra.b fdiv_normal_exit
fdiv_may_ovfl:
mov.l %d0,-(%sp) # save scale factor
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d0
fmov.l &0x0,%fpcr
or.l %d0,USER_FPSR(%a6) # save INEX,N
fmovm.x &0x01,-(%sp) # save result to stack
mov.w (%sp),%d0 # fetch new exponent
add.l &0xc,%sp # clear result from stack
andi.l &0x7fff,%d0 # strip sign
sub.l (%sp),%d0 # add scale factor
cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
blt.b fdiv_no_ovfl
mov.l (%sp)+,%d0
fdiv_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fdiv_ovfl_ena # yes
fdiv_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fdiv_ovfl_ena:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fdiv_ovfl_ena_sd # no, do sgl or dbl
fdiv_ovfl_ena_cont:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1 # clear sign bit
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fdiv_ovfl_dis
fdiv_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode
fmov.l %d1,%fpcr # set FPCR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
bra.b fdiv_ovfl_ena_cont
fdiv_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fdiv_unfl_ena # yes
fdiv_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL or INEX is enabled.
#
fdiv_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fdiv_unfl_ena_sd # no, sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fdiv_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp1 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fdiv_unfl_dis
fdiv_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fdiv_unfl_ena_cont
#
# the divide operation MAY underflow:
#
fdiv_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x1 # is |result| > 1.b?
fbgt.w fdiv_normal_exit # no; no underflow occurred
fblt.w fdiv_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 1. but,
# we don't know if the result was an underflow that rounded up to a 1
# or a normalized number that rounded down to a 1. so, redo the entire
# operation using RZ as the rounding mode to see what the pre-rounded
# result is. this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp1 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x1 # is |result| < 1.b?
fbge.w fdiv_normal_exit # no; no underflow occurred
bra.w fdiv_unfl # yes; underflow occurred
############################################################################
#
# Divide: inputs are not both normalized; what are they?
#
fdiv_not_norm:
mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fdiv_op:
short fdiv_norm - tbl_fdiv_op # NORM / NORM
short fdiv_inf_load - tbl_fdiv_op # NORM / ZERO
short fdiv_zero_load - tbl_fdiv_op # NORM / INF
short fdiv_res_qnan - tbl_fdiv_op # NORM / QNAN
short fdiv_norm - tbl_fdiv_op # NORM / DENORM
short fdiv_res_snan - tbl_fdiv_op # NORM / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_zero_load - tbl_fdiv_op # ZERO / NORM
short fdiv_res_operr - tbl_fdiv_op # ZERO / ZERO
short fdiv_zero_load - tbl_fdiv_op # ZERO / INF
short fdiv_res_qnan - tbl_fdiv_op # ZERO / QNAN
short fdiv_zero_load - tbl_fdiv_op # ZERO / DENORM
short fdiv_res_snan - tbl_fdiv_op # ZERO / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_inf_dst - tbl_fdiv_op # INF / NORM
short fdiv_inf_dst - tbl_fdiv_op # INF / ZERO
short fdiv_res_operr - tbl_fdiv_op # INF / INF
short fdiv_res_qnan - tbl_fdiv_op # INF / QNAN
short fdiv_inf_dst - tbl_fdiv_op # INF / DENORM
short fdiv_res_snan - tbl_fdiv_op # INF / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_res_qnan - tbl_fdiv_op # QNAN / NORM
short fdiv_res_qnan - tbl_fdiv_op # QNAN / ZERO
short fdiv_res_qnan - tbl_fdiv_op # QNAN / INF
short fdiv_res_qnan - tbl_fdiv_op # QNAN / QNAN
short fdiv_res_qnan - tbl_fdiv_op # QNAN / DENORM
short fdiv_res_snan - tbl_fdiv_op # QNAN / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_norm - tbl_fdiv_op # DENORM / NORM
short fdiv_inf_load - tbl_fdiv_op # DENORM / ZERO
short fdiv_zero_load - tbl_fdiv_op # DENORM / INF
short fdiv_res_qnan - tbl_fdiv_op # DENORM / QNAN
short fdiv_norm - tbl_fdiv_op # DENORM / DENORM
short fdiv_res_snan - tbl_fdiv_op # DENORM / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_res_snan - tbl_fdiv_op # SNAN / NORM
short fdiv_res_snan - tbl_fdiv_op # SNAN / ZERO
short fdiv_res_snan - tbl_fdiv_op # SNAN / INF
short fdiv_res_snan - tbl_fdiv_op # SNAN / QNAN
short fdiv_res_snan - tbl_fdiv_op # SNAN / DENORM
short fdiv_res_snan - tbl_fdiv_op # SNAN / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
fdiv_res_qnan:
bra.l res_qnan
fdiv_res_snan:
bra.l res_snan
fdiv_res_operr:
bra.l res_operr
global fdiv_zero_load # global for fsgldiv
fdiv_zero_load:
mov.b SRC_EX(%a0),%d0 # result sign is exclusive
mov.b DST_EX(%a1),%d1 # or of input signs.
eor.b %d0,%d1
bpl.b fdiv_zero_load_p # result is positive
fmov.s &0x80000000,%fp0 # load a -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
rts
fdiv_zero_load_p:
fmov.s &0x00000000,%fp0 # load a +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# The destination was In Range and the source was a ZERO. The result,
# therefore, is an INF w/ the proper sign.
# So, determine the sign and return a new INF (w/ the j-bit cleared).
#
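# Illustrative example: +1.0 / +0.0 returns +INF with DZ/ADZ set below, while
# +1.0 / -0.0 returns -INF and additionally sets the 'N' ccode bit.
#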
global fdiv_inf_load # global for fsgldiv
fdiv_inf_load:
ori.w &dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
mov.b SRC_EX(%a0),%d0 # load both signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fdiv_inf_load_p # result is positive
fmov.s &0xff800000,%fp0 # make result -INF
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
rts
fdiv_inf_load_p:
fmov.s &0x7f800000,%fp0 # make result +INF
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#
# The destination was an INF w/ an In Range or ZERO source, the result is
# an INF w/ the proper sign.
# The 68881/882 returns the destination INF w/ the new sign (if the j-bit of the
# dst INF is set, then the j-bit of the result INF is also set).
#
global fdiv_inf_dst # global for fsgldiv
fdiv_inf_dst:
mov.b DST_EX(%a1),%d0 # load both signs
mov.b SRC_EX(%a0),%d1
eor.b %d0,%d1
bpl.b fdiv_inf_dst_p # result is positive
fmovm.x DST(%a1),&0x80 # return result in fp0
fabs.x %fp0 # clear sign bit
fneg.x %fp0 # set sign bit
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fdiv_inf_dst_p:
fmovm.x DST(%a1),&0x80 # return result in fp0
fabs.x %fp0 # return positive INF
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fneg(): emulates the fneg instruction #
# fsneg(): emulates the fsneg instruction #
# fdneg(): emulates the fdneg instruction #
# #
# XREF **************************************************************** #
# norm() - normalize a denorm to provide EXOP #
# scale_to_zero_src() - scale sgl/dbl source exponent #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, zeroes, and infinities as special cases. Separate #
# norms/denorms into ext/sgl/dbl precisions. Extended precision can be #
# emulated by simply inverting the sign bit. Sgl/dbl operands must be scaled #
# and an actual fneg performed to see if overflow/underflow would have #
# occurred. If so, return default underflow/overflow result. Else, #
# scale the result exponent and return result. FPSR gets set based on #
# the result value. #
# #
#########################################################################
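#
# Illustrative note: for extended precision the negation below is just an
# "eori.w &0x8000" on the sign/exponent word, e.g. +1.0 (sign/exp word 0x3fff)
# becomes -1.0 (0xbfff); no rounding occurs, so that path can neither overflow
# nor underflow.
#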
global fsneg
fsneg:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fneg
global fdneg
fdneg:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fneg
fneg:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1
bne.w fneg_not_norm # optimize on non-norm input
#
# NEGATE SIGN : norms and denorms ONLY!
#
fneg_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fneg_not_ext # no; go handle sgl or dbl
#
# precision selected is extended. so...we can not get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
eori.w &0x8000,%d0 # negate sign
bpl.b fneg_norm_load # sign is positive
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
fneg_norm_load:
mov.w %d0,FP_SCR0_EX(%a6)
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance(no inexactness!)
#
fneg_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fneg_not_ext # no; go handle sgl or dbl
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
eori.w &0x8000,%d0 # negate sign
bpl.b fneg_denorm_done # no
mov.b &neg_bmask,FPSR_CC(%a6) # yes, set 'N' ccode bit
fneg_denorm_done:
mov.w %d0,FP_SCR0_EX(%a6)
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fneg_ext_unfl_ena # yes
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
fneg_ext_unfl_ena:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat old sign, new exponent
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is either single or double
#
fneg_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fneg_dbl
#
# operand is to be rounded to single precision
#
fneg_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fneg_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fneg_sd_may_ovfl # maybe; go check
blt.w fneg_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fneg_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fneg_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fneg_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.b fneg_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fneg_sd_may_ovfl # maybe; go check
blt.w fneg_sd_ovfl # yes; go handle overflow
bra.w fneg_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fneg_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
bpl.b fneg_sd_unfl_tst
bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
# if underflow or inexact is enabled, go calculate EXOP first.
fneg_sd_unfl_tst:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fneg_sd_unfl_ena # yes
fneg_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow or inexact is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fneg_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fneg_sd_unfl_dis
#
# operand WILL overflow.
#
fneg_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fneg_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fneg_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fneg_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended(and don't alter the FPSR).
#
fneg_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fneg_sd_ovfl_dis
#
# the move in MAY underflow. so...
#
fneg_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fneg_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fneg_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fneg_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fneg_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
#
# do the fneg; at this point, only possible ops are ZERO and INF.
# use fneg to determine ccodes.
# prec:mode should be zero at this point but it won't affect answer anyways.
#
fneg.x SRC_EX(%a0),%fp0 # do fneg
fmov.l %fpsr,%d0
rol.l &0x8,%d0 # put ccodes in lo byte
mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
rts
#########################################################################
# XDEF **************************************************************** #
# ftst(): emulates the ftst instruction #
# #
# XREF **************************************************************** #
# res{s,q}nan_1op() - set NAN result for monadic instruction #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# #
# OUTPUT ************************************************************** #
# none #
# #
# ALGORITHM *********************************************************** #
# Check the source operand tag (STAG) and set the FPSR according #
# to the operand type and sign. #
# #
#########################################################################
global ftst
ftst:
mov.b STAG(%a6),%d1
bne.b ftst_not_norm # optimize on non-norm input
#
# Norm:
#
ftst_norm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_norm_m # yes
rts
ftst_norm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# input is not normalized; what is it?
#
ftst_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b ftst_zero
cmpi.b %d1,&INF # weed out INF
beq.b ftst_inf
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
#
# Denorm:
#
ftst_denorm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_denorm_m # yes
rts
ftst_denorm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# Infinity:
#
ftst_inf:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_inf_m # yes
ftst_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
ftst_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
rts
#
# Zero:
#
ftst_zero:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_zero_m # yes
ftst_zero_p:
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
ftst_zero_m:
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fint(): emulates the fint instruction #
# #
# XREF **************************************************************** #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# #
# ALGORITHM *********************************************************** #
# Separate according to operand type. Unnorms don't pass through #
# here. For norms, load the rounding mode/prec, execute a "fint", then #
# store the resulting FPSR bits. #
# For denorms, force the j-bit to a one and do the same as for #
# norms. Denorms are so low that the answer will either be a zero or a #
# one. #
# For zeroes/infs/NANs, return the same while setting the FPSR #
# as appropriate. #
# #
#########################################################################
global fint
fint:
mov.b STAG(%a6),%d1
bne.b fint_not_norm # optimize on non-norm input
#
# Norm:
#
fint_norm:
andi.b &0x30,%d0 # set prec = ext
fmov.l %d0,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fint.x SRC(%a0),%fp0 # execute fint
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d0 # save FPSR
or.l %d0,USER_FPSR(%a6) # set exception bits
rts
#
# input is not normalized; what is it?
#
fint_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fint_zero
cmpi.b %d1,&INF # weed out INF
beq.b fint_inf
cmpi.b %d1,&DENORM # weed out DENORM
beq.b fint_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op # weed out QNAN
#
# Denorm:
#
# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
# also, the INEX2 and AINEX exception bits will be set.
# so, we could either set these manually or force the DENORM
# to a very small NORM and ship it to the NORM routine.
# I do the latter.
#
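# Illustrative note: forcing the j-bit on while keeping the zero exponent
# turns the DENORM into a norm on the order of 2^-16383; fint of such a value
# can only produce 0 or +/-1 and still sets INEX2/AINEX, matching what the
# original DENORM would have produced.
#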
fint_denorm:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
lea FP_SCR0(%a6),%a0
bra.b fint_norm
#
# Zero:
#
fint_zero:
tst.b SRC_EX(%a0) # is ZERO negative?
bmi.b fint_zero_m # yes
fint_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO in fp0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fint_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#
# Infinity:
#
fint_inf:
fmovm.x SRC(%a0),&0x80 # return result in fp0
tst.b SRC_EX(%a0) # is INF negative?
bmi.b fint_inf_m # yes
fint_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
fint_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fintrz(): emulates the fintrz instruction #
# #
# XREF **************************************************************** #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# #
# ALGORITHM *********************************************************** #
# Separate according to operand type. Unnorms don't pass through #
# here. For norms, load the rounding mode/prec, execute a "fintrz", #
# then store the resulting FPSR bits. #
# For denorms, force the j-bit to a one and do the same as for #
# norms. Denorms are so low that the answer will either be a zero or a #
# one. #
# For zeroes/infs/NANs, return the same while setting the FPSR #
# as appropriate. #
# #
#########################################################################
global fintrz
fintrz:
mov.b STAG(%a6),%d1
bne.b fintrz_not_norm # optimize on non-norm input
#
# Norm:
#
fintrz_norm:
fmov.l &0x0,%fpsr # clear FPSR
fintrz.x SRC(%a0),%fp0 # execute fintrz
fmov.l %fpsr,%d0 # save FPSR
or.l %d0,USER_FPSR(%a6) # set exception bits
rts
#
# input is not normalized; what is it?
#
fintrz_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fintrz_zero
cmpi.b %d1,&INF # weed out INF
beq.b fintrz_inf
cmpi.b %d1,&DENORM # weed out DENORM
beq.b fintrz_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op # weed out QNAN
#
# Denorm:
#
# for DENORMs, the result will be (+/-)ZERO.
# also, the INEX2 and AINEX exception bits will be set.
# so, we could either set these manually or force the DENORM
# to a very small NORM and ship it to the NORM routine.
# I do the latter.
#
fintrz_denorm:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
lea FP_SCR0(%a6),%a0
bra.b fintrz_norm
#
# Zero:
#
fintrz_zero:
tst.b SRC_EX(%a0) # is ZERO negative?
bmi.b fintrz_zero_m # yes
fintrz_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO in fp0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fintrz_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#
# Infinity:
#
fintrz_inf:
fmovm.x SRC(%a0),&0x80 # return result in fp0
tst.b SRC_EX(%a0) # is INF negative?
bmi.b fintrz_inf_m # yes
fintrz_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
fintrz_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fabs(): emulates the fabs instruction #
# fsabs(): emulates the fsabs instruction #
# fdabs(): emulates the fdabs instruction #
# #
# XREF **************************************************************** #
# norm() - normalize denorm mantissa to provide EXOP #
# scale_to_zero_src() - make exponent = 0; get scale factor #
# unf_res() - calculate underflow result #
# ovf_res() - calculate overflow result #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = rnd precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Simply clear sign for extended precision norm. Ext prec denorm #
# gets an EXOP created for it since it's an underflow. #
# Double and single precision can overflow and underflow. First, #
# scale the operand such that the exponent is zero. Perform an "fabs" #
# using the correct rnd mode/prec. Check to see if the original #
# exponent would take an exception. If so, use unf_res() or ovf_res() #
# to calculate the default result. Also, create the EXOP for the #
# exceptional case. If no exception should occur, insert the correct #
# result exponent and return. #
# Unnorms don't pass through here. #
# #
#########################################################################
global fsabs
fsabs:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fabs
global fdabs
fdabs:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fabs
fabs:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1
bne.w fabs_not_norm # optimize on non-norm input
#
# ABSOLUTE VALUE: norms and denorms ONLY!
#
fabs_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fabs_not_ext # no; go handle sgl or dbl
#
# precision selected is extended. so...we can not get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d1
bclr &15,%d1 # force absolute value
mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance(no inexactness!)
#
fabs_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fabs_not_ext # no
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
bclr &15,%d0 # clear sign
mov.w %d0,FP_SCR0_EX(%a6) # insert exponent
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fabs_ext_unfl_ena
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
fabs_ext_unfl_ena:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat old sign, new exponent
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is either single or double
#
fabs_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fabs_dbl
#
# operand is to be rounded to single precision
#
fabs_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fabs_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fabs_sd_may_ovfl # maybe; go check
blt.w fabs_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fabs_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fabs_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.b fabs_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fabs_sd_may_ovfl # maybe; go check
blt.w fabs_sd_ovfl # yes; go handle overflow
bra.w fabs_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fabs_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
# if underflow or inexact is enabled, go calculate EXOP first.
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fabs_sd_unfl_ena # yes
fabs_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow or inexact is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fabs_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fabs_sd_unfl_dis
#
# operand WILL overflow.
#
fabs_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fabs_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fabs_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended(and don't alter the FPSR).
#
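#
# (worked example of the re-bias below: the true biased exponent is
# recovered as exp - scale factor, then 0x6000 is subtracted; e.g. a
# true exponent of 0x8005 becomes 0x8005 - 0x6000 = 0x2005, which fits
# the 15-bit exponent field of the EXOP.)
#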
fabs_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fabs_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
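#
# (the scaled operand has exponent 0x3fff, i.e. magnitude in [1.0, 2.0),
# and its true exponent sits exactly at the largest representable value
# for the target precision; so overflow happens only if rounding pushes
# the magnitude up to 2.0, which the fcmp against 2 below detects.)
#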
fabs_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fabs_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fabs_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fabs_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fabs_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
fabs.x SRC(%a0),%fp0 # force absolute value
cmpi.b %d1,&INF # weed out INF
beq.b fabs_inf
fabs_zero:
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fabs_inf:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
#########################################################################
# XDEF **************************************************************** #
# fcmp(): fp compare op routine #
# #
# XREF **************************************************************** #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 = round prec/mode #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Handle NANs and denorms as special cases. For everything else, #
# just use the actual fcmp instruction to produce the correct condition #
# codes. #
# #
#########################################################################
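#
# dispatch sketch (C-style, not executed): the tag bits below form an
# index into a table of 16-bit offsets from tbl_fcmp_op:
#
#    idx = (DTAG << 3) | STAG;    /* dst tag is the row, src tag the column */
#    handler = (char *)tbl_fcmp_op + tbl_fcmp_op[idx];
#    goto handler;
#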
global fcmp
fcmp:
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1
bne.b fcmp_not_norm # optimize on non-norm input
#
# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
#
fcmp_norm:
fmovm.x DST(%a1),&0x80 # load dst op
fcmp.x %fp0,SRC(%a0) # do compare
fmov.l %fpsr,%d0 # save FPSR
rol.l &0x8,%d0 # extract ccode bits
mov.b %d0,FPSR_CC(%a6) # set ccode bits(no exc bits are set)
rts
#
# fcmp: inputs are not both normalized; what are they?
#
fcmp_not_norm:
mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fcmp_op:
short fcmp_norm - tbl_fcmp_op # NORM - NORM
short fcmp_norm - tbl_fcmp_op # NORM - ZERO
short fcmp_norm - tbl_fcmp_op # NORM - INF
short fcmp_res_qnan - tbl_fcmp_op # NORM - QNAN
short fcmp_nrm_dnrm - tbl_fcmp_op # NORM - DENORM
short fcmp_res_snan - tbl_fcmp_op # NORM - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_norm - tbl_fcmp_op # ZERO - NORM
short fcmp_norm - tbl_fcmp_op # ZERO - ZERO
short fcmp_norm - tbl_fcmp_op # ZERO - INF
short fcmp_res_qnan - tbl_fcmp_op # ZERO - QNAN
short fcmp_dnrm_s - tbl_fcmp_op # ZERO - DENORM
short fcmp_res_snan - tbl_fcmp_op # ZERO - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_norm - tbl_fcmp_op # INF - NORM
short fcmp_norm - tbl_fcmp_op # INF - ZERO
short fcmp_norm - tbl_fcmp_op # INF - INF
short fcmp_res_qnan - tbl_fcmp_op # INF - QNAN
short fcmp_dnrm_s - tbl_fcmp_op # INF - DENORM
short fcmp_res_snan - tbl_fcmp_op # INF - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_res_qnan - tbl_fcmp_op # QNAN - NORM
short fcmp_res_qnan - tbl_fcmp_op # QNAN - ZERO
short fcmp_res_qnan - tbl_fcmp_op # QNAN - INF
short fcmp_res_qnan - tbl_fcmp_op # QNAN - QNAN
short fcmp_res_qnan - tbl_fcmp_op # QNAN - DENORM
short fcmp_res_snan - tbl_fcmp_op # QNAN - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_dnrm_nrm - tbl_fcmp_op # DENORM - NORM
short fcmp_dnrm_d - tbl_fcmp_op # DENORM - ZERO
short fcmp_dnrm_d - tbl_fcmp_op # DENORM - INF
short fcmp_res_qnan - tbl_fcmp_op # DENORM - QNAN
short fcmp_dnrm_sd - tbl_fcmp_op # DENORM - DENORM
short fcmp_res_snan - tbl_fcmp_op # DENORM - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_res_snan - tbl_fcmp_op # SNAN - NORM
short fcmp_res_snan - tbl_fcmp_op # SNAN - ZERO
short fcmp_res_snan - tbl_fcmp_op # SNAN - INF
short fcmp_res_snan - tbl_fcmp_op # SNAN - QNAN
short fcmp_res_snan - tbl_fcmp_op # SNAN - DENORM
short fcmp_res_snan - tbl_fcmp_op # SNAN - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
# unlike all other functions, fcmp does NOT set the 'N' ccode bit for a
# negative QNAN or SNAN input; res_qnan/res_snan do set it, so we must
# clear it here.
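# (0xf7 clears only bit 3 of FPSR_CC, the 'N' condition code; the other
# ccode bits are left untouched.)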
fcmp_res_qnan:
bsr.l res_qnan
andi.b &0xf7,FPSR_CC(%a6)
rts
fcmp_res_snan:
bsr.l res_snan
andi.b &0xf7,FPSR_CC(%a6)
rts
#
# DENORMs are a little more difficult.
# If you have 2 DENORMs, then you can just force the j-bit to a one
# and use the fcmp_norm routine.
# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
# and use the fcmp_norm routine.
# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
# But with a DENORM and a NORM of the same sign, the neg bit is set if the
# (1) signs are (+) and the DENORM is the dst or
# (2) signs are (-) and the DENORM is the src
#
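#
# e.g. forcing the j-bit (C-style, not executed):
#
#    FP_SCR0_HI = SRC_HI | 0x80000000;    /* 0.xxxx... -> 1.xxxx... */
#
# the tiny denormalized exponent is kept, so the resulting "small norm"
# orders the same way for the purposes of the compare.
#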
fcmp_dnrm_s:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),%d0
bset &31,%d0 # DENORM src; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0
bra.w fcmp_norm
fcmp_dnrm_d:
mov.l DST_EX(%a1),FP_SCR0_EX(%a6)
mov.l DST_HI(%a1),%d0
bset &31,%d0 # DENORM dst; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a1
bra.w fcmp_norm
fcmp_dnrm_sd:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l DST_HI(%a1),%d0
bset &31,%d0 # DENORM dst; make into small norm
mov.l %d0,FP_SCR1_HI(%a6)
mov.l SRC_HI(%a0),%d0
bset &31,%d0 # DENORM src; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR1(%a6),%a1
lea FP_SCR0(%a6),%a0
bra.w fcmp_norm
fcmp_nrm_dnrm:
mov.b SRC_EX(%a0),%d0 # determine if like signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fcmp_dnrm_s
# signs are the same, so must determine the answer ourselves.
tst.b %d0 # is src op negative?
bmi.b fcmp_nrm_dnrm_m # yes
rts
fcmp_nrm_dnrm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
fcmp_dnrm_nrm:
mov.b SRC_EX(%a0),%d0 # determine if like signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fcmp_dnrm_d
# signs are the same, so must determine the answer ourselves.
tst.b %d0 # is src op negative?
bpl.b fcmp_dnrm_nrm_m # no
rts
fcmp_dnrm_nrm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#########################################################################
# XDEF **************************************************************** #
# fsglmul(): emulates the fsglmul instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res4() - return default underflow result for sglop #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a multiply #
# instruction won't cause an exception. Use the regular fsglmul to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
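#
# (rough sketch of the scaling trick below, assuming scale_to_zero_src/dst
# each return the amount added to force the operand exponent to 0x3fff:
# the scaled product has exponent near 0x3fff, and the true exponent is
# that minus the combined scale factor; e.g. with src exp = dst exp =
# 0x6000 the combined factor is 2*(0x3fff-0x6000) = -0x4002, giving a
# true exponent of about 0x3fff + 0x4002 = 0x8001 > 0x7ffe, the overflow
# path.)
#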
global fsglmul
fsglmul:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1
bne.w fsglmul_not_norm # optimize on non-norm input
fsglmul_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
add.l (%sp)+,%d0 # SCALE_FACTOR = scale1 + scale2
cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
beq.w fsglmul_may_ovfl # result may rnd to overflow
blt.w fsglmul_ovfl # result will overflow
cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
beq.w fsglmul_may_unfl # result may underflow
bgt.w fsglmul_unfl # result will underflow
fsglmul_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsglmul_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
fsglmul_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsglmul_ovfl_tst:
# save setting this until now because this is where fsglmul_may_ovfl may jump in
or.l &ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsglmul_ovfl_ena # yes
fsglmul_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
andi.b &0x30,%d0 # force prec = ext
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fsglmul_ovfl_ena:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsglmul_ovfl_dis
fsglmul_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fsglmul_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fsglmul_normal_exit
fsglmul_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsglmul_unfl_ena # yes
fsglmul_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res4 # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fsglmul_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fsglmul_unfl_dis
fsglmul_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| > 2.b?
fbgt.w fsglmul_normal_exit # no; no underflow occurred
fblt.w fsglmul_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 2. but,
# we don't know if the result was an underflow that rounded up to a 2 or
# a normalized number that rounded down to a 2. so, redo the entire operation
# using RZ as the rounding mode to see what the pre-rounded result is.
# this case should be relatively rare.
#
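#
# (RZ never increases the magnitude of a rounded result; so if the RZ
# product below is still >= 2.0 the pre-rounded value really was, and the
# result is handled as normal, while a drop below 2.0 means the earlier
# rounding bumped it across the boundary, i.e. the underflow path applies.)
#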
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x2 # is |result| < 2.b?
fbge.w fsglmul_normal_exit # no; no underflow occurred
bra.w fsglmul_unfl # yes, underflow occurred
##############################################################################
#
# Single Precision Multiply: inputs are not both normalized; what are they?
#
fsglmul_not_norm:
mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsglmul_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsglmul_op:
short fsglmul_norm - tbl_fsglmul_op # NORM x NORM
short fsglmul_zero - tbl_fsglmul_op # NORM x ZERO
short fsglmul_inf_src - tbl_fsglmul_op # NORM x INF
short fsglmul_res_qnan - tbl_fsglmul_op # NORM x QNAN
short fsglmul_norm - tbl_fsglmul_op # NORM x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # NORM x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_zero - tbl_fsglmul_op # ZERO x NORM
short fsglmul_zero - tbl_fsglmul_op # ZERO x ZERO
short fsglmul_res_operr - tbl_fsglmul_op # ZERO x INF
short fsglmul_res_qnan - tbl_fsglmul_op # ZERO x QNAN
short fsglmul_zero - tbl_fsglmul_op # ZERO x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # ZERO x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_inf_dst - tbl_fsglmul_op # INF x NORM
short fsglmul_res_operr - tbl_fsglmul_op # INF x ZERO
short fsglmul_inf_dst - tbl_fsglmul_op # INF x INF
short fsglmul_res_qnan - tbl_fsglmul_op # INF x QNAN
short fsglmul_inf_dst - tbl_fsglmul_op # INF x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # INF x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x NORM
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x ZERO
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x INF
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x QNAN
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # QNAN x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_norm - tbl_fsglmul_op # DENORM x NORM
short fsglmul_zero - tbl_fsglmul_op # DENORM x ZERO
short fsglmul_inf_src - tbl_fsglmul_op # DENORM x INF
short fsglmul_res_qnan - tbl_fsglmul_op # DENORM x QNAN
short fsglmul_norm - tbl_fsglmul_op # DENORM x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # DENORM x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x NORM
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x ZERO
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x INF
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x QNAN
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
fsglmul_res_operr:
bra.l res_operr
fsglmul_res_snan:
bra.l res_snan
fsglmul_res_qnan:
bra.l res_qnan
fsglmul_zero:
bra.l fmul_zero
fsglmul_inf_src:
bra.l fmul_inf_src
fsglmul_inf_dst:
bra.l fmul_inf_dst
#########################################################################
# XDEF **************************************************************** #
# fsgldiv(): emulates the fsgldiv instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res4() - return default underflow result for sglop #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a divide #
# instruction won't cause an exception. Use the regular fsgldiv to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
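#
# rough sketch of the emulation below (C-style, not executed; helper names
# are the ones used in this file):
#
#    sf = scale_to_zero_dst() - scale_to_zero_src(); /* both ops forced to exp 0x3fff */
#    scaled = fsgldiv(dst, src);                     /* cannot trap near exp 0x3fff */
#    true_exp = exp(scaled) - sf;
#    if (true_exp > 0x7ffe)       /* overflow: ovf_res() default, EXOP if enabled */
#    else if (true_exp <= 0x0000) /* underflow: unf_res4() default, EXOP if enabled */
#    else                         /* write true_exp back into the result */
#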
global fsgldiv
fsgldiv:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fsgldiv_not_norm # optimize on non-norm input
#
# DIVIDE: NORMs and DENORMs ONLY!
#
fsgldiv_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor 1
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # calculate scale factor 2
neg.l (%sp) # S.F. = scale1 - scale2
add.l %d0,(%sp)
mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
lsr.b &0x6,%d1
mov.l (%sp)+,%d0
cmpi.l %d0,&0x3fff-0x7ffe # will result overflow?
ble.w fsgldiv_may_ovfl # yes or maybe; go check
cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
beq.w fsgldiv_may_unfl # maybe
bgt.w fsgldiv_unfl # yes; go handle underflow
fsgldiv_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # save FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # perform sgl divide
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsgldiv_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
fsgldiv_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1
fmov.l &0x0,%fpcr
or.l %d1,USER_FPSR(%a6) # save INEX,N
fmovm.x &0x01,-(%sp) # save result to stack
mov.w (%sp),%d1 # fetch new exponent
add.l &0xc,%sp # clear result
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
cmp.l %d1,&0x7fff # did divide overflow?
blt.b fsgldiv_normal_exit
fsgldiv_ovfl_tst:
or.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsgldiv_ovfl_ena # yes
fsgldiv_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
andi.b &0x30,%d0 # kill precision
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fsgldiv_ovfl_ena:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract new bias
andi.w &0x7fff,%d1 # clear ms bit
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsgldiv_ovfl_dis
fsgldiv_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsgldiv_unfl_ena # yes
fsgldiv_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res4 # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fsgldiv_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat old sign, new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsgldiv_unfl_dis
#
# the divide operation MAY underflow:
#
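#
# (both operands were scaled to exponent 0x3fff, so the quotient magnitude
# lies in (0.5, 2.0); at this borderline scale factor only a quotient below
# 1.0 corresponds to a true underflow, which is what the fcmp against 1
# below distinguishes.)
#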
fsgldiv_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x1 # is |result| > 1.b?
fbgt.w fsgldiv_normal_exit # no; no underflow occurred
fblt.w fsgldiv_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 1. but,
# we don't know if the result was an underflow that rounded up to a 1
# or a normalized number that rounded down to a 1. so, redo the entire
# operation using RZ as the rounding mode to see what the pre-rounded
# result is. this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
clr.l %d1 # clear scratch register
ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x1 # is |result| < 1.b?
fbge.w fsgldiv_normal_exit # no; no underflow occurred
bra.w fsgldiv_unfl # yes; underflow occurred
############################################################################
#
# Divide: inputs are not both normalized; what are they?
#
fsgldiv_not_norm:
mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsgldiv_op:
short fsgldiv_norm - tbl_fsgldiv_op # NORM / NORM
short fsgldiv_inf_load - tbl_fsgldiv_op # NORM / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # NORM / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # NORM / QNAN
short fsgldiv_norm - tbl_fsgldiv_op # NORM / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # NORM / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / NORM
short fsgldiv_res_operr - tbl_fsgldiv_op # ZERO / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # ZERO / QNAN
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # ZERO / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / NORM
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / ZERO
short fsgldiv_res_operr - tbl_fsgldiv_op # INF / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # INF / QNAN
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # INF / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / NORM
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / ZERO
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / QNAN
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # QNAN / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_norm - tbl_fsgldiv_op # DENORM / NORM
short fsgldiv_inf_load - tbl_fsgldiv_op # DENORM / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # DENORM / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # DENORM / QNAN
short fsgldiv_norm - tbl_fsgldiv_op # DENORM / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # DENORM / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / NORM
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / ZERO
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / INF
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / QNAN
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
fsgldiv_res_qnan:
bra.l res_qnan
fsgldiv_res_snan:
bra.l res_snan
fsgldiv_res_operr:
bra.l res_operr
fsgldiv_inf_load:
bra.l fdiv_inf_load
fsgldiv_zero_load:
bra.l fdiv_zero_load
fsgldiv_inf_dst:
bra.l fdiv_inf_dst
#########################################################################
# XDEF **************************************************************** #
# fadd(): emulates the fadd instruction #
# fsadd(): emulates the fsadd instruction #
# fdadd(): emulates the fdadd instruction #
# #
# XREF **************************************************************** #
# addsub_scaler2() - scale the operands so they won't take exc #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan() - set QNAN result #
# res_snan() - set SNAN result #
# res_operr() - set OPERR result #
# scale_to_zero_src() - set src operand exponent equal to zero #
# scale_to_zero_dst() - set dst operand exponent equal to zero #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Do addition after scaling exponents such that exception won't #
# occur. Then, check result exponent to see if exception would have #
# occurred. If so, return default result and maybe EXOP. Else, insert #
# the correct result exponent and return. Set FPSR bits as appropriate. #
# #
#########################################################################
global fsadd
fsadd:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fadd
global fdadd
fdadd:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fadd
fadd:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fadd_not_norm # optimize on non-norm input
#
# ADD: norms and denorms
#
fadd_norm:
bsr.l addsub_scaler2 # scale exponents
fadd_zero_entry:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch INEX2,N,Z
or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
fbeq.w fadd_zero_exit # if result is zero, end now
mov.l %d2,-(%sp) # save d2
fmovm.x &0x01,-(%sp) # save result to stack
mov.w 2+L_SCR3(%a6),%d1
lsr.b &0x6,%d1
mov.w (%sp),%d2 # fetch new sign, exp
andi.l &0x7fff,%d2 # strip sign
sub.l %d0,%d2 # add scale factor
cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
bge.b fadd_ovfl # yes
cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
blt.w fadd_unfl # yes
beq.w fadd_may_unfl # maybe; go find out
fadd_normal:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x80 # return result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_zero_exit:
# fmov.s &0x00000000,%fp0 # return zero in fp0
rts
tbl_fadd_ovfl:
long 0x7fff # ext ovfl
long 0x407f # sgl ovfl
long 0x43ff # dbl ovfl
tbl_fadd_unfl:
long 0x0000 # ext unfl
long 0x3f81 # sgl unfl
long 0x3c01 # dbl unfl
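#
# (the table entries are the extended-format biased exponents where each
# rounding precision overflows or goes denormal: e.g. single precision has
# Emax = +127, so 0x3fff + 0x7f = 0x407e is the largest legal exponent and
# 0x407f overflows, and Emin = -126, so 0x3fff - 0x7e = 0x3f81 is the
# smallest normalized exponent. the scaled result exponent, corrected by
# the scale factor, is compared against the entry picked by the rounding
# precision in L_SCR3.)
#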
fadd_ovfl:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fadd_ovfl_ena # yes
add.l &0xc,%sp
fadd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_ovfl_ena:
mov.b L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fadd_ovfl_ena_sd # no; prec = sgl or dbl
fadd_ovfl_ena_cont:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
subi.l &0x6000,%d2 # subtract new bias
andi.w &0x7fff,%d2
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x40 # return EXOP in fp1
bra.b fadd_ovfl_dis
fadd_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode
fmov.l %d1,%fpcr # set FPCR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
add.l &0xc,%sp
fmovm.x &0x01,-(%sp)
bra.b fadd_ovfl_ena_cont
fadd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
add.l &0xc,%sp
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save status
or.l %d1,USER_FPSR(%a6) # save INEX,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fadd_unfl_ena # yes
fadd_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fadd_unfl_ena_sd # no; sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fadd_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fadd.x FP_SCR0(%a6),%fp1 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fadd_unfl_dis
fadd_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fadd_unfl_ena_cont
#
# result is equal to the smallest normalized number in the selected precision.
# if the precision is extended, this result could not have come from an
# underflow that rounded up.
#
fadd_may_unfl:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
beq.w fadd_normal # yes; no underflow occurred
mov.l 0x4(%sp),%d1 # extract hi(man)
cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
bne.w fadd_normal # no; no underflow occurred
tst.l 0x8(%sp) # is lo(man) = 0x0?
bne.w fadd_normal # no; no underflow occurred
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.w fadd_normal # no; no underflow occurred
#
# ok, so now the result has an exponent equal to the smallest normalized
# exponent for the selected precision. also, the mantissa is equal to
# 0x8000000000000000 and this mantissa is the result of rounding non-zero
# g,r,s.
# now, we must determine whether the pre-rounded result was an underflow
# rounded "up" or a normalized number rounded "down".
# so, we do this by re-executing the add using RZ as the rounding mode and
# seeing if the new result is smaller or equal to the current result.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fadd.x FP_SCR0(%a6),%fp1 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # compare absolute values
fabs.x %fp1
fcmp.x %fp0,%fp1 # is first result > second?
fbgt.w fadd_unfl # yes; it's an underflow
bra.w fadd_normal # no; it's not an underflow
##########################################################################
#
# Add: inputs are not both normalized; what are they?
#
fadd_not_norm:
mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fadd_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fadd_op:
short fadd_norm - tbl_fadd_op # NORM + NORM
short fadd_zero_src - tbl_fadd_op # NORM + ZERO
short fadd_inf_src - tbl_fadd_op # NORM + INF
short fadd_res_qnan - tbl_fadd_op # NORM + QNAN
short fadd_norm - tbl_fadd_op # NORM + DENORM
short fadd_res_snan - tbl_fadd_op # NORM + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_zero_dst - tbl_fadd_op # ZERO + NORM
short fadd_zero_2 - tbl_fadd_op # ZERO + ZERO
short fadd_inf_src - tbl_fadd_op # ZERO + INF
short fadd_res_qnan - tbl_fadd_op # ZERO + QNAN
short fadd_zero_dst - tbl_fadd_op # ZERO + DENORM
short fadd_res_snan - tbl_fadd_op # ZERO + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_inf_dst - tbl_fadd_op # INF + NORM
short fadd_inf_dst - tbl_fadd_op # INF + ZERO
short fadd_inf_2 - tbl_fadd_op # INF + INF
short fadd_res_qnan - tbl_fadd_op # INF + QNAN
short fadd_inf_dst - tbl_fadd_op # INF + DENORM
short fadd_res_snan - tbl_fadd_op # INF + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_res_qnan - tbl_fadd_op # QNAN + NORM
short fadd_res_qnan - tbl_fadd_op # QNAN + ZERO
short fadd_res_qnan - tbl_fadd_op # QNAN + INF
short fadd_res_qnan - tbl_fadd_op # QNAN + QNAN
short fadd_res_qnan - tbl_fadd_op # QNAN + DENORM
short fadd_res_snan - tbl_fadd_op # QNAN + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_norm - tbl_fadd_op # DENORM + NORM
short fadd_zero_src - tbl_fadd_op # DENORM + ZERO
short fadd_inf_src - tbl_fadd_op # DENORM + INF
short fadd_res_qnan - tbl_fadd_op # DENORM + QNAN
short fadd_norm - tbl_fadd_op # DENORM + DENORM
short fadd_res_snan - tbl_fadd_op # DENORM + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_res_snan - tbl_fadd_op # SNAN + NORM
short fadd_res_snan - tbl_fadd_op # SNAN + ZERO
short fadd_res_snan - tbl_fadd_op # SNAN + INF
short fadd_res_snan - tbl_fadd_op # SNAN + QNAN
short fadd_res_snan - tbl_fadd_op # SNAN + DENORM
short fadd_res_snan - tbl_fadd_op # SNAN + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
fadd_res_qnan:
bra.l res_qnan
fadd_res_snan:
bra.l res_snan
#
# both operands are ZEROes
#
fadd_zero_2:
mov.b SRC_EX(%a0),%d0 # are the signs opposite
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fadd_zero_2_chk_rm # weed out (-ZERO)+(+ZERO)
# the signs are the same. so determine whether they are positive or negative
# and return the appropriately signed zero.
tst.b %d0 # are ZEROes positive or negative?
bmi.b fadd_zero_rm # negative
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# the ZEROes have opposite signs:
# - Therefore, we return +ZERO if the rounding mode is RN, RZ, or RP.
# - -ZERO is returned in the case of RM.
#
fadd_zero_2_chk_rm:
mov.b 3+L_SCR3(%a6),%d1
andi.b &0x30,%d1 # extract rnd mode
cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
beq.b fadd_zero_rm # yes
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
fadd_zero_rm:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
rts
#
# one operand is a ZERO and the other is a DENORM or NORM. scale
# the DENORM or NORM and jump to the regular fadd routine.
#
fadd_zero_dst:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale the operand
clr.w FP_SCR1_EX(%a6)
clr.l FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
bra.w fadd_zero_entry # go execute fadd
fadd_zero_src:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
bsr.l scale_to_zero_dst # scale the operand
clr.w FP_SCR0_EX(%a6)
clr.l FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
bra.w fadd_zero_entry # go execute fadd
#
# both operands are INFs. an OPERR will result if the INFs have
# different signs. else, an INF of the same sign is returned
#
fadd_inf_2:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
bmi.l res_operr # weed out (-INF)+(+INF)
# ok, so it's not an OPERR. but, we do have to remember to return the
# src INF since that's where the 881/882 gets the j-bit from...
#
# operands are INF and one of {ZERO, INF, DENORM, NORM}
#
fadd_inf_src:
fmovm.x SRC(%a0),&0x80 # return src INF
tst.b SRC_EX(%a0) # is INF positive?
bpl.b fadd_inf_done # yes; we're done
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
#
# operands are INF and one of {ZERO, INF, DENORM, NORM}
#
fadd_inf_dst:
fmovm.x DST(%a1),&0x80 # return dst INF
tst.b DST_EX(%a1) # is INF positive?
bpl.b fadd_inf_done # yes; we're done
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fadd_inf_done:
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fsub(): emulates the fsub instruction #
# fssub(): emulates the fssub instruction #
# fdsub(): emulates the fdsub instruction #
# #
# XREF **************************************************************** #
# addsub_scaler2() - scale the operands so they won't take exc #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan() - set QNAN result #
# res_snan() - set SNAN result #
# res_operr() - set OPERR result #
# scale_to_zero_src() - set src operand exponent equal to zero #
# scale_to_zero_dst() - set dst operand exponent equal to zero #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Do subtraction after scaling exponents such that exception won't#
# occur. Then, check result exponent to see if exception would have #
# occurred. If so, return default result and maybe EXOP. Else, insert #
# the correct result exponent and return. Set FPSR bits as appropriate. #
# #
#########################################################################
global fssub
fssub:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fsub
global fdsub
fdsub:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fsub
fsub:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fsub_not_norm # optimize on non-norm input
#
# SUB: norms and denorms
#
fsub_norm:
bsr.l addsub_scaler2 # scale exponents
fsub_zero_entry:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch INEX2, N, Z
or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
fbeq.w fsub_zero_exit # if result zero, end now
mov.l %d2,-(%sp) # save d2
fmovm.x &0x01,-(%sp) # save result to stack
mov.w 2+L_SCR3(%a6),%d1
lsr.b &0x6,%d1
mov.w (%sp),%d2 # fetch new exponent
andi.l &0x7fff,%d2 # strip sign
sub.l %d0,%d2 # add scale factor
cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
bge.b fsub_ovfl # yes
cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
blt.w fsub_unfl # yes
beq.w fsub_may_unfl # maybe; go find out
fsub_normal:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
or.w %d2,%d1 # insert new exponent
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x80 # return result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_zero_exit:
# fmov.s &0x00000000,%fp0 # return zero in fp0
rts
tbl_fsub_ovfl:
long 0x7fff # ext ovfl
long 0x407f # sgl ovfl
long 0x43ff # dbl ovfl
tbl_fsub_unfl:
long 0x0000 # ext unfl
long 0x3f81 # sgl unfl
long 0x3c01 # dbl unfl
fsub_ovfl:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsub_ovfl_ena # yes
add.l &0xc,%sp
fsub_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_ovfl_ena:
mov.b L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fsub_ovfl_ena_sd # no
fsub_ovfl_ena_cont:
mov.w (%sp),%d1 # fetch {sgn,exp}
andi.w &0x8000,%d1 # keep sign
subi.l &0x6000,%d2 # subtract new bias
andi.w &0x7fff,%d2 # clear top bit
or.w %d2,%d1 # concat sign,exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x40 # return EXOP in fp1
bra.b fsub_ovfl_dis
fsub_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # clear rnd prec
fmov.l %d1,%fpcr # set FPCR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
add.l &0xc,%sp
fmovm.x &0x01,-(%sp)
bra.b fsub_ovfl_ena_cont
fsub_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
add.l &0xc,%sp
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save status
or.l %d1,USER_FPSR(%a6)
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsub_unfl_ena # yes
fsub_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fsub_unfl_ena_sd # no
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsub_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp1 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat sgn,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fsub_unfl_dis
fsub_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # clear rnd prec
fmov.l %d1,%fpcr # set FPCR
bra.b fsub_unfl_ena_cont
#
# result is equal to the smallest normalized number in the selected precision.
# if the precision is extended, this result could not have come from an
# underflow that rounded up.
#
fsub_may_unfl:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # fetch rnd prec
beq.w fsub_normal # yes; no underflow occurred
mov.l 0x4(%sp),%d1
cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
bne.w fsub_normal # no; no underflow occurred
tst.l 0x8(%sp) # is lo(man) = 0x0?
bne.w fsub_normal # no; no underflow occurred
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.w fsub_normal # no; no underflow occurred
#
# ok, so now the result has an exponent equal to the smallest normalized
# exponent for the selected precision. also, the mantissa is equal to
# 0x8000000000000000 and this mantissa is the result of rounding non-zero
# g,r,s.
# now, we must determine whether the pre-rounded result was an underflow
# rounded "up" or a normalized number rounded "down".
# so, we do this by re-executing the subtract using RZ as the rounding mode and
# seeing if the new result is smaller or equal to the current result.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp1 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # compare absolute values
fabs.x %fp1
fcmp.x %fp0,%fp1 # is first result > second?
fbgt.w fsub_unfl # yes; it's an underflow
bra.w fsub_normal # no; it's not an underflow
##########################################################################
#
# Sub: inputs are not both normalized; what are they?
#
fsub_not_norm:
mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsub_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsub_op:
short fsub_norm - tbl_fsub_op # NORM - NORM
short fsub_zero_src - tbl_fsub_op # NORM - ZERO
short fsub_inf_src - tbl_fsub_op # NORM - INF
short fsub_res_qnan - tbl_fsub_op # NORM - QNAN
short fsub_norm - tbl_fsub_op # NORM - DENORM
short fsub_res_snan - tbl_fsub_op # NORM - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_zero_dst - tbl_fsub_op # ZERO - NORM
short fsub_zero_2 - tbl_fsub_op # ZERO - ZERO
short fsub_inf_src - tbl_fsub_op # ZERO - INF
short fsub_res_qnan - tbl_fsub_op # ZERO - QNAN
short fsub_zero_dst - tbl_fsub_op # ZERO - DENORM
short fsub_res_snan - tbl_fsub_op # ZERO - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_inf_dst - tbl_fsub_op # INF - NORM
short fsub_inf_dst - tbl_fsub_op # INF - ZERO
short fsub_inf_2 - tbl_fsub_op # INF - INF
short fsub_res_qnan - tbl_fsub_op # INF - QNAN
short fsub_inf_dst - tbl_fsub_op # INF - DENORM
short fsub_res_snan - tbl_fsub_op # INF - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_res_qnan - tbl_fsub_op # QNAN - NORM
short fsub_res_qnan - tbl_fsub_op # QNAN - ZERO
short fsub_res_qnan - tbl_fsub_op # QNAN - INF
short fsub_res_qnan - tbl_fsub_op # QNAN - QNAN
short fsub_res_qnan - tbl_fsub_op # QNAN - DENORM
short fsub_res_snan - tbl_fsub_op # QNAN - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_norm - tbl_fsub_op # DENORM - NORM
short fsub_zero_src - tbl_fsub_op # DENORM - ZERO
short fsub_inf_src - tbl_fsub_op # DENORM - INF
short fsub_res_qnan - tbl_fsub_op # DENORM - QNAN
short fsub_norm - tbl_fsub_op # DENORM - DENORM
short fsub_res_snan - tbl_fsub_op # DENORM - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_res_snan - tbl_fsub_op # SNAN - NORM
short fsub_res_snan - tbl_fsub_op # SNAN - ZERO
short fsub_res_snan - tbl_fsub_op # SNAN - INF
short fsub_res_snan - tbl_fsub_op # SNAN - QNAN
short fsub_res_snan - tbl_fsub_op # SNAN - DENORM
short fsub_res_snan - tbl_fsub_op # SNAN - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
fsub_res_qnan:
bra.l res_qnan
fsub_res_snan:
bra.l res_snan
#
# both operands are ZEROes
#
fsub_zero_2:
mov.b SRC_EX(%a0),%d0
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
bpl.b fsub_zero_2_chk_rm
# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
tst.b %d0 # is dst negative?
bmi.b fsub_zero_2_rm # yes
fmov.s &0x00000000,%fp0 # no; return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# the ZEROes have the same signs:
# - Therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
# - -ZERO is returned in the case of RM.
#
fsub_zero_2_chk_rm:
mov.b 3+L_SCR3(%a6),%d1
andi.b &0x30,%d1 # extract rnd mode
cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
beq.b fsub_zero_2_rm # yes
fmov.s &0x00000000,%fp0 # no; return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
fsub_zero_2_rm:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/NEG
rts
#
# one operand is a ZERO and the other is a DENORM or a NORM.
# scale the DENORM or NORM and jump to the regular fsub routine.
#
fsub_zero_dst:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale the operand
clr.w FP_SCR1_EX(%a6)
clr.l FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
bra.w fsub_zero_entry # go execute fsub
fsub_zero_src:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
bsr.l scale_to_zero_dst # scale the operand
clr.w FP_SCR0_EX(%a6)
clr.l FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
bra.w fsub_zero_entry # go execute fsub
#
# both operands are INFs. an OPERR will result if the INFs have the
# same signs. else, an INF w/ the sign of the dst INF is returned.
#
fsub_inf_2:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
bpl.l res_operr # weed out (+INF)-(+INF) and (-INF)-(-INF)
# ok, so it's not an OPERR. but we do have to remember to return
# the src INF since that's where the 881/882 gets the j-bit.
fsub_inf_src:
fmovm.x SRC(%a0),&0x80 # return src INF
fneg.x %fp0 # invert sign
fbge.w fsub_inf_done # sign is now positive
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fsub_inf_dst:
fmovm.x DST(%a1),&0x80 # return dst INF
tst.b DST_EX(%a1) # is INF negative?
bpl.b fsub_inf_done # no
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fsub_inf_done:
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fsqrt(): emulates the fsqrt instruction #
# fssqrt(): emulates the fssqrt instruction #
# fdsqrt(): emulates the fdsqrt instruction #
# #
# XREF **************************************************************** #
# scale_sqrt() - scale the source operand #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a sqrt #
# instruction won't cause an exception. Use the regular fsqrt to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
global fssqrt
fssqrt:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fsqrt
global fdsqrt
fdsqrt:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fsqrt
fsqrt:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b STAG(%a6),%d1
bne.w fsqrt_not_norm # optimize on non-norm input
#
# SQUARE ROOT: norms and denorms ONLY!
#
fsqrt_norm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.l res_operr # yes
andi.b &0xc0,%d0 # is precision extended?
bne.b fsqrt_not_ext # no; go handle sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsqrt.x (%a0),%fp0 # execute square root
fmov.l %fpsr,%d1
or.l %d1,USER_FPSR(%a6) # set N,INEX
rts
fsqrt_denorm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.l res_operr # yes
andi.b &0xc0,%d0 # is precision extended?
bne.b fsqrt_not_ext # no; go handle sgl or dbl
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
bra.w fsqrt_sd_normal
#
# operand is either single or double
#
fsqrt_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.w fsqrt_dbl
#
# operand is to be rounded to single precision
#
fsqrt_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
beq.w fsqrt_sd_may_unfl
bgt.w fsqrt_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
beq.w fsqrt_sd_may_ovfl # maybe; go check
blt.w fsqrt_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fsqrt_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsqrt_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fsqrt_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
beq.w fsqrt_sd_may_unfl
bgt.b fsqrt_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
beq.w fsqrt_sd_may_ovfl # maybe; go check
blt.w fsqrt_sd_ovfl # yes; go handle overflow
	bra.w		fsqrt_sd_normal		# no; go handle normalized op
# we're on the line here and the distinguishing characteristic is whether
# the exponent is 3fff or 3ffe. if it's 3fff, then it's a safe number;
# otherwise fall through to underflow.
fsqrt_sd_may_unfl:
btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
bne.w fsqrt_sd_normal # yes, so no underflow
#
# operand WILL underflow when moved in to the fp register file
#
fsqrt_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
# if underflow or inexact is enabled, go calculate EXOP first.
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsqrt_sd_unfl_ena # yes
fsqrt_sd_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow (or inexact) is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fsqrt_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat new sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fsqrt_sd_unfl_dis
#
# operand WILL overflow.
#
fsqrt_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsqrt_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsqrt_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fsqrt_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL (or INEX) is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended (and don't alter the FPSR).
#
fsqrt_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
	sub.l		%d0,%d1			# subtract scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fsqrt_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fsqrt_sd_may_ovfl:
btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
bne.w fsqrt_sd_ovfl # yes, so overflow
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fmov.x %fp0,%fp1 # make a copy of result
	fcmp.b		%fp1,&0x1		# is |result| >= 1.0?
fbge.w fsqrt_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fsqrt_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fsqrt_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fsqrt_denorm
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fsqrt_zero
cmpi.b %d1,&INF # weed out INF
beq.b fsqrt_inf
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op
#
# fsqrt(+0) = +0
# fsqrt(-0) = -0
# fsqrt(+INF) = +INF
# fsqrt(-INF) = OPERR
#
fsqrt_zero:
tst.b SRC_EX(%a0) # is ZERO positive or negative?
bmi.b fsqrt_zero_m # negative
fsqrt_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fsqrt_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
fsqrt_inf:
tst.b SRC_EX(%a0) # is INF positive or negative?
bmi.l res_operr # negative
fsqrt_inf_p:
fmovm.x SRC(%a0),&0x80 # return +INF in fp0
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
#########################################################################
# XDEF **************************************************************** #
# fetch_dreg(): fetch register according to index in d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d1 = index of register to fetch from #
# #
# OUTPUT ************************************************************** #
# d0 = value of register fetched #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1 which can range from zero #
# to fifteen, load the corresponding register file value (where #
# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
# stack. The rest should still be in their original places. #
# #
#########################################################################
# this routine leaves d1 intact for subsequent store_dreg calls.
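# dispatch goes through a pc-relative table of word offsets: for example,
# with d1 = 3 the first mov.w fetches (fdreg3 - tbl_fdreg) from tbl_fdreg+6,
# and the jmp then lands at fdreg3, which simply copies d3 into d0.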
global fetch_dreg
fetch_dreg:
mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0
jmp (tbl_fdreg.b,%pc,%d0.w*1)
tbl_fdreg:
short fdreg0 - tbl_fdreg
short fdreg1 - tbl_fdreg
short fdreg2 - tbl_fdreg
short fdreg3 - tbl_fdreg
short fdreg4 - tbl_fdreg
short fdreg5 - tbl_fdreg
short fdreg6 - tbl_fdreg
short fdreg7 - tbl_fdreg
short fdreg8 - tbl_fdreg
short fdreg9 - tbl_fdreg
short fdrega - tbl_fdreg
short fdregb - tbl_fdreg
short fdregc - tbl_fdreg
short fdregd - tbl_fdreg
short fdrege - tbl_fdreg
short fdregf - tbl_fdreg
fdreg0:
mov.l EXC_DREGS+0x0(%a6),%d0
rts
fdreg1:
mov.l EXC_DREGS+0x4(%a6),%d0
rts
fdreg2:
mov.l %d2,%d0
rts
fdreg3:
mov.l %d3,%d0
rts
fdreg4:
mov.l %d4,%d0
rts
fdreg5:
mov.l %d5,%d0
rts
fdreg6:
mov.l %d6,%d0
rts
fdreg7:
mov.l %d7,%d0
rts
fdreg8:
mov.l EXC_DREGS+0x8(%a6),%d0
rts
fdreg9:
mov.l EXC_DREGS+0xc(%a6),%d0
rts
fdrega:
mov.l %a2,%d0
rts
fdregb:
mov.l %a3,%d0
rts
fdregc:
mov.l %a4,%d0
rts
fdregd:
mov.l %a5,%d0
rts
fdrege:
mov.l (%a6),%d0
rts
fdregf:
mov.l EXC_A7(%a6),%d0
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_l(): store longword to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
#	d0 = longword value to store					#
#	d1 = index of data register to store to				#
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the longword value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_l
store_dreg_l:
mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregl.b,%pc,%d1.w*1)
tbl_sdregl:
short sdregl0 - tbl_sdregl
short sdregl1 - tbl_sdregl
short sdregl2 - tbl_sdregl
short sdregl3 - tbl_sdregl
short sdregl4 - tbl_sdregl
short sdregl5 - tbl_sdregl
short sdregl6 - tbl_sdregl
short sdregl7 - tbl_sdregl
sdregl0:
mov.l %d0,EXC_DREGS+0x0(%a6)
rts
sdregl1:
mov.l %d0,EXC_DREGS+0x4(%a6)
rts
sdregl2:
mov.l %d0,%d2
rts
sdregl3:
mov.l %d0,%d3
rts
sdregl4:
mov.l %d0,%d4
rts
sdregl5:
mov.l %d0,%d5
rts
sdregl6:
mov.l %d0,%d6
rts
sdregl7:
mov.l %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_w(): store word to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = word value to store #
#	d1 = index of data register to store to				#
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the word value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_w
store_dreg_w:
mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregw.b,%pc,%d1.w*1)
tbl_sdregw:
short sdregw0 - tbl_sdregw
short sdregw1 - tbl_sdregw
short sdregw2 - tbl_sdregw
short sdregw3 - tbl_sdregw
short sdregw4 - tbl_sdregw
short sdregw5 - tbl_sdregw
short sdregw6 - tbl_sdregw
short sdregw7 - tbl_sdregw
sdregw0:
mov.w %d0,2+EXC_DREGS+0x0(%a6)
rts
sdregw1:
mov.w %d0,2+EXC_DREGS+0x4(%a6)
rts
sdregw2:
mov.w %d0,%d2
rts
sdregw3:
mov.w %d0,%d3
rts
sdregw4:
mov.w %d0,%d4
rts
sdregw5:
mov.w %d0,%d5
rts
sdregw6:
mov.w %d0,%d6
rts
sdregw7:
mov.w %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_b(): store byte to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = byte value to store #
#	d1 = index of data register to store to				#
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the byte value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_b
store_dreg_b:
mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregb.b,%pc,%d1.w*1)
tbl_sdregb:
short sdregb0 - tbl_sdregb
short sdregb1 - tbl_sdregb
short sdregb2 - tbl_sdregb
short sdregb3 - tbl_sdregb
short sdregb4 - tbl_sdregb
short sdregb5 - tbl_sdregb
short sdregb6 - tbl_sdregb
short sdregb7 - tbl_sdregb
sdregb0:
mov.b %d0,3+EXC_DREGS+0x0(%a6)
rts
sdregb1:
mov.b %d0,3+EXC_DREGS+0x4(%a6)
rts
sdregb2:
mov.b %d0,%d2
rts
sdregb3:
mov.b %d0,%d3
rts
sdregb4:
mov.b %d0,%d4
rts
sdregb5:
mov.b %d0,%d5
rts
sdregb6:
mov.b %d0,%d6
rts
sdregb7:
mov.b %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# inc_areg(): increment an address register by the value in d0 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = amount to increment by #
# d1 = index of address register to increment #
# #
# OUTPUT ************************************************************** #
# (address register is updated) #
# #
# ALGORITHM *********************************************************** #
# Typically used for an instruction w/ a post-increment <ea>, #
# this routine adds the increment value in d0 to the address register #
# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
# in their original places. #
# For a7, if the increment amount is one, then we have to #
# increment by two. For any a7 update, set the mia7_flag so that if #
# an access error exception occurs later in emulation, this address #
# register update can be undone. #
# #
#########################################################################
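# note: the a7 special case mirrors the 68k convention that a byte-sized
# (a7)+ access still adjusts the stack pointer by two so a7 stays word-aligned.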
global inc_areg
inc_areg:
mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1
jmp (tbl_iareg.b,%pc,%d1.w*1)
tbl_iareg:
short iareg0 - tbl_iareg
short iareg1 - tbl_iareg
short iareg2 - tbl_iareg
short iareg3 - tbl_iareg
short iareg4 - tbl_iareg
short iareg5 - tbl_iareg
short iareg6 - tbl_iareg
short iareg7 - tbl_iareg
iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
rts
iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
rts
iareg2: add.l %d0,%a2
rts
iareg3: add.l %d0,%a3
rts
iareg4: add.l %d0,%a4
rts
iareg5: add.l %d0,%a5
rts
iareg6: add.l %d0,(%a6)
rts
iareg7: mov.b &mia7_flg,SPCOND_FLG(%a6)
cmpi.b %d0,&0x1
beq.b iareg7b
add.l %d0,EXC_A7(%a6)
rts
iareg7b:
addq.l &0x2,EXC_A7(%a6)
rts
#########################################################################
# XDEF **************************************************************** #
# dec_areg(): decrement an address register by the value in d0 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = amount to decrement by #
# d1 = index of address register to decrement #
# #
# OUTPUT ************************************************************** #
# (address register is updated) #
# #
# ALGORITHM *********************************************************** #
# Typically used for an instruction w/ a pre-decrement <ea>, #
#	this routine subtracts the value in d0 from the address register	#
# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
# in their original places. #
# For a7, if the decrement amount is one, then we have to #
# decrement by two. For any a7 update, set the mda7_flag so that if #
# an access error exception occurs later in emulation, this address #
# register update can be undone. #
# #
#########################################################################
global dec_areg
dec_areg:
mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1
jmp (tbl_dareg.b,%pc,%d1.w*1)
tbl_dareg:
short dareg0 - tbl_dareg
short dareg1 - tbl_dareg
short dareg2 - tbl_dareg
short dareg3 - tbl_dareg
short dareg4 - tbl_dareg
short dareg5 - tbl_dareg
short dareg6 - tbl_dareg
short dareg7 - tbl_dareg
dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
rts
dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
rts
dareg2: sub.l %d0,%a2
rts
dareg3: sub.l %d0,%a3
rts
dareg4: sub.l %d0,%a4
rts
dareg5: sub.l %d0,%a5
rts
dareg6: sub.l %d0,(%a6)
rts
dareg7: mov.b &mda7_flg,SPCOND_FLG(%a6)
cmpi.b %d0,&0x1
beq.b dareg7b
sub.l %d0,EXC_A7(%a6)
rts
dareg7b:
subq.l &0x2,EXC_A7(%a6)
rts
##############################################################################
#########################################################################
# XDEF **************************************************************** #
# load_fpn1(): load FP register value into FP_SRC(a6). #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = index of FP register to load #
# #
# OUTPUT ************************************************************** #
# FP_SRC(a6) = value loaded from FP register file #
# #
# ALGORITHM *********************************************************** #
# Using the index in d0, load FP_SRC(a6) with a number from the #
# FP register file. #
# #
#########################################################################
global load_fpn1
load_fpn1:
mov.w (tbl_load_fpn1.b,%pc,%d0.w*2), %d0
jmp (tbl_load_fpn1.b,%pc,%d0.w*1)
tbl_load_fpn1:
short load_fpn1_0 - tbl_load_fpn1
short load_fpn1_1 - tbl_load_fpn1
short load_fpn1_2 - tbl_load_fpn1
short load_fpn1_3 - tbl_load_fpn1
short load_fpn1_4 - tbl_load_fpn1
short load_fpn1_5 - tbl_load_fpn1
short load_fpn1_6 - tbl_load_fpn1
short load_fpn1_7 - tbl_load_fpn1
load_fpn1_0:
mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
mov.l 4+EXC_FP0(%a6), 4+FP_SRC(%a6)
mov.l 8+EXC_FP0(%a6), 8+FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_1:
mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
mov.l 4+EXC_FP1(%a6), 4+FP_SRC(%a6)
mov.l 8+EXC_FP1(%a6), 8+FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_2:
fmovm.x &0x20, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_3:
fmovm.x &0x10, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_4:
fmovm.x &0x08, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_5:
fmovm.x &0x04, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_6:
fmovm.x &0x02, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_7:
fmovm.x &0x01, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
#############################################################################
#########################################################################
# XDEF **************************************************************** #
# load_fpn2(): load FP register value into FP_DST(a6). #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = index of FP register to load #
# #
# OUTPUT ************************************************************** #
# FP_DST(a6) = value loaded from FP register file #
# #
# ALGORITHM *********************************************************** #
# Using the index in d0, load FP_DST(a6) with a number from the #
# FP register file. #
# #
#########################################################################
global load_fpn2
load_fpn2:
mov.w (tbl_load_fpn2.b,%pc,%d0.w*2), %d0
jmp (tbl_load_fpn2.b,%pc,%d0.w*1)
tbl_load_fpn2:
short load_fpn2_0 - tbl_load_fpn2
short load_fpn2_1 - tbl_load_fpn2
short load_fpn2_2 - tbl_load_fpn2
short load_fpn2_3 - tbl_load_fpn2
short load_fpn2_4 - tbl_load_fpn2
short load_fpn2_5 - tbl_load_fpn2
short load_fpn2_6 - tbl_load_fpn2
short load_fpn2_7 - tbl_load_fpn2
load_fpn2_0:
mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
mov.l 4+EXC_FP0(%a6), 4+FP_DST(%a6)
mov.l 8+EXC_FP0(%a6), 8+FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_1:
mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
mov.l 4+EXC_FP1(%a6), 4+FP_DST(%a6)
mov.l 8+EXC_FP1(%a6), 8+FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_2:
fmovm.x &0x20, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_3:
fmovm.x &0x10, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_4:
fmovm.x &0x08, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_5:
fmovm.x &0x04, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_6:
fmovm.x &0x02, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_7:
fmovm.x &0x01, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
#############################################################################
#########################################################################
# XDEF **************************************************************** #
# store_fpreg(): store an fp value to the fpreg designated d0. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# fp0 = extended precision value to store #
# d0 = index of floating-point register #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Store the value in fp0 to the FP register designated by the #
# value in d0. The FP number can be DENORM or SNAN so we have to be #
# careful that we don't take an exception here. #
# #
#########################################################################
global store_fpreg
store_fpreg:
mov.w (tbl_store_fpreg.b,%pc,%d0.w*2), %d0
jmp (tbl_store_fpreg.b,%pc,%d0.w*1)
tbl_store_fpreg:
short store_fpreg_0 - tbl_store_fpreg
short store_fpreg_1 - tbl_store_fpreg
short store_fpreg_2 - tbl_store_fpreg
short store_fpreg_3 - tbl_store_fpreg
short store_fpreg_4 - tbl_store_fpreg
short store_fpreg_5 - tbl_store_fpreg
short store_fpreg_6 - tbl_store_fpreg
short store_fpreg_7 - tbl_store_fpreg
store_fpreg_0:
fmovm.x &0x80, EXC_FP0(%a6)
rts
store_fpreg_1:
fmovm.x &0x80, EXC_FP1(%a6)
rts
store_fpreg_2:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x20
rts
store_fpreg_3:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x10
rts
store_fpreg_4:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x08
rts
store_fpreg_5:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x04
rts
store_fpreg_6:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x02
rts
store_fpreg_7:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x01
rts
#########################################################################
# XDEF **************************************************************** #
# get_packed(): fetch a packed operand from memory and then #
# convert it to a floating-point binary number. #
# #
# XREF **************************************************************** #
# _dcalc_ea() - calculate the correct <ea> #
# _mem_read() - fetch the packed operand from memory #
# facc_in_x() - the fetch failed so jump to special exit code #
# decbin() - convert packed to binary extended precision #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If no failure on _mem_read(): #
# FP_SRC(a6) = packed operand now as a binary FP number #
# #
# ALGORITHM *********************************************************** #
# Get the correct <ea> which is the value on the exception stack #
# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+. #
# Then, fetch the operand from memory. If the fetch fails, exit #
# through facc_in_x(). #
# If the packed operand is a ZERO,NAN, or INF, convert it to #
# its binary representation here. Else, call decbin() which will #
# convert the packed value to an extended precision binary value. #
# #
#########################################################################
# the stacked <ea> for packed is correct except for -(An).
# the base reg must be updated for both -(An) and (An)+.
global get_packed
get_packed:
mov.l &0xc,%d0 # packed is 12 bytes
bsr.l _dcalc_ea # fetch <ea>; correct An
lea FP_SRC(%a6),%a1 # pass: ptr to super dst
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _dmem_read # read packed operand
tst.l %d1 # did dfetch fail?
bne.l facc_in_x # yes
# The packed operand is an INF or a NAN if the exponent field is all ones.
bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
cmpi.w %d0,&0x7fff # INF or NAN?
bne.b gp_try_zero # no
rts # operand is an INF or NAN
# The packed operand is a zero if the mantissa is all zero, else it's
# a normal packed op.
gp_try_zero:
mov.b 3+FP_SRC(%a6),%d0 # get byte 4
andi.b &0x0f,%d0 # clear all but last nybble
bne.b gp_not_spec # not a zero
tst.l FP_SRC_HI(%a6) # is lw 2 zero?
bne.b gp_not_spec # not a zero
tst.l FP_SRC_LO(%a6) # is lw 3 zero?
bne.b gp_not_spec # not a zero
rts # operand is a ZERO
gp_not_spec:
lea FP_SRC(%a6),%a0 # pass: ptr to packed op
bsr.l decbin # convert to extended
fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
rts
#########################################################################
# decbin(): Converts normalized packed bcd value pointed to by register #
# a0 to extended-precision value in fp0. #
# #
# INPUT *************************************************************** #
# a0 = pointer to normalized packed bcd value #
# #
# OUTPUT ************************************************************** #
# fp0 = exact fp representation of the packed bcd value. #
# #
# ALGORITHM *********************************************************** #
# Expected is a normal bcd (i.e. non-exceptional; all inf, zero, #
# and NaN operands are dispatched without entering this routine) #
# value in 68881/882 format at location (a0). #
# #
# A1. Convert the bcd exponent to binary by successive adds and #
# muls. Set the sign according to SE. Subtract 16 to compensate #
# for the mantissa which is to be interpreted as 17 integer #
# digits, rather than 1 integer and 16 fraction digits. #
# Note: this operation can never overflow. #
# #
# A2. Convert the bcd mantissa to binary by successive #
# adds and muls in FP0. Set the sign according to SM. #
# The mantissa digits will be converted with the decimal point #
# assumed following the least-significant digit. #
# Note: this operation can never overflow. #
# #
# A3. Count the number of leading/trailing zeros in the #
# bcd string. If SE is positive, count the leading zeros; #
# if negative, count the trailing zeros. Set the adjusted #
# exponent equal to the exponent from A1 and the zero count #
# added if SM = 1 and subtracted if SM = 0. Scale the #
# mantissa the equivalent of forcing in the bcd value: #
# #
# SM = 0 a non-zero digit in the integer position #
# SM = 1 a non-zero digit in Mant0, lsd of the fraction #
# #
# this will insure that any value, regardless of its #
# representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted #
# consistently. #
# #
# A4. Calculate the factor 10^exp in FP1 using a table of #
# 10^(2^n) values. To reduce the error in forming factors #
# greater than 10^27, a directed rounding scheme is used with #
# tables rounded to RN, RM, and RP, according to the table #
# in the comments of the pwrten section. #
# #
# A5. Form the final binary number by scaling the mantissa by #
# the exponent factor. This is done by multiplying the #
# mantissa in FP0 by the factor in FP1 if the adjusted #
# exponent sign is positive, and dividing FP0 by FP1 if #
# it is negative. #
# #
# Clean up and return. Check if the final mul or div was inexact. #
# If so, set INEX1 in USER_FPSR. #
# #
#########################################################################
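# For example, a packed operand of +1.23E-2 has SE = 1, exponent digits 002,
# and mantissa digits 1.2300000000000000. A1 produces the binary exponent
# -2 - 16 = -18 (the -16 compensates for reading the mantissa as a 17-digit
# integer), A2 produces the integer 12300000000000000 = 1.23E16 in fp0, A3
# makes no adjustment because abs(-18) <= 27, and A4/A5 scale by 10^-18 to
# give the exact result 1.23E-2.
#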
#
# PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
# to nearest, minus, and plus, respectively. The tables include
# 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
# is required until the power is greater than 27, however, all
# tables include the first 5 for ease of indexing.
#
RTABLE:
byte 0,0,0,0
byte 2,3,2,3
byte 2,3,3,2
byte 3,2,2,3
set FNIBS,7
set FSTRT,0
set ESTRT,4
set EDIGITS,2
global decbin
decbin:
mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
mov.l 0x8(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0
movm.l &0x3c00,-(%sp) # save d2-d5
fmovm.x &0x1,-(%sp) # save fp1
#
# Calculate exponent:
# 1. Copy bcd value in memory for use as a working copy.
# 2. Calculate absolute value of exponent in d1 by mul and add.
# 3. Correct for exponent sign.
# 4. Subtract 16 to compensate for interpreting the mant as all integer digits.
# (i.e., all digits assumed left of the decimal point.)
#
# Register usage:
#
# calc_e:
# (*) d0: temp digit storage
# (*) d1: accumulator for binary exponent
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: first word of bcd
# ( ) a0: pointer to working bcd value
# ( ) a6: pointer to original bcd value
# (*) FP_SCR1: working copy of original bcd value
# (*) L_SCR1: copy of original exponent word
#
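#
# For example, exponent digits 0,4,7 accumulate as ((0*10+0)*10+4)*10+7 = 47;
# with SE clear the exponent stays +47, and the subtract-16 step leaves an
# adjusted exponent of 31 on the stack.
#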
calc_e:
	mov.l		&EDIGITS,%d2		# dbf count for the 3 exponent digits
mov.l &ESTRT,%d3 # counter to pick up digits
mov.l (%a0),%d4 # get first word of bcd
clr.l %d1 # zero d1 for accumulator
e_gd:
mulu.l &0xa,%d1 # mul partial product by one digit place
bfextu %d4{%d3:&4},%d0 # get the digit and zero extend into d0
add.l %d0,%d1 # d1 = d1 + d0
addq.b &4,%d3 # advance d3 to the next digit
dbf.w %d2,e_gd # if we have used all 3 digits, exit loop
btst &30,%d4 # get SE
beq.b e_pos # don't negate if pos
neg.l %d1 # negate before subtracting
e_pos:
sub.l &16,%d1 # sub to compensate for shift of mant
bge.b e_save # if still pos, do not neg
neg.l %d1 # now negative, make pos and set SE
or.l &0x40000000,%d4 # set SE in d4,
or.l &0x40000000,(%a0) # and in working bcd
e_save:
mov.l %d1,-(%sp) # save exp on stack
#
#
# Calculate mantissa:
# 1. Calculate absolute value of mantissa in fp0 by mul and add.
# 2. Correct for mantissa sign.
# (i.e., all digits assumed left of the decimal point.)
#
# Register usage:
#
# calc_m:
# (*) d0: temp digit storage
# (*) d1: lword counter
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: words 2 and 3 of bcd
# ( ) a0: pointer to working bcd value
# ( ) a6: pointer to original bcd value
# (*) fp0: mantissa accumulator
# ( ) FP_SCR1: working copy of original bcd value
# ( ) L_SCR1: copy of original exponent word
#
calc_m:
mov.l &1,%d1 # word counter, init to 1
fmov.s &0x00000000,%fp0 # accumulator
#
#
# Since the packed number has a long word between the first & second parts,
# get the integer digit then skip down & get the rest of the
# mantissa. We will unroll the loop once.
#
bfextu (%a0){&28:&4},%d0 # integer part is ls digit in long word
fadd.b %d0,%fp0 # add digit to sum in fp0
#
#
# Get the rest of the mantissa.
#
loadlw:
	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
mov.l &FSTRT,%d3 # counter to pick up digits
mov.l &FNIBS,%d2 # reset number of digits per a0 ptr
md2b:
fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
bfextu %d4{%d3:&4},%d0 # get the digit and zero extend
fadd.b %d0,%fp0 # fp0 = fp0 + digit
#
#
# If all the digits (8) in that long word have been converted (d2=0),
# then inc d1 (=2) to point to the next long word and reset d3 to 0
# to initialize the digit offset, and set d2 to 7 for the digit count;
# else continue with this long word.
#
addq.b &4,%d3 # advance d3 to the next digit
dbf.w %d2,md2b # check for last digit in this lw
nextlw:
addq.l &1,%d1 # inc lw pointer in mantissa
cmp.l %d1,&2 # test for last lw
ble.b loadlw # if not, get last one
#
# Check the sign of the mant and make the value in fp0 the same sign.
#
m_sign:
btst &31,(%a0) # test sign of the mantissa
beq.b ap_st_z # if clear, go to append/strip zeros
fneg.x %fp0 # if set, negate fp0
#
# Append/strip zeros:
#
# For adjusted exponents which have an absolute value greater than 27*,
# this routine calculates the amount needed to normalize the mantissa
# for the adjusted exponent. That number is subtracted from the exp
# if the exp was positive, and added if it was negative. The purpose
# of this is to reduce the value of the exponent and the possibility
# of error in calculation of pwrten.
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
# 6. Divide the mantissa by 10**count.
#
# *Why 27? If the adjusted exponent is within -28 < expA < 28, then
# any adjustment due to append/strip zeros will drive the resultant
# exponent towards zero. Since all pwrten constants with a power
# of 27 or less are exact, there is no need to use this routine to
# attempt to lessen the resultant exponent.
#
# Register usage:
#
# ap_st_z:
# (*) d0: temp digit storage
# (*) d1: zero count
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: first word of bcd
# (*) d5: lword counter
# ( ) a0: pointer to working bcd value
# ( ) FP_SCR1: working copy of original bcd value
# ( ) L_SCR1: copy of original exponent word
#
#
# First check the absolute value of the exponent to see if this
# routine is necessary. If so, then check the sign of the exponent
# and do append (+) or strip (-) zeros accordingly.
# This section handles a positive adjusted exponent.
#
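# For example, with adjusted exponent +30 and mantissa 00000000000000012, the
# scan counts 15 leading zeros (M16, all of lword 2, and six digits of lword
# 3), so the exponent becomes 30-15 = 15 and the mantissa is multiplied by
# 10^15. Both forms equal 1.2E31, but the reduced exponent of 15 can now be
# formed exactly from the power-of-ten tables.
#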
ap_st_z:
mov.l (%sp),%d1 # load expA for range test
	cmp.l		%d1,&27			# compare expA with 27
ble.w pwrten # if abs(expA) <28, skip ap/st zeros
btst &30,(%a0) # check sign of exp
bne.b ap_st_n # if neg, go to neg side
clr.l %d1 # zero count reg
mov.l (%a0),%d4 # load lword 1 to d4
bfextu %d4{&28:&4},%d0 # get M16 in d0
bne.b ap_p_fx # if M16 is non-zero, go fix exp
addq.l &1,%d1 # inc zero count
mov.l &1,%d5 # init lword counter
mov.l (%a0,%d5.L*4),%d4 # get lword 2 to d4
bne.b ap_p_cl # if lw 2 is zero, skip it
addq.l &8,%d1 # and inc count by 8
addq.l &1,%d5 # inc lword counter
mov.l (%a0,%d5.L*4),%d4 # get lword 3 to d4
ap_p_cl:
clr.l %d3 # init offset reg
mov.l &7,%d2 # init digit counter
ap_p_gd:
bfextu %d4{%d3:&4},%d0 # get digit
bne.b ap_p_fx # if non-zero, go to fix exp
addq.l &4,%d3 # point to next digit
addq.l &1,%d1 # inc digit counter
dbf.w %d2,ap_p_gd # get next digit
ap_p_fx:
	mov.l		%d1,%d0			# copy counter to d0
mov.l (%sp),%d1 # get adjusted exp from memory
sub.l %d0,%d1 # subtract count from exp
bge.b ap_p_fm # if still pos, go to pwrten
neg.l %d1 # now its neg; get abs
mov.l (%a0),%d4 # load lword 1 to d4
or.l &0x40000000,%d4 # and set SE in d4
or.l &0x40000000,(%a0) # and in memory
#
# Calculate the mantissa multiplier to compensate for the stripping of
# zeros from the mantissa.
#
ap_p_fm:
lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
clr.l %d3 # init table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
mov.l &3,%d2 # init d2 to count bits in counter
ap_p_el:
asr.l &1,%d0 # shift lsb into carry
bcc.b ap_p_en # if 1, mul fp1 by pwrten factor
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
ap_p_en:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b ap_p_el # if not, get next bit
fmul.x %fp1,%fp0 # mul mantissa by 10**(no_bits_shifted)
bra.b pwrten # go calc pwrten
#
# This section handles a negative adjusted exponent.
#
ap_st_n:
clr.l %d1 # clr counter
mov.l &2,%d5 # set up d5 to point to lword 3
mov.l (%a0,%d5.L*4),%d4 # get lword 3
bne.b ap_n_cl # if not zero, check digits
sub.l &1,%d5 # dec d5 to point to lword 2
addq.l &8,%d1 # inc counter by 8
mov.l (%a0,%d5.L*4),%d4 # get lword 2
ap_n_cl:
mov.l &28,%d3 # point to last digit
mov.l &7,%d2 # init digit counter
ap_n_gd:
bfextu %d4{%d3:&4},%d0 # get digit
bne.b ap_n_fx # if non-zero, go to exp fix
subq.l &4,%d3 # point to previous digit
addq.l &1,%d1 # inc digit counter
dbf.w %d2,ap_n_gd # get next digit
ap_n_fx:
mov.l %d1,%d0 # copy counter to d0
mov.l (%sp),%d1 # get adjusted exp from memory
sub.l %d0,%d1 # subtract count from exp
bgt.b ap_n_fm # if still pos, go fix mantissa
neg.l %d1 # take abs of exp and clr SE
mov.l (%a0),%d4 # load lword 1 to d4
and.l &0xbfffffff,%d4 # and clr SE in d4
and.l &0xbfffffff,(%a0) # and in memory
#
# Calculate the mantissa multiplier to compensate for the appending of
# zeros to the mantissa.
#
ap_n_fm:
lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
clr.l %d3 # init table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
mov.l &3,%d2 # init d2 to count bits in counter
ap_n_el:
asr.l &1,%d0 # shift lsb into carry
bcc.b ap_n_en # if 1, mul fp1 by pwrten factor
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
ap_n_en:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b ap_n_el # if not, get next bit
fdiv.x %fp1,%fp0 # div mantissa by 10**(no_bits_shifted)
#
#
# Calculate power-of-ten factor from adjusted and shifted exponent.
#
# Register usage:
#
# pwrten:
# (*) d0: temp
# ( ) d1: exponent
# (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
# (*) d3: FPCR work copy
# ( ) d4: first word of bcd
# (*) a1: RTABLE pointer
# calc_p:
# (*) d0: temp
# ( ) d1: exponent
# (*) d3: PWRTxx table index
# ( ) a0: pointer to working copy of bcd
# (*) a1: PWRTxx pointer
# (*) fp1: power-of-ten accumulator
#
# Pwrten calculates the exponent factor in the selected rounding mode
# according to the following table:
#
# Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
#
# ANY ANY RN RN
#
# + + RP RP
# - + RP RM
# + - RP RM
# - - RP RP
#
# + + RM RM
# - + RM RP
# + - RM RP
# - - RM RM
#
# + + RZ RM
# - + RZ RM
# + - RZ RP
# - - RZ RP
#
#
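# For example, a user rounding mode of RM with a positive mantissa and a
# negative exponent indexes RTABLE at 2*4 + {SM=0,SE=1} = 9, selecting 3 (RP),
# which matches the "+ - RM -> RP" row above. The factor itself is then built
# from the binary bits of the exponent: abs(exp) = 13 = 8+4+1 multiplies the
# 10^1, 10^4 and 10^8 table entries together to form 10^13.
#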
pwrten:
mov.l USER_FPCR(%a6),%d3 # get user's FPCR
bfextu %d3{&26:&2},%d2 # isolate rounding mode bits
mov.l (%a0),%d4 # reload 1st bcd word to d4
asl.l &2,%d2 # format d2 to be
bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
add.l %d0,%d2 # in d2 as index into RTABLE
lea.l RTABLE(%pc),%a1 # load rtable base
mov.b (%a1,%d2),%d0 # load new rounding bits from table
clr.l %d3 # clear d3 to force no exc and extended
bfins %d0,%d3{&26:&2} # stuff new rounding bits in FPCR
fmov.l %d3,%fpcr # write new FPCR
asr.l &1,%d0 # write correct PTENxx table
bcc.b not_rp # to a1
lea.l PTENRP(%pc),%a1 # it is RP
bra.b calc_p # go to init section
not_rp:
asr.l &1,%d0 # keep checking
bcc.b not_rm
lea.l PTENRM(%pc),%a1 # it is RM
bra.b calc_p # go to init section
not_rm:
lea.l PTENRN(%pc),%a1 # it is RN
calc_p:
mov.l %d1,%d0 # copy exp to d0;use d0
bpl.b no_neg # if exp is negative,
neg.l %d0 # invert it
or.l &0x40000000,(%a0) # and set SE bit
no_neg:
clr.l %d3 # table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
e_loop:
asr.l &1,%d0 # shift next bit into carry
bcc.b e_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
e_next:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b e_loop # not zero, continue shifting
#
#
# Check the sign of the adjusted exp and make the value in fp0 the
# same sign. If the exp was pos then multiply fp1*fp0;
# else divide fp0/fp1.
#
# Register Usage:
# norm:
# ( ) a0: pointer to working bcd value
# (*) fp0: mantissa accumulator
# ( ) fp1: scaling factor - 10**(abs(exp))
#
pnorm:
btst &30,(%a0) # test the sign of the exponent
beq.b mul # if clear, go to multiply
div:
	fdiv.x		%fp1,%fp0		# exp is negative, so divide mant by 10^|exp|
bra.b end_dec
mul:
	fmul.x		%fp1,%fp0		# exp is positive, so multiply mant by 10^exp
#
#
# Clean up and return with result in fp0.
#
# If the final mul/div in decbin incurred an inex exception,
# it will be inex2, but will be reported as inex1 by get_op.
#
end_dec:
fmov.l %fpsr,%d0 # get status register
bclr &inex2_bit+8,%d0 # test for inex2 and clear it
beq.b no_exc # skip this if no exc
ori.w &inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
no_exc:
add.l &0x4,%sp # clear 1 lw param
fmovm.x (%sp)+,&0x40 # restore fp1
movm.l (%sp)+,&0x3c # restore d2-d5
fmov.l &0x0,%fpcr
fmov.l &0x0,%fpsr
rts
#########################################################################
# bindec(): Converts an input in extended precision format to bcd format#
# #
# INPUT *************************************************************** #
# a0 = pointer to the input extended precision value in memory. #
# the input may be either normalized, unnormalized, or #
# denormalized. #
# d0 = contains the k-factor sign-extended to 32-bits. #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = bcd format result on the stack. #
# #
# ALGORITHM *********************************************************** #
# #
# A1. Set RM and size ext; Set SIGMA = sign of input. #
# The k-factor is saved for use in d7. Clear the #
# BINDEC_FLG for separating normalized/denormalized #
# input. If input is unnormalized or denormalized, #
# normalize it. #
# #
# A2. Set X = abs(input). #
# #
# A3. Compute ILOG. #
# ILOG is the log base 10 of the input value. It is #
# approximated by adding e + 0.f when the original #
# value is viewed as 2^^e * 1.f in extended precision. #
# This value is stored in d6. #
# #
# A4. Clr INEX bit. #
# The operation in A3 above may have set INEX2. #
# #
# A5. Set ICTR = 0; #
# ICTR is a flag used in A13. It must be set before the #
# loop entry A6. #
# #
# A6. Calculate LEN. #
# LEN is the number of digits to be displayed. The #
# k-factor can dictate either the total number of digits, #
# if it is a positive number, or the number of digits #
# after the decimal point which are to be included as #
# significant. See the 68882 manual for examples. #
# If LEN is computed to be greater than 17, set OPERR in #
# USER_FPSR. LEN is stored in d4. #
# #
# A7. Calculate SCALE. #
# SCALE is equal to 10^ISCALE, where ISCALE is the number #
# of decimal places needed to insure LEN integer digits #
# in the output before conversion to bcd. LAMBDA is the #
# sign of ISCALE, used in A9. Fp1 contains #
# 10^^(abs(ISCALE)) using a rounding mode which is a #
# function of the original rounding mode and the signs #
# of ISCALE and X. A table is given in the code. #
# #
# A8. Clr INEX; Force RZ. #
# The operation in A3 above may have set INEX2. #
# RZ mode is forced for the scaling operation to insure #
# only one rounding error. The grs bits are collected in #
# the INEX flag for use in A10. #
# #
# A9. Scale X -> Y. #
# The mantissa is scaled to the desired number of #
# significant digits. The excess digits are collected #
# in INEX2. #
# #
# A10. Or in INEX. #
# If INEX is set, round error occurred. This is #
# compensated for by 'or-ing' in the INEX2 flag to #
# the lsb of Y. #
# #
# A11. Restore original FPCR; set size ext. #
# Perform FINT operation in the user's rounding mode. #
# Keep the size to extended. #
# #
# A12. Calculate YINT = FINT(Y) according to user's rounding #
# mode. The FPSP routine sintd0 is used. The output #
# is in fp0. #
# #
# A13. Check for LEN digits. #
# If the int operation results in more than LEN digits, #
# or less than LEN -1 digits, adjust ILOG and repeat from #
# A6. This test occurs only on the first pass. If the #
#	result is exactly 10^LEN, increment ILOG and divide		#
# the mantissa by 10. #
# #
# A14. Convert the mantissa to bcd. #
# The binstr routine is used to convert the LEN digit #
# mantissa to bcd in memory. The input to binstr is #
# to be a fraction; i.e. (mantissa)/10^LEN and adjusted #
# such that the decimal point is to the left of bit 63. #
# The bcd digits are stored in the correct position in #
# the final string area in memory. #
# #
# A15. Convert the exponent to bcd. #
# As in A14 above, the exp is converted to bcd and the #
# digits are stored in the final string. #
# Test the length of the final exponent string. If the #
# length is 4, set operr. #
# #
# A16. Write sign bits to final string. #
# #
#########################################################################
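# For example, converting X = 3.14159... with a k-factor of +5: A3 gives
# ILOG = 0, A6 gives LEN = 5, A7 gives ISCALE = ILOG+1-LEN = -4, so A9 forms
# Y = X * 10^4 = 31415.9..., A12 rounds this to YINT = 31416, and the packed
# string written out represents 3.1416E+0.
#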
set BINDEC_FLG, EXC_TEMP # DENORM flag
# Constants in extended precision
PLOG2:
long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
PLOG2UP1:
long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
# Constants in single precision
FONE:
long 0x3F800000,0x00000000,0x00000000,0x00000000
FTWO:
long 0x40000000,0x00000000,0x00000000,0x00000000
FTEN:
long 0x41200000,0x00000000,0x00000000,0x00000000
F4933:
long 0x459A2800,0x00000000,0x00000000,0x00000000
RBDTBL:
byte 0,0,0,0
byte 3,3,2,2
byte 3,2,2,3
byte 2,3,3,2
# Implementation Notes:
#
# The registers are used as follows:
#
# d0: scratch; LEN input to binstr
# d1: scratch
# d2: upper 32-bits of mantissa for binstr
# d3: scratch;lower 32-bits of mantissa for binstr
# d4: LEN
# d5: LAMBDA/ICTR
# d6: ILOG
# d7: k-factor
# a0: ptr for original operand/final result
# a1: scratch pointer
# a2: pointer to FP_X; abs(original value) in ext
# fp0: scratch
# fp1: scratch
# fp2: scratch
# F_SCR1:
# F_SCR2:
# L_SCR1:
# L_SCR2:
global bindec
bindec:
movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
# A1. Set RM and size ext. Set SIGMA = sign input;
# The k-factor is saved for use in d7. Clear BINDEC_FLG for
# separating normalized/denormalized input. If the input
# is a denormalized number, set the BINDEC_FLG memory word
# to signal denorm. If the input is unnormalized, normalize
# the input and test for denormalized result.
#
fmov.l &rm_mode*0x10,%fpcr # set RM and ext
mov.l (%a0),L_SCR2(%a6) # save exponent for sign check
mov.l %d0,%d7 # move k-factor to d7
clr.b BINDEC_FLG(%a6) # clr norm/denorm flag
cmpi.b STAG(%a6),&DENORM # is input a DENORM?
bne.w A2_str # no; input is a NORM
#
# Normalize the denorm
#
un_de_norm:
mov.w (%a0),%d0
and.w &0x7fff,%d0 # strip sign of normalized exp
mov.l 4(%a0),%d1
mov.l 8(%a0),%d2
norm_loop:
	sub.w		&1,%d0			# decrement the exponent
	lsl.l		&1,%d2			# shift low mantissa lword left
	roxl.l		&1,%d1			# rotate carry into high mantissa lword
	tst.l		%d1			# msb of mantissa set yet?
	bge.b		norm_loop		# no; keep shifting
#
# Test if the normalized input is denormalized
#
tst.w %d0
bgt.b pos_exp # if greater than zero, it is a norm
st BINDEC_FLG(%a6) # set flag for denorm
pos_exp:
and.w &0x7fff,%d0 # strip sign of normalized exp
mov.w %d0,(%a0)
mov.l %d1,4(%a0)
mov.l %d2,8(%a0)
# A2. Set X = abs(input).
#
A2_str:
mov.l (%a0),FP_SCR1(%a6) # move input to work space
mov.l 4(%a0),FP_SCR1+4(%a6) # move input to work space
mov.l 8(%a0),FP_SCR1+8(%a6) # move input to work space
and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
# A3. Compute ILOG.
# ILOG is the log base 10 of the input value. It is approx-
# imated by adding e + 0.f when the original value is viewed
# as 2^^e * 1.f in extended precision. This value is stored
# in d6.
#
# Register usage:
# Input/Output
# d0: k-factor/exponent
# d2: x/x
# d3: x/x
# d4: x/x
# d5: x/x
# d6: x/ILOG
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/x
# a2: x/x
# fp0: x/float(ILOG)
# fp1: x/x
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X)/Abs(X) with $3fff exponent
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
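# For example, X = 1536 = 2^10 * 1.5 gives e + 0.f = 10.5; multiplied by
# log10(2) this is about 3.16, and the RM-mode conversion to integer below
# floors it to ILOG = 3, which equals floor(log10(1536)).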
tst.b BINDEC_FLG(%a6) # check for denorm
beq.b A3_cont # if clr, continue with norm
mov.l &-4933,%d6 # force ILOG = -4933
bra.b A4_str
A3_cont:
mov.w FP_SCR1(%a6),%d0 # move exp to d0
mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
fmov.x FP_SCR1(%a6),%fp0 # now fp0 has 1.f
sub.w &0x3fff,%d0 # strip off bias
fadd.w %d0,%fp0 # add in exp
fsub.s FONE(%pc),%fp0 # subtract off 1.0
fbge.w pos_res # if pos, branch
fmul.x PLOG2UP1(%pc),%fp0 # if neg, mul by LOG2UP1
fmov.l %fp0,%d6 # put ILOG in d6 as a lword
bra.b A4_str # go move out ILOG
pos_res:
fmul.x PLOG2(%pc),%fp0 # if pos, mul by LOG2
fmov.l %fp0,%d6 # put ILOG in d6 as a lword
# A4. Clr INEX bit.
# The operation in A3 above may have set INEX2.
A4_str:
fmov.l &0,%fpsr # zero all of fpsr - nothing needed
# A5. Set ICTR = 0;
# ICTR is a flag used in A13. It must be set before the
# loop entry A6. The lower word of d5 is used for ICTR.
clr.w %d5 # clear ICTR
# A6. Calculate LEN.
# LEN is the number of digits to be displayed. The k-factor
# can dictate either the total number of digits, if it is
# a positive number, or the number of digits after the
# original decimal point which are to be included as
# significant. See the 68882 manual for examples.
# If LEN is computed to be greater than 17, set OPERR in
# USER_FPSR. LEN is stored in d4.
#
# Register usage:
# Input/Output
# d0: exponent/Unchanged
# d2: x/x/scratch
# d3: x/x
# d4: exc picture/LEN
# d5: ICTR/Unchanged
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/x
# a2: x/x
# fp0: float(ILOG)/Unchanged
# fp1: x/x
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
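# For example, with ILOG = 3 (a four-digit integer part), a k-factor of -2
# asks for two digits to the right of the decimal point, so
# LEN = ILOG + 1 - k = 3 + 1 + 2 = 6 significant digits in all.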
A6_str:
tst.l %d7 # branch on sign of k
ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
mov.l %d7,%d4 # if k > 0, LEN = k
bra.b len_ck # skip to LEN check
k_neg:
mov.l %d6,%d4 # first load ILOG to d4
sub.l %d7,%d4 # subtract off k
addq.l &1,%d4 # add in the 1
len_ck:
tst.l %d4 # LEN check: branch on sign of LEN
ble.b LEN_ng # if neg, set LEN = 1
cmp.l %d4,&17 # test if LEN > 17
ble.b A7_str # if not, forget it
mov.l &17,%d4 # set max LEN = 17
tst.l %d7 # if negative, never set OPERR
ble.b A7_str # if positive, continue
or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
bra.b A7_str # finished here
LEN_ng:
mov.l &1,%d4 # min LEN is 1
# A7. Calculate SCALE.
# SCALE is equal to 10^ISCALE, where ISCALE is the number
# of decimal places needed to insure LEN integer digits
# in the output before conversion to bcd. LAMBDA is the sign
# of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
# the rounding mode as given in the following table (see
# Coonen, p. 7.23 as ref.; however, the SCALE variable is
# of opposite sign in bindec.sa from Coonen).
#
# Initial USE
# FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
# ----------------------------------------------
# RN 00 0 0 00/0 RN
# RN 00 0 1 00/0 RN
# RN 00 1 0 00/0 RN
# RN 00 1 1 00/0 RN
# RZ 01 0 0 11/3 RP
# RZ 01 0 1 11/3 RP
# RZ 01 1 0 10/2 RM
# RZ 01 1 1 10/2 RM
# RM 10 0 0 11/3 RP
# RM 10 0 1 10/2 RM
# RM 10 1 0 10/2 RM
# RM 10 1 1 11/3 RP
# RP 11 0 0 10/2 RM
# RP 11 0 1 11/3 RP
# RP 11 1 0 11/3 RP
# RP 11 1 1 10/2 RM
#
# Register usage:
# Input/Output
# d0: exponent/scratch - final is 0
# d2: x/0 or 24 for A9
# d3: x/scratch - offset ptr into PTENRM array
# d4: LEN/Unchanged
# d5: 0/ICTR:LAMBDA
# d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/ptr to PTENRM array
# a2: x/x
# fp0: float(ILOG)/Unchanged
# fp1: x/10^ISCALE
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
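# Continuing the example above, ILOG = 3 and LEN = 6 give
# ISCALE = ILOG + 1 - LEN = -2, so LAMBDA is set and fp1 = 10^2; A9 will then
# multiply, producing a Y with exactly LEN = 6 integer digits
# (e.g. 1536.00 -> 153600).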
A7_str:
tst.l %d7 # test sign of k
bgt.b k_pos # if pos and > 0, skip this
cmp.l %d7,%d6 # test k - ILOG
blt.b k_pos # if ILOG >= k, skip this
mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
k_pos:
mov.l %d6,%d0 # calc ILOG + 1 - LEN in d0
addq.l &1,%d0 # add the 1
sub.l %d4,%d0 # sub off LEN
swap %d5 # use upper word of d5 for LAMBDA
clr.w %d5 # set it zero initially
clr.w %d2 # set up d2 for very small case
tst.l %d0 # test sign of ISCALE
bge.b iscale # if pos, skip next inst
addq.w &1,%d5 # if neg, set LAMBDA true
cmp.l %d0,&0xffffecd4 # test iscale <= -4908
bgt.b no_inf # if false, skip rest
add.l &24,%d0 # add in 24 to iscale
mov.l &24,%d2 # put 24 in d2 for A9
no_inf:
neg.l %d0 # and take abs of ISCALE
iscale:
fmov.s FONE(%pc),%fp1 # init fp1 to 1
bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits
lsl.w &1,%d1 # put them in bits 2:1
add.w %d5,%d1 # add in LAMBDA
lsl.w &1,%d1 # put them in bits 3:1
tst.l L_SCR2(%a6) # test sign of original x
bge.b x_pos # if pos, don't set bit 0
addq.l &1,%d1 # if neg, set bit 0
x_pos:
lea.l RBDTBL(%pc),%a2 # load rbdtbl base
mov.b (%a2,%d1),%d3 # load d3 with new rmode
lsl.l &4,%d3 # put bits in proper position
fmov.l %d3,%fpcr # load bits into fpu
lsr.l &4,%d3 # put bits in proper position
tst.b %d3 # decode new rmode for pten table
bne.b not_rn # if zero, it is RN
lea.l PTENRN(%pc),%a1 # load a1 with RN table base
bra.b rmode # exit decode
not_rn:
lsr.b &1,%d3 # get lsb in carry
bcc.b not_rp2 # if carry clear, it is RM
lea.l PTENRP(%pc),%a1 # load a1 with RP table base
bra.b rmode # exit decode
not_rp2:
lea.l PTENRM(%pc),%a1 # load a1 with RM table base
rmode:
clr.l %d3 # clr table index
e_loop2:
lsr.l &1,%d0 # shift next bit into carry
bcc.b e_next2 # if zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
e_next2:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if ISCALE is zero
bne.b e_loop2 # if not, loop
# A8. Clr INEX; Force RZ.
# The operation in A3 above may have set INEX2.
# RZ mode is forced for the scaling operation to insure
# only one rounding error. The grs bits are collected in
# the INEX flag for use in A10.
#
# Register usage:
# Input/Output
fmov.l &0,%fpsr # clr INEX
fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
# A9. Scale X -> Y.
# The mantissa is scaled to the desired number of significant
#	digits. The excess digits are collected in INEX2. If multiplying,
#	check d2 for an excess power-of-ten value. If it is non-zero,
#	the iscale value would have caused the pwrten calculation
#	to overflow. Only a negative iscale can cause this, so
#	multiply by 10^(d2), which is now only allowed to be 24,
#	with a multiply by 10^8 and 10^16, which is exact since
#	10^24 is exact. If the input was denormalized, the exponent
#	and mantissa multiplications are done separately (see sc_mul
#	below) so that no intermediate underflow can occur.
#
# Register usage:
# Input/Output
# d0: FPCR with RZ mode/Unchanged
# d2: 0 or 24/unchanged
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: ptr to PTENRM array/Unchanged
# a2: x/x
# fp0: float(ILOG)/X adjusted for SCALE (Y)
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
A9_str:
fmov.x (%a0),%fp0 # load X from memory
fabs.x %fp0 # use abs(X)
tst.w %d5 # LAMBDA is in lower word of d5
bne.b sc_mul # if neg (LAMBDA = 1), scale by mul
fdiv.x %fp1,%fp0 # calculate X / SCALE -> Y to fp0
bra.w A10_st # branch to A10
sc_mul:
tst.b BINDEC_FLG(%a6) # check for denorm
beq.w A9_norm # if norm, continue with mul
# for DENORM, we must calculate:
# fp0 = input_op * 10^ISCALE * 10^24
# since the input operand is a DENORM, we can't multiply it directly.
# so, we do the multiplication of the exponents and mantissas separately.
# in this way, we avoid underflow on intermediate stages of the
# multiplication and guarantee a result without exception.
fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
mov.w (%sp),%d3 # grab exponent
andi.w &0x7fff,%d3 # clear sign
ori.w &0x8000,(%a0) # make DENORM exp negative
add.w (%a0),%d3 # add DENORM exp to 10^ISCALE exp
subi.w &0x3fff,%d3 # subtract BIAS
add.w 36(%a1),%d3
subi.w &0x3fff,%d3 # subtract BIAS
add.w 48(%a1),%d3
subi.w &0x3fff,%d3 # subtract BIAS
	bmi.w		sc_mul_err		# if result is DENORM, punt!!!
andi.w &0x8000,(%sp) # keep sign
or.w %d3,(%sp) # insert new exponent
andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
mov.l 0x4(%a0),-(%sp)
mov.l &0x3fff0000,-(%sp) # force exp to zero
fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
fmul.x (%sp)+,%fp0
# fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
# fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
mov.l 36+8(%a1),-(%sp) # get 10^8 mantissa
mov.l 36+4(%a1),-(%sp)
mov.l &0x3fff0000,-(%sp) # force exp to zero
mov.l 48+8(%a1),-(%sp) # get 10^16 mantissa
mov.l 48+4(%a1),-(%sp)
	mov.l		&0x3fff0000,-(%sp)	# force exp to zero
fmul.x (%sp)+,%fp0 # multiply fp0 by 10^8
fmul.x (%sp)+,%fp0 # multiply fp0 by 10^16
bra.b A10_st
sc_mul_err:
bra.b sc_mul_err
A9_norm:
tst.w %d2 # test for small exp case
beq.b A9_con # if zero, continue as normal
fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
A9_con:
fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
# A10. Or in INEX.
# If INEX is set, round error occurred. This is compensated
# for by 'or-ing' in the INEX2 flag to the lsb of Y.
#
# Register usage:
# Input/Output
# d0: FPCR with RZ mode/FPSR with INEX2 isolated
# d2: x/x
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: ptr to PTENxx array/Unchanged
# a2: x/ptr to FP_SCR1(a6)
# fp0: Y/Y with lsb adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
A10_st:
fmov.l %fpsr,%d0 # get FPSR
fmov.x %fp0,FP_SCR1(%a6) # move Y to memory
lea.l FP_SCR1(%a6),%a2 # load a2 with ptr to FP_SCR1
btst &9,%d0 # check if INEX2 set
beq.b A11_st # if clear, skip rest
or.l &1,8(%a2) # or in 1 to lsb of mantissa
fmov.x FP_SCR1(%a6),%fp0 # write adjusted Y back to fpu
# A11. Restore original FPCR; set size ext.
# Perform FINT operation in the user's rounding mode. Keep
# the size to extended. The sintdo entry point in the sint
# routine expects the FPCR value to be in USER_FPCR for
# mode and precision. The original FPCR is saved in L_SCR1.
A11_st:
mov.l USER_FPCR(%a6),L_SCR1(%a6) # save it for later
and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
# ;block exceptions
# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
# The FPSP routine sintd0 is used. The output is in fp0.
#
# Register usage:
# Input/Output
# d0: FPSR with AINEX cleared/FPCR with size set to ext
# d2: x/x/scratch
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/Unchanged
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/src ptr for sintdo
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
# fp0: Y/YINT
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Y adjusted for inex/Y with original exponent
# L_SCR1:x/original USER_FPCR
# L_SCR2:first word of X packed/Unchanged
A12_st:
movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
mov.l L_SCR1(%a6),-(%sp)
mov.l L_SCR2(%a6),-(%sp)
lea.l FP_SCR1(%a6),%a0 # a0 is ptr to FP_SCR1(a6)
fmov.x %fp0,(%a0) # move Y to memory at FP_SCR1(a6)
tst.l L_SCR2(%a6) # test sign of original operand
bge.b do_fint12 # if pos, use Y
or.l &0x80000000,(%a0) # if neg, use -Y
do_fint12:
mov.l USER_FPSR(%a6),-(%sp)
# bsr sintdo # sint routine returns int in fp0
fmov.l USER_FPCR(%a6),%fpcr
fmov.l &0x0,%fpsr # clear the AEXC bits!!!
## mov.l USER_FPCR(%a6),%d0 # ext prec/keep rnd mode
## andi.l &0x00000030,%d0
## fmov.l %d0,%fpcr
fint.x FP_SCR1(%a6),%fp0 # do fint()
fmov.l %fpsr,%d0
or.w %d0,FPSR_EXCEPT(%a6)
## fmov.l &0x0,%fpcr
## fmov.l %fpsr,%d0 # don't keep ccodes
## or.w %d0,FPSR_EXCEPT(%a6)
mov.b (%sp),USER_FPSR(%a6)
add.l &4,%sp
mov.l (%sp)+,L_SCR2(%a6)
mov.l (%sp)+,L_SCR1(%a6)
movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
mov.l L_SCR2(%a6),FP_SCR1(%a6) # restore original exponent
mov.l L_SCR1(%a6),USER_FPCR(%a6) # restore user's FPCR
# A13. Check for LEN digits.
# If the int operation results in more than LEN digits,
# or less than LEN -1 digits, adjust ILOG and repeat from
# A6. This test occurs only on the first pass. If the
# result is exactly 10^LEN, decrement ILOG and divide
# the mantissa by 10. The calculation of 10^LEN cannot
# be inexact, since all powers of ten up to 10^27 are exact
# in extended precision, so the use of a previous power-of-ten
# table will introduce no error.
#
#
# Register usage:
# Input/Output
# d0: FPCR with size set to ext/scratch final = 0
# d2: x/x
# d3: x/scratch final = x
# d4: LEN/LEN adjusted
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG/ILOG adjusted
# d7: k-factor/Unchanged
# a0: pointer into memory for packed bcd string formation
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: int portion of Y/abs(YINT) adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: x/10^LEN
# F_SCR1:x/x
# F_SCR2:Y with original exponent/Unchanged
# L_SCR1:original USER_FPCR/Unchanged
# L_SCR2:first word of X packed/Unchanged
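#
# Illustrative sketch (not part of the package; pow10_exact() is a
# hypothetical stand-in for the l_loop/z_loop idiom below, which walks
# the bits of the exponent and multiplies in the matching 10^(2^k)
# table entry - exact in extended precision for every power used here).
# On the first pass the LEN-digit check then reduces to:
#
#     long double p = pow10_exact(len - 1);
#     if (fabsl(yint) <  p)         { ilog--; ictr = 1; goto A6; }
#     else if (fabsl(yint) >  p*10) { ilog++; ictr = 1; goto A6; }
#     else if (fabsl(yint) == p*10) { yint /= 10; ilog++; }
#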
A13_st:
swap %d5 # put ICTR in lower word of d5
tst.w %d5 # check if ICTR = 0
bne not_zr # if non-zero, go to second test
#
# Compute 10^(LEN-1)
#
fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
mov.l %d4,%d0 # put LEN in d0
subq.l &1,%d0 # d0 = LEN -1
clr.l %d3 # clr table index
l_loop:
lsr.l &1,%d0 # shift next bit into carry
bcc.b l_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
l_next:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if LEN is zero
bne.b l_loop # if not, loop
#
# 10^LEN-1 is computed for this test and A14. If the input was
# denormalized, check only the case in which YINT > 10^LEN.
#
tst.b BINDEC_FLG(%a6) # check if input was norm
beq.b A13_con # if norm, continue with checking
fabs.x %fp0 # take abs of YINT
bra test_2
#
# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
#
A13_con:
fabs.x %fp0 # take abs of YINT
fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^(LEN-1)
fbge.w test_2 # if greater, do next test
subq.l &1,%d6 # subtract 1 from ILOG
mov.w &1,%d5 # set ICTR
fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
fmul.s FTEN(%pc),%fp2 # compute 10^LEN
bra.w A6_str # return to A6 and recompute YINT
test_2:
fmul.s FTEN(%pc),%fp2 # compute 10^LEN
fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^LEN
fblt.w A14_st # if less, all is ok, go to A14
fbgt.w fix_ex # if greater, fix and redo
fdiv.s FTEN(%pc),%fp0 # if equal, divide by 10
addq.l &1,%d6 # and inc ILOG
bra.b A14_st # and continue elsewhere
fix_ex:
addq.l &1,%d6 # increment ILOG by 1
mov.w &1,%d5 # set ICTR
fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
bra.w A6_str # return to A6 and recompute YINT
#
# Since ICTR <> 0, we have already been through one adjustment,
# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
# 10^LEN is again computed using whatever table is in a1 since the
# value calculated cannot be inexact.
#
not_zr:
fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
mov.l %d4,%d0 # put LEN in d0
clr.l %d3 # clr table index
z_loop:
lsr.l &1,%d0 # shift next bit into carry
bcc.b z_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
z_next:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if LEN is zero
bne.b z_loop # if not, loop
fabs.x %fp0 # get abs(YINT)
fcmp.x %fp0,%fp2 # check if abs(YINT) = 10^LEN
fbneq.w A14_st # if not, skip this
fdiv.s FTEN(%pc),%fp0 # divide abs(YINT) by 10
addq.l &1,%d6 # and inc ILOG by 1
addq.l &1,%d4 # and inc LEN
fmul.s FTEN(%pc),%fp2 # if LEN++, then get 10^LEN
# A14. Convert the mantissa to bcd.
# The binstr routine is used to convert the LEN digit
# mantissa to bcd in memory. The input to binstr is
# to be a fraction; i.e. (mantissa)/10^LEN and adjusted
# such that the decimal point is to the left of bit 63.
# The bcd digits are stored in the correct position in
# the final string area in memory.
#
#
# Register usage:
# Input/Output
# d0: x/LEN call to binstr - final is 0
# d1: x/0
# d2: x/ms 32-bits of mant of abs(YINT)
# d3: x/ls 32-bits of mant of abs(YINT)
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG
# d7: k-factor/Unchanged
# a0: pointer into memory for packed bcd string formation
# /ptr to first mantissa byte in result string
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: int portion of Y/abs(YINT) adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:x/Work area for final result
# F_SCR2:Y with original exponent/Unchanged
# L_SCR1:original USER_FPCR/Unchanged
# L_SCR2:first word of X packed/Unchanged
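#
# Illustrative sketch (not part of the package): binstr wants
# abs(YINT)/10^LEN as a pure 64-bit binary fraction with the point to
# the left of bit 63.  Ignoring the zero-mantissa special case, the
# extraction and rounding below amount to (mant/exp taken from the
# stored extended image):
#
#     uint64_t frac = mant;
#     if (exp < 0x3ffe)
#         frac >>= (0x3ffe - exp);       /* align point at bit 63   */
#     frac = (frac + 0x80) & ~0x7fULL;   /* round at bit 7 and drop */
#                                        /* bits the 881/882 would  */
#                                        /* not keep                */
#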
A14_st:
fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
fdiv.x %fp2,%fp0 # divide abs(YINT) by 10^LEN
lea.l FP_SCR0(%a6),%a0
fmov.x %fp0,(%a0) # move abs(YINT)/10^LEN to memory
mov.l 4(%a0),%d2 # move 2nd word of FP_RES to d2
mov.l 8(%a0),%d3 # move 3rd word of FP_RES to d3
clr.l 4(%a0) # zero word 2 of FP_RES
clr.l 8(%a0) # zero word 3 of FP_RES
mov.l (%a0),%d0 # move exponent to d0
swap %d0 # put exponent in lower word
beq.b no_sft # if zero, don't shift
sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
tst.l %d0 # check if > 1
bgt.b no_sft # if so, don't shift
neg.l %d0 # make exp positive
m_loop:
lsr.l &1,%d2 # shift d2:d3 right, add 0s
roxr.l &1,%d3 # the number of places
dbf.w %d0,m_loop # given in d0
no_sft:
tst.l %d2 # check for mantissa of zero
bne.b no_zr # if not, go on
tst.l %d3 # continue zero check
beq.b zer_m # if zero, go directly to binstr
no_zr:
clr.l %d1 # put zero in d1 for addx
add.l &0x00000080,%d3 # inc at bit 7
addx.l %d1,%d2 # continue inc
and.l &0xffffff80,%d3 # strip off lsb not used by 882
zer_m:
mov.l %d4,%d0 # put LEN in d0 for binstr call
addq.l &3,%a0 # a0 points to M16 byte in result
bsr binstr # call binstr to convert mant
# A15. Convert the exponent to bcd.
# As in A14 above, the exp is converted to bcd and the
# digits are stored in the final string.
#
# Digits are stored in L_SCR1(a6) on return from BINDEC as:
#
#   32               16 15                0
#  -----------------------------------------
#  |  0 | e3 | e2 | e1 | e4 |  X |  X |  X |
#  -----------------------------------------
#
# And are moved into their proper places in FP_SCR0. If digit e4
# is non-zero, OPERR is signaled. In all cases, all 4 digits are
# written as specified in the 881/882 manual for packed decimal.
#
# Register usage:
# Input/Output
# d0: x/LEN call to binstr - final is 0
# d1: x/scratch (0);shift count for final exponent packing
# d2: x/ms 32-bits of exp fraction/scratch
# d3: x/ls 32-bits of exp fraction
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG
# d7: k-factor/Unchanged
# a0: ptr to result string/ptr to L_SCR1(a6)
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: abs(YINT) adjusted/float(ILOG)
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:Work area for final result/BCD result
# F_SCR2:Y with original exponent/ILOG/10^4
# L_SCR1:original USER_FPCR/Exponent digits on return from binstr
# L_SCR2:first word of X packed/Unchanged
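#
# Illustrative sketch (not part of the package; insert_bits() is a
# hypothetical stand-in for the bfins {offset:width} bit-field insert):
# the packing below moves the binstr output from L_SCR1 into the
# exponent digits of the result string and flags OPERR if a fourth
# exponent digit was needed:
#
#     uint32_t v = l_scr1 >> 12;
#     insert_bits(fp_scr0, 4, 12, v);    /* e3:e2:e1 */
#     v >>= 12;
#     insert_bits(fp_scr0, 16, 4, v);    /* e4       */
#     if (v & 0xff)
#         user_fpsr |= OPERR_MASK | AIOP_MASK;
#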
A15_st:
tst.b BINDEC_FLG(%a6) # check for denorm
beq.b not_denorm
ftest.x %fp0 # test for zero
fbeq.w den_zero # if zero, use k-factor or 4933
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
bra.b convrt
den_zero:
tst.l %d7 # check sign of the k-factor
blt.b use_ilog # if negative, use ILOG
fmov.s F4933(%pc),%fp0 # force exponent to 4933
bra.b convrt # do it
use_ilog:
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
bra.b convrt
not_denorm:
ftest.x %fp0 # test for zero
fbneq.w not_zero # if zero, force exponent
fmov.s FONE(%pc),%fp0 # force exponent to 1
bra.b convrt # do it
not_zero:
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
convrt:
fdiv.x 24(%a1),%fp0 # compute ILOG/10^4
fmov.x %fp0,FP_SCR1(%a6) # store fp0 in memory
mov.l 4(%a2),%d2 # move word 2 to d2
mov.l 8(%a2),%d3 # move word 3 to d3
mov.w (%a2),%d0 # move exp to d0
beq.b x_loop_fin # if zero, skip the shift
sub.w &0x3ffd,%d0 # subtract off bias
neg.w %d0 # make exp positive
x_loop:
lsr.l &1,%d2 # shift d2:d3 right
roxr.l &1,%d3 # the number of places
dbf.w %d0,x_loop # given in d0
x_loop_fin:
clr.l %d1 # put zero in d1 for addx
add.l &0x00000080,%d3 # inc at bit 7
addx.l %d1,%d2 # continue inc
and.l &0xffffff80,%d3 # strip off lsb not used by 882
mov.l &4,%d0 # put 4 in d0 for binstr call
lea.l L_SCR1(%a6),%a0 # a0 is ptr to L_SCR1 for exp digits
bsr binstr # call binstr to convert exp
mov.l L_SCR1(%a6),%d0 # load L_SCR1 lword to d0
mov.l &12,%d1 # use d1 for shift count
lsr.l %d1,%d0 # shift d0 right by 12
bfins %d0,FP_SCR0(%a6){&4:&12} # put e3:e2:e1 in FP_SCR0
lsr.l %d1,%d0 # shift d0 right by 12
bfins %d0,FP_SCR0(%a6){&16:&4} # put e4 in FP_SCR0
tst.b %d0 # check if e4 is zero
beq.b A16_st # if zero, skip rest
or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
# A16. Write sign bits to final string.
# Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
#
# Register usage:
# Input/Output
# d0: x/scratch - final is x
# d2: x/x
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG/ILOG adjusted
# d7: k-factor/Unchanged
# a0: ptr to L_SCR1(a6)/Unchanged
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: float(ILOG)/Unchanged
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:BCD result with correct signs
# F_SCR2:ILOG/10^4
# L_SCR1:Exponent digits on return from binstr
# L_SCR2:first word of X packed/Unchanged
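#
# Illustrative sketch (not part of the package): SM and SE land in the
# top two bits of the first byte of the packed string, SM above SE:
#
#     unsigned d = 0;
#     if (mantissa_negative) d |= 2;                  /* SM          */
#     if (ilog < 0)          d |= 1;                  /* SE          */
#     fp_scr0[0] = (fp_scr0[0] & 0x0f) | (d << 6);    /* bfins {0:2} */
#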
A16_st:
clr.l %d0 # clr d0 for collection of signs
and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
tst.l L_SCR2(%a6) # check sign of original mantissa
bge.b mant_p # if pos, don't set SM
mov.l &2,%d0 # move 2 in to d0 for SM
mant_p:
tst.l %d6 # check sign of ILOG
bge.b wr_sgn # if pos, don't set SE
addq.l &1,%d0 # set bit 0 in d0 for SE
wr_sgn:
bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
# Clean up and restore all registers used.
fmov.l &0,%fpsr # clear possible inex2/ainex bits
fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
rts
global PTENRN
PTENRN:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
global PTENRP
PTENRP:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
global PTENRM
PTENRM:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
#########################################################################
# binstr(): Converts a 64-bit binary integer to bcd. #
# #
# INPUT *************************************************************** #
# d2:d3 = 64-bit binary integer #
# d0 = desired length (LEN) #
# a0 = pointer to start in memory for bcd characters #
# (This pointer must point to byte 4 of the first #
# lword of the packed decimal memory string.) #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to LEN bcd digits representing the 64-bit integer. #
# #
# ALGORITHM *********************************************************** #
# The 64-bit binary is assumed to have a decimal point before #
# bit 63. The fraction is multiplied by 10 using a mul by 2 #
# shift and a mul by 8 shift. The bits shifted out of the #
# msb form a decimal digit. This process is iterated until #
# LEN digits are formed. #
# #
# A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the #
# digit formed will be assumed the least significant. This is #
# to force the first byte formed to have a 0 in the upper 4 bits. #
# #
# A2. Beginning of the loop: #
# Copy the fraction in d2:d3 to d4:d5. #
# #
# A3. Multiply the fraction in d2:d3 by 8 using bit-field #
# extracts and shifts. The three msbs from d2 will go into d1. #
# #
# A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb #
# will be collected by the carry. #
# #
# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5 #
# into d2:d3. D1 will contain the bcd digit formed. #
# #
# A6. Test d7. If zero, the digit formed is the ms digit. If non- #
# zero, it is the ls digit. Put the digit in its place in the #
# upper word of d0. If it is the ls digit, write the word #
# from d0 to memory. #
# #
# A7. Decrement d0 (LEN counter) and repeat the loop until zero. #
# #
#########################################################################
# Implementation Notes:
#
# The registers are used as follows:
#
# d0: LEN counter
# d1: temp used to form the digit
# d2: upper 32-bits of fraction for mul by 8
# d3: lower 32-bits of fraction for mul by 8
# d4: upper 32-bits of fraction for mul by 2
# d5: lower 32-bits of fraction for mul by 2
# d6: temp for bit-field extracts
# d7: byte digit formation word;digit count {0,1}
# a0: pointer into memory for packed bcd string formation
#
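#
# Illustrative sketch (not part of the package; emit_bcd_nibble() is a
# hypothetical helper for the byte-packing handled by d7 below): each
# pass multiplies the 64-bit fraction by 10 as (x*8 + x*2) so that the
# bits carried out of bit 63 form the next decimal digit:
#
#     for (i = 0; i < len; i++) {
#         unsigned __int128 t = (unsigned __int128)frac * 10;
#         unsigned digit = (unsigned)(t >> 64);   /* 0..9          */
#         frac = (uint64_t)t;                     /* keep fraction */
#         emit_bcd_nibble(digit);                 /* two per byte  */
#     }
#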
global binstr
binstr:
movm.l &0xff00,-(%sp) # {%d0-%d7}
#
# A1: Init d7
#
mov.l &1,%d7 # init d7 for second digit
subq.l &1,%d0 # for dbf d0 would have LEN+1 passes
#
# A2. Copy d2:d3 to d4:d5. Start loop.
#
loop:
mov.l %d2,%d4 # copy the fraction before muls
mov.l %d3,%d5 # to d4:d5
#
# A3. Multiply d2:d3 by 8; extract msbs into d1.
#
bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
asl.l &3,%d2 # shift d2 left by 3 places
bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
asl.l &3,%d3 # shift d3 left by 3 places
or.l %d6,%d2 # or in msbs from d3 into d2
#
# A4. Multiply d4:d5 by 2; add carry out to d1.
#
asl.l &1,%d5 # mul d5 by 2
roxl.l &1,%d4 # mul d4 by 2
swap %d6 # put 0 in d6 lower word
addx.w %d6,%d1 # add in extend from mul by 2
#
# A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
#
add.l %d5,%d3 # add lower 32 bits
nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
addx.l %d4,%d2 # add with extend upper 32 bits
nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
addx.w %d6,%d1 # add in extend from add to d1
swap %d6 # with d6 = 0; put 0 in upper word
#
# A6. Test d7 and branch.
#
tst.w %d7 # if zero, store digit & to loop
beq.b first_d # if non-zero, form byte & write
sec_d:
swap %d7 # bring first digit to word d7b
asl.w &4,%d7 # first digit in upper 4 bits d7b
add.w %d1,%d7 # add in ls digit to d7b
mov.b %d7,(%a0)+ # store d7b byte in memory
swap %d7 # put LEN counter in word d7a
clr.w %d7 # set d7a to signal no digits done
dbf.w %d0,loop # do loop some more!
bra.b end_bstr # finished, so exit
first_d:
swap %d7 # put digit word in d7b
mov.w %d1,%d7 # put new digit in d7b
swap %d7 # put LEN counter in word d7a
addq.w &1,%d7 # set d7a to signal first digit done
dbf.w %d0,loop # do loop some more!
swap %d7 # put last digit in string
lsl.w &4,%d7 # move it to upper 4 bits
mov.b %d7,(%a0)+ # store it in memory string
#
# Clean up and return with result in fp0.
#
end_bstr:
movm.l (%sp)+,&0xff # {%d0-%d7}
rts
#########################################################################
# XDEF **************************************************************** #
# facc_in_b(): dmem_read_byte failed #
# facc_in_w(): dmem_read_word failed #
# facc_in_l(): dmem_read_long failed #
# facc_in_d(): dmem_read of dbl prec failed #
# facc_in_x(): dmem_read of ext prec failed #
# #
# facc_out_b(): dmem_write_byte failed #
# facc_out_w(): dmem_write_word failed #
# facc_out_l(): dmem_write_long failed #
# facc_out_d(): dmem_write of dbl prec failed #
# facc_out_x(): dmem_write of ext prec failed #
# #
# XREF **************************************************************** #
# _real_access() - exit through access error handler #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Flow jumps here when an FP data fetch call gets an error #
# result. This means the operating system wants an access error frame #
# made out of the current exception stack frame. #
# So, we first call restore() which makes sure that any updated #
# -(an)+ register gets returned to its pre-exception value and then #
# we change the stack to an access error stack frame. #
# #
#########################################################################
facc_in_b:
movq.l &0x1,%d0 # one byte
bsr.w restore # fix An
mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
bra.w facc_finish
facc_in_w:
movq.l &0x2,%d0 # two bytes
bsr.w restore # fix An
mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_l:
movq.l &0x4,%d0 # four bytes
bsr.w restore # fix An
mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_d:
movq.l &0x8,%d0 # eight bytes
bsr.w restore # fix An
mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_x:
movq.l &0xc,%d0 # twelve bytes
bsr.w restore # fix An
mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
################################################################
facc_out_b:
movq.l &0x1,%d0 # one byte
bsr.w restore # restore An
mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_w:
movq.l &0x2,%d0 # two bytes
bsr.w restore # restore An
mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_l:
movq.l &0x4,%d0 # four bytes
bsr.w restore # restore An
mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_d:
movq.l &0x8,%d0 # eight bytes
bsr.w restore # restore An
mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_x:
mov.l &0xc,%d0 # twelve bytes
bsr.w restore # restore An
mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
# here's where we actually create the access error frame from the
# current exception stack frame.
facc_finish:
mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp),-(%sp) # store SR, hi(PC)
mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
mov.l 0xc(%sp),0x8(%sp) # store EA
mov.l &0x00000001,0xc(%sp) # store FSLW
mov.w 0x6(%sp),0xc(%sp) # fix FSLW (size)
mov.w &0x4008,0x6(%sp) # store voff
btst &0x5,(%sp) # supervisor or user mode?
beq.b facc_out2 # user
bset &0x2,0xd(%sp) # set supervisor TM bit
facc_out2:
bra.l _real_access
##################################################################
# if the effective addressing mode was predecrement or postincrement,
# the emulation has already changed its value to the correct post-
# instruction value. but since we're exiting to the access error
# handler, then AN must be returned to its pre-instruction value.
# we do that here.
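#
# Illustrative sketch (not part of the package): with d0 holding the
# access size in bytes, restore() is simply
#
#     if (mode == POSTINC)     An -= size;    /* undo (An)+ */
#     else if (mode == PREDEC) An += size;    /* undo -(An) */
#
# the predec case below negates d0 and falls into the postinc path, so
# one table of fix-up routines serves both directions.
#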
restore:
mov.b EXC_OPWORD+0x1(%a6),%d1
andi.b &0x38,%d1 # extract opmode
cmpi.b %d1,&0x18 # postinc?
beq.w rest_inc
cmpi.b %d1,&0x20 # predec?
beq.w rest_dec
rts
rest_inc:
mov.b EXC_OPWORD+0x1(%a6),%d1
andi.w &0x0007,%d1 # fetch An
mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1
jmp (tbl_rest_inc.b,%pc,%d1.w*1)
tbl_rest_inc:
short ri_a0 - tbl_rest_inc
short ri_a1 - tbl_rest_inc
short ri_a2 - tbl_rest_inc
short ri_a3 - tbl_rest_inc
short ri_a4 - tbl_rest_inc
short ri_a5 - tbl_rest_inc
short ri_a6 - tbl_rest_inc
short ri_a7 - tbl_rest_inc
ri_a0:
sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
rts
ri_a1:
sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
rts
ri_a2:
sub.l %d0,%a2 # fix a2
rts
ri_a3:
sub.l %d0,%a3 # fix a3
rts
ri_a4:
sub.l %d0,%a4 # fix a4
rts
ri_a5:
sub.l %d0,%a5 # fix a5
rts
ri_a6:
sub.l %d0,(%a6) # fix stacked a6
rts
# if it's an fmove out instruction, we don't have to fix a7
# because we hadn't changed it yet. if it's an opclass two
# instruction (data moved in) and the exception was in supervisor
# mode, then a7 also wasn't updated. if it was user mode, then
# restore the correct a7 which is in the USP currently.
ri_a7:
cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
bne.b ri_a7_done # out
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.b ri_a7_done # supervisor
movc %usp,%a0 # restore USP
sub.l %d0,%a0
movc %a0,%usp
ri_a7_done:
rts
# need to invert adjustment value if the <ea> was predec
rest_dec:
neg.l %d0
bra.b rest_inc
|
AirFortressIlikara/LS2K0300-linux-4.19
| 761,576
|
arch/m68k/ifpsp060/src/fpsp.S
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
M68000 Hi-Performance Microprocessor Division
M68060 Software Package
Production Release P1.00 -- October 10, 1994
M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
THE SOFTWARE is provided on an "AS IS" basis and without warranty.
To the maximum extent permitted by applicable law,
MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
and any warranty against infringement with regard to the SOFTWARE
(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
To the maximum extent permitted by applicable law,
IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
so long as this entire notice is retained without alteration in any modified and/or
redistributed versions, and that such modified versions are clearly identified as such.
No licenses are granted by implication, estoppel or otherwise under any patents
or trademarks of Motorola, Inc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# freal.s:
# This file is appended to the top of the 060FPSP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located
# after _060FPSP_TABLE.
# Also, subroutine stubs exist in this file (_fpsp_done for
# example) that are referenced by the FPSP package itself in order
# to call a given routine. The stub routine actually performs the
# callout. The FPSP code does a "bsr" to the stub routine. This
# extra layer of hierarchy adds a slight performance penalty but
# it makes the FPSP code easier to read and more maintainable.
#
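#
# Illustrative sketch (not part of the package): each stub below is a
# position-independent dispatch through a table of byte offsets that
# the kernel fills in within the 0x80 bytes in front of _060FPSP_TABLE.
# In rough C (callout[] stands for that kernel-filled table):
#
#     int32_t off = callout[_off_xxx / 4];
#     void (*fn)(void) = (void (*)(void))((char *)callout + off);
#     fn();    /* the stub tail-jumps via rtd, preserving d0, so to  */
#              /* the FPSP caller it behaves like an ordinary call   */
#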
set _off_bsun, 0x00
set _off_snan, 0x04
set _off_operr, 0x08
set _off_ovfl, 0x0c
set _off_unfl, 0x10
set _off_dz, 0x14
set _off_inex, 0x18
set _off_fline, 0x1c
set _off_fpu_dis, 0x20
set _off_trap, 0x24
set _off_trace, 0x28
set _off_access, 0x2c
set _off_done, 0x30
set _off_imr, 0x40
set _off_dmr, 0x44
set _off_dmw, 0x48
set _off_irw, 0x4c
set _off_irl, 0x50
set _off_drb, 0x54
set _off_drw, 0x58
set _off_drl, 0x5c
set _off_dwb, 0x60
set _off_dww, 0x64
set _off_dwl, 0x68
_060FPSP_TABLE:
###############################################################
# Here's the table of ENTRY POINTS for those linking the package.
bra.l _fpsp_snan
short 0x0000
bra.l _fpsp_operr
short 0x0000
bra.l _fpsp_ovfl
short 0x0000
bra.l _fpsp_unfl
short 0x0000
bra.l _fpsp_dz
short 0x0000
bra.l _fpsp_inex
short 0x0000
bra.l _fpsp_fline
short 0x0000
bra.l _fpsp_unsupp
short 0x0000
bra.l _fpsp_effadd
short 0x0000
space 56
###############################################################
global _fpsp_done
_fpsp_done:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_ovfl
_real_ovfl:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_unfl
_real_unfl:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_inex
_real_inex:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_bsun
_real_bsun:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_operr
_real_operr:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_snan
_real_snan:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_dz
_real_dz:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_fline
_real_fline:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_fpu_disabled
_real_fpu_disabled:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_trap
_real_trap:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_trace
_real_trace:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_access
_real_access:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#######################################
global _imem_read
_imem_read:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read
_dmem_read:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write
_dmem_write:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_word
_imem_read_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_long
_imem_read_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_byte
_dmem_read_byte:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_word
_dmem_read_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_long
_dmem_read_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_byte
_dmem_write_byte:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_word
_dmem_write_word:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_long
_dmem_write_long:
mov.l %d0,-(%sp)
mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#
# This file contains a set of define statements for constants
# in order to promote readability within the corecode itself.
#
set LOCAL_SIZE, 192 # stack frame size(bytes)
set LV, -LOCAL_SIZE # stack offset
set EXC_SR, 0x4 # stack status register
set EXC_PC, 0x6 # stack pc
set EXC_VOFF, 0xa # stacked vector offset
set EXC_EA, 0xc # stacked <ea>
set EXC_FP, 0x0 # frame pointer
set EXC_AREGS, -68 # offset of all address regs
set EXC_DREGS, -100 # offset of all data regs
set EXC_FPREGS, -36 # offset of all fp regs
set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
set EXC_A5, EXC_AREGS+(5*4)
set EXC_A4, EXC_AREGS+(4*4)
set EXC_A3, EXC_AREGS+(3*4)
set EXC_A2, EXC_AREGS+(2*4)
set EXC_A1, EXC_AREGS+(1*4)
set EXC_A0, EXC_AREGS+(0*4)
set EXC_D7, EXC_DREGS+(7*4)
set EXC_D6, EXC_DREGS+(6*4)
set EXC_D5, EXC_DREGS+(5*4)
set EXC_D4, EXC_DREGS+(4*4)
set EXC_D3, EXC_DREGS+(3*4)
set EXC_D2, EXC_DREGS+(2*4)
set EXC_D1, EXC_DREGS+(1*4)
set EXC_D0, EXC_DREGS+(0*4)
set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
set FP_SCR1, LV+80 # fp scratch 1
set FP_SCR1_EX, FP_SCR1+0
set FP_SCR1_SGN, FP_SCR1+2
set FP_SCR1_HI, FP_SCR1+4
set FP_SCR1_LO, FP_SCR1+8
set FP_SCR0, LV+68 # fp scratch 0
set FP_SCR0_EX, FP_SCR0+0
set FP_SCR0_SGN, FP_SCR0+2
set FP_SCR0_HI, FP_SCR0+4
set FP_SCR0_LO, FP_SCR0+8
set FP_DST, LV+56 # fp destination operand
set FP_DST_EX, FP_DST+0
set FP_DST_SGN, FP_DST+2
set FP_DST_HI, FP_DST+4
set FP_DST_LO, FP_DST+8
set FP_SRC, LV+44 # fp source operand
set FP_SRC_EX, FP_SRC+0
set FP_SRC_SGN, FP_SRC+2
set FP_SRC_HI, FP_SRC+4
set FP_SRC_LO, FP_SRC+8
set USER_FPIAR, LV+40 # FP instr address register
set USER_FPSR, LV+36 # FP status register
set FPSR_CC, USER_FPSR+0 # FPSR condition codes
set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
set USER_FPCR, LV+32 # FP control register
set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
set L_SCR3, LV+28 # integer scratch 3
set L_SCR2, LV+24 # integer scratch 2
set L_SCR1, LV+20 # integer scratch 1
set STORE_FLG, LV+19 # flag: operand store (i.e. not fcmp/ftst)
set EXC_TEMP2, LV+24 # temporary space
set EXC_TEMP, LV+16 # temporary space
set DTAG, LV+15 # destination operand type
set STAG, LV+14 # source operand type
set SPCOND_FLG, LV+10 # flag: special case (see below)
set EXC_CC, LV+8 # saved condition codes
set EXC_EXTWPTR, LV+4 # saved current PC (active)
set EXC_EXTWORD, LV+2 # saved extension word
set EXC_CMDREG, LV+2 # saved extension word
set EXC_OPWORD, LV+0 # saved operation word
################################
# Helpful macros
set FTEMP, 0 # offsets within an
set FTEMP_EX, 0 # extended precision
set FTEMP_SGN, 2 # value saved in memory.
set FTEMP_HI, 4
set FTEMP_LO, 8
set FTEMP_GRS, 12
set LOCAL, 0 # offsets within an
set LOCAL_EX, 0 # extended precision
set LOCAL_SGN, 2 # value saved in memory.
set LOCAL_HI, 4
set LOCAL_LO, 8
set LOCAL_GRS, 12
set DST, 0 # offsets within an
set DST_EX, 0 # extended precision
set DST_HI, 4 # value saved in memory.
set DST_LO, 8
set SRC, 0 # offsets within an
set SRC_EX, 0 # extended precision
set SRC_HI, 4 # value saved in memory.
set SRC_LO, 8
set SGL_LO, 0x3f81 # min sgl prec exponent
set SGL_HI, 0x407e # max sgl prec exponent
set DBL_LO, 0x3c01 # min dbl prec exponent
set DBL_HI, 0x43fe # max dbl prec exponent
set EXT_LO, 0x0 # min ext prec exponent
set EXT_HI, 0x7ffe # max ext prec exponent
set EXT_BIAS, 0x3fff # extended precision bias
set SGL_BIAS, 0x007f # single precision bias
set DBL_BIAS, 0x03ff # double precision bias
set NORM, 0x00 # operand type for STAG/DTAG
set ZERO, 0x01 # operand type for STAG/DTAG
set INF, 0x02 # operand type for STAG/DTAG
set QNAN, 0x03 # operand type for STAG/DTAG
set DENORM, 0x04 # operand type for STAG/DTAG
set SNAN, 0x05 # operand type for STAG/DTAG
set UNNORM, 0x06 # operand type for STAG/DTAG
##################
# FPSR/FPCR bits #
##################
set neg_bit, 0x3 # negative result
set z_bit, 0x2 # zero result
set inf_bit, 0x1 # infinite result
set nan_bit, 0x0 # NAN result
set q_sn_bit, 0x7 # sign bit of quotient byte
set bsun_bit, 7 # branch on unordered
set snan_bit, 6 # signalling NAN
set operr_bit, 5 # operand error
set ovfl_bit, 4 # overflow
set unfl_bit, 3 # underflow
set dz_bit, 2 # divide by zero
set inex2_bit, 1 # inexact result 2
set inex1_bit, 0 # inexact result 1
set aiop_bit, 7 # accrued inexact operation bit
set aovfl_bit, 6 # accrued overflow bit
set aunfl_bit, 5 # accrued underflow bit
set adz_bit, 4 # accrued dz bit
set ainex_bit, 3 # accrued inexact bit
#############################
# FPSR individual bit masks #
#############################
set neg_mask, 0x08000000 # negative bit mask (lw)
set inf_mask, 0x02000000 # infinity bit mask (lw)
set z_mask, 0x04000000 # zero bit mask (lw)
set nan_mask, 0x01000000 # nan bit mask (lw)
set neg_bmask, 0x08 # negative bit mask (byte)
set inf_bmask, 0x02 # infinity bit mask (byte)
set z_bmask, 0x04 # zero bit mask (byte)
set nan_bmask, 0x01 # nan bit mask (byte)
set bsun_mask, 0x00008000 # bsun exception mask
set snan_mask, 0x00004000 # snan exception mask
set operr_mask, 0x00002000 # operr exception mask
set ovfl_mask, 0x00001000 # overflow exception mask
set unfl_mask, 0x00000800 # underflow exception mask
set dz_mask, 0x00000400 # dz exception mask
set inex2_mask, 0x00000200 # inex2 exception mask
set inex1_mask, 0x00000100 # inex1 exception mask
set aiop_mask, 0x00000080 # accrued illegal operation
set aovfl_mask, 0x00000040 # accrued overflow
set aunfl_mask, 0x00000020 # accrued underflow
set adz_mask, 0x00000010 # accrued divide by zero
set ainex_mask, 0x00000008 # accrued inexact
######################################
# FPSR combinations used in the FPSP #
######################################
set dzinf_mask, inf_mask+dz_mask+adz_mask
set opnan_mask, nan_mask+operr_mask+aiop_mask
set nzi_mask, 0x01ffffff #clears N, Z, and I
set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
set inx1a_mask, inex1_mask+ainex_mask
set inx2a_mask, inex2_mask+ainex_mask
set snaniop_mask, nan_mask+snan_mask+aiop_mask
set snaniop2_mask, snan_mask+aiop_mask
set naniop_mask, nan_mask+aiop_mask
set neginf_mask, neg_mask+inf_mask
set infaiop_mask, inf_mask+aiop_mask
set negz_mask, neg_mask+z_mask
set opaop_mask, operr_mask+aiop_mask
set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
#########
# misc. #
#########
set rnd_stky_bit, 29 # stky bit pos in longword
set sign_bit, 0x7 # sign bit
set signan_bit, 0x6 # signalling nan bit
set sgl_thresh, 0x3f81 # minimum sgl exponent
set dbl_thresh, 0x3c01 # minimum dbl exponent
set x_mode, 0x0 # extended precision
set s_mode, 0x4 # single precision
set d_mode, 0x8 # double precision
set rn_mode, 0x0 # round-to-nearest
set rz_mode, 0x1 # round-to-zero
set rm_mode, 0x2 # round-to-minus-infinity
set rp_mode, 0x3 # round-to-plus-infinity
set mantissalen, 64 # length of mantissa in bits
set BYTE, 1 # len(byte) == 1 byte
set WORD, 2 # len(word) == 2 bytes
set LONG, 4 # len(longword) == 4 bytes
set BSUN_VEC, 0xc0 # bsun vector offset
set INEX_VEC, 0xc4 # inexact vector offset
set DZ_VEC, 0xc8 # dz vector offset
set UNFL_VEC, 0xcc # unfl vector offset
set OPERR_VEC, 0xd0 # operr vector offset
set OVFL_VEC, 0xd4 # ovfl vector offset
set SNAN_VEC, 0xd8 # snan vector offset
###########################
# SPecial CONDition FLaGs #
###########################
set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
set fbsun_flg, 0x02 # flag bit: bsun exception
set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
set mda7_flg, 0x08 # flag bit: -(a7) <ea>
set fmovm_flg, 0x40 # flag bit: fmovm instruction
set immed_flg, 0x80 # flag bit: &<data> <ea>
set ftrapcc_bit, 0x0
set fbsun_bit, 0x1
set mia7_bit, 0x2
set mda7_bit, 0x3
set immed_bit, 0x7
##################################
# TRANSCENDENTAL "LAST-OP" FLAGS #
##################################
set FMUL_OP, 0x0 # fmul instr performed last
set FDIV_OP, 0x1 # fdiv performed last
set FADD_OP, 0x2 # fadd performed last
set FMOV_OP, 0x3 # fmov performed last
#############
# CONSTANTS #
#############
T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
TWOBYPI:
long 0x3FE45F30,0x6DC9C883
#########################################################################
# XDEF **************************************************************** #
# _fpsp_ovfl(): 060FPSP entry point for FP Overflow exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Overflow exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
# tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
# _real_ovfl() - "callout" for Overflow exception enabled code #
# _real_inex() - "callout" for Inexact exception enabled code #
# _real_trace() - "callout" for Trace exception code #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Ovfl exception stack frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# Overflow Exception enabled: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# Overflow Exception disabled: #
# - The system stack is unchanged #
# - The "exception present" flag in the fsave frame is cleared #
# #
# ALGORITHM *********************************************************** #
# On the 060, if an FP overflow is present as the result of any #
# instruction, the 060 will take an overflow exception whether the #
# exception is enabled or disabled in the FPCR. For the disabled case, #
# this handler emulates the instruction to determine what the correct #
# default result should be for the operation. This default result is #
# then stored in either the FP regfile, data regfile, or memory. #
# Finally, the handler exits through the "callout" _fpsp_done() #
# denoting that no exceptional conditions exist within the machine. #
# If the exception is enabled, then this handler must create the #
# exceptional operand and place it in the fsave state frame, and store #
# the default result (only if the instruction is opclass 3). For #
# exceptions enabled, this handler must exit through the "callout" #
# _real_ovfl() so that the operating system enabled overflow handler #
# can handle this case. #
# Two other conditions exist. First, if overflow was disabled #
# but the inexact exception was enabled, this handler must exit #
# through the "callout" _real_inex() regardless of whether the result #
# was inexact. #
# Also, in the case of an opclass three instruction where #
# overflow was disabled and the trace exception was enabled, this #
# handler must exit through the "callout" _real_trace(). #
# #
#########################################################################
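#
# Illustrative sketch (not part of the package; names are hypothetical):
# the exit logic of this handler boils down to
#
#     emulate_and_store_default();       /* tbl_unsupp or fout()      */
#     if (fpcr_enable & OVFL_EN)
#         return _real_ovfl();           /* EXOP left in fsave frame  */
#     if (fpcr_enable & INEX2_EN)
#         return _real_inex();
#     if (opclass3 && trace_pending)
#         return _real_trace();
#     return _fpsp_done();
#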
global _fpsp_ovfl
_fpsp_ovfl:
#$# sub.l &24,%sp # make room for src/dst
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
bne.w fovfl_out
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
# since, I believe, only NORMs and DENORMs can come through here,
# maybe we can avoid the subroutine call.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bit five of the fp extension word separates the monadic and dyadic operations
# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
# will never take this exception.
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fovfl_extract # monadic
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fovfl_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fovfl_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
fovfl_extract:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
# maybe we can make these entry points ONLY the OVFL entry points of each routine.
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
# the operation has been emulated. the result is in fp0.
# the EXOP, if an exception occurred, is in fp1.
# we must save the default result regardless of whether
# traps are enabled or disabled.
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
# the exceptional possibilities we have left ourselves with are ONLY overflow
# and inexact. and, the inexact is such that overflow occurred and was disabled
# but inexact was enabled.
btst &ovfl_bit,FPCR_ENABLE(%a6)
bne.b fovfl_ovfl_on
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.b fovfl_inex_on
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
bra.l _fpsp_done
# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
# in fp1. now, simply jump to _real_ovfl()!
fovfl_ovfl_on:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.w &0xe005,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_ovfl
# overflow occurred but is disabled. meanwhile, inexact is enabled. Therefore,
# we must jump to real_inex().
fovfl_inex_on:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_inex
########################################################################
fovfl_out:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
# the src operand is definitely a NORM(!), so tag it as such
mov.b &NORM,STAG(%a6) # set src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout
btst &ovfl_bit,FPCR_ENABLE(%a6)
bne.w fovfl_ovfl_on
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.w fovfl_inex_on
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
btst &0x7,(%sp) # is trace on?
beq.l _fpsp_done # no
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
bra.l _real_trace
#########################################################################
# XDEF **************************************************************** #
# _fpsp_unfl(): 060FPSP entry point for FP Underflow exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Underflow exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
# tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
# _real_ovfl() - "callout" for Overflow exception enabled code #
# _real_inex() - "callout" for Inexact exception enabled code #
# _real_trace() - "callout" for Trace exception code #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Unfl exception stack frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# Underflow Exception enabled: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# Underflow Exception disabled: #
# - The system stack is unchanged #
# - The "exception present" flag in the fsave frame is cleared #
# #
# ALGORITHM *********************************************************** #
# On the 060, if an FP underflow is present as the result of any #
# instruction, the 060 will take an underflow exception whether the #
# exception is enabled or disabled in the FPCR. For the disabled case, #
# this handler emulates the instruction to determine what the correct #
# default result should be for the operation. This default result is #
# then stored in either the FP regfile, data regfile, or memory. #
# Finally, the handler exits through the "callout" _fpsp_done() #
# denoting that no exceptional conditions exist within the machine. #
# If the exception is enabled, then this handler must create the #
# exceptional operand and place it in the fsave state frame, and store #
# the default result (only if the instruction is opclass 3). For #
# exceptions enabled, this handler must exit through the "callout" #
# _real_unfl() so that the operating system enabled underflow handler #
# can handle this case. #
# Two other conditions exist. First, if underflow was disabled #
# but the inexact exception was enabled and the result was inexact, #
# this handler must exit through the "callout" _real_inex(). #
# Also, in the case of an opclass three instruction where #
# underflow was disabled and the trace exception was enabled, this #
# handler must exit through the "callout" _real_trace(). #
# #
#########################################################################
global _fpsp_unfl
_fpsp_unfl:
#$# sub.l &24,%sp # make room for src/dst
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
bne.w funfl_out
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bit five of the fp ext word separates the monadic and dyadic operations
# that can pass through fpsp_unfl(). remember that fcmp, and ftst
# will never take this exception.
btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
beq.b funfl_extract # monadic
# now, what's left that's not dyadic is fsincos. we can distinguish it
# from all dyadics by the '0110xxx' pattern
btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
bne.b funfl_extract # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b funfl_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
funfl_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
funfl_extract:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
andi.l &0x00ff01ff,USER_FPSR(%a6)
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
# maybe we can make these entry points ONLY the OVFL entry points of each routine.
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception. Since this is incorrect, we need to check
# if our emulation, after re-doing the operation, decided that
# no underflow was called for. We do these checks only in
# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
# special case will simply exit gracefully with the correct result.
# the exceptional possibilities we have left ourselves with are ONLY underflow
# and inexact. and, the inexact is such that underflow occurred and was disabled
# but inexact was enabled.
btst &unfl_bit,FPCR_ENABLE(%a6)
bne.b funfl_unfl_on
funfl_chkinex:
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.b funfl_inex_on
funfl_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
bra.l _fpsp_done
# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
# in fp1 (don't forget to save fp0). what to do now?
# well, we simply have to go to _real_unfl()!
funfl_unfl_on:
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception. Since this is incorrect, we check here to see
# if our emulation, after re-doing the operation, decided that
# no underflow was called for.
btst &unfl_bit,FPSR_EXCEPT(%a6)
beq.w funfl_chkinex
funfl_unfl_on2:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
mov.w &0xe003,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_unfl
# underflow occurred but is disabled. meanwhile, inexact is enabled. Therefore,
# we must jump to real_inex().
funfl_inex_on:
# The `060 FPU multiplier hardware is such that if the result of a
# multiply operation is the smallest possible normalized number
# (0x00000000_80000000_00000000), then the machine will take an
# underflow exception.
# But, whether bogus or not, if inexact is enabled AND it occurred,
# then we have to branch to real_inex.
btst &inex2_bit,FPSR_EXCEPT(%a6)
beq.w funfl_exit
funfl_inex_on2:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # save exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
unlk %a6
bra.l _real_inex
#######################################################################
funfl_out:
#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
# the src operand is definitely a NORM(!), so tag it as such
mov.b &NORM,STAG(%a6) # set src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout
btst &unfl_bit,FPCR_ENABLE(%a6)
bne.w funfl_unfl_on2
btst &inex2_bit,FPCR_ENABLE(%a6)
bne.w funfl_inex_on2
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
#$# add.l &24,%sp
btst &0x7,(%sp) # is trace on?
beq.l _fpsp_done # no
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
bra.l _real_trace
#########################################################################
# XDEF **************************************************************** #
# _fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented #
# Data Type" exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Unimplemented Data Type exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_{word,long}() - read instruction word/longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# load_fpn1() - load src operand from FP regfile #
# fout() - emulate an opclass 3 instruction #
#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
# _real_inex() - "callout" to operating system inexact handler #
# _fpsp_done() - "callout" for exit; work all done #
# _real_trace() - "callout" for Trace enabled exception #
# funimp_skew() - adjust fsave src ops to "incorrect" value #
# _real_snan() - "callout" for SNAN exception #
# _real_operr() - "callout" for OPERR exception #
# _real_ovfl() - "callout" for OVFL exception #
# _real_unfl() - "callout" for UNFL exception #
# get_packed() - fetch packed operand from memory #
# #
# INPUT *************************************************************** #
# - The system stack contains the "Unimp Data Type" stk frame #
# - The fsave frame contains the ssrc op (for UNNORM/DENORM) #
# #
# OUTPUT ************************************************************** #
# If Inexact exception (opclass 3): #
# - The system stack is changed to an Inexact exception stk frame #
# If SNAN exception (opclass 3): #
# - The system stack is changed to an SNAN exception stk frame #
# If OPERR exception (opclass 3): #
# - The system stack is changed to an OPERR exception stk frame #
# If OVFL exception (opclass 3): #
# - The system stack is changed to an OVFL exception stk frame #
# If UNFL exception (opclass 3): #
# - The system stack is changed to an UNFL exception stack frame #
# If Trace exception enabled: #
# - The system stack is changed to a Trace exception stack frame #
# Else: (normal case) #
# - Correct result has been stored as appropriate #
# #
# ALGORITHM *********************************************************** #
# Two main instruction types can enter here: (1) DENORM or UNNORM #
# unimplemented data types. These can be either opclass 0,2 or 3 #
# instructions, and (2) PACKED unimplemented data format instructions #
# also of opclasses 0,2, or 3. #
# For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
# operand from the fsave state frame and the dst operand (if dyadic) #
# from the FP register file. The instruction is then emulated by #
# choosing an emulation routine from a table of routines indexed by #
# instruction type. Once the instruction has been emulated and result #
# saved, then we check to see if any enabled exceptions resulted from #
# instruction emulation. If none, then we exit through the "callout" #
# _fpsp_done(). If there is an enabled FP exception, then we insert #
# this exception into the FPU in the fsave state frame and then exit #
# through _fpsp_done(). #
# PACKED opclass 0 and 2 is similar in how the instruction is #
# emulated and exceptions handled. The differences occur in how the #
# handler loads the packed op (by calling get_packed() routine) and #
# by the fact that a Trace exception could be pending for PACKED ops. #
# If a Trace exception is pending, then the current exception stack #
# frame is changed to a Trace exception stack frame and an exit is #
# made through _real_trace(). #
# For UNNORM/DENORM opclass 3, the actual move out to memory is #
# performed by calling the routine fout(). If no exception should occur #
# as the result of emulation, then an exit either occurs through #
# _fpsp_done() or through _real_trace() if a Trace exception is pending #
# (a Trace stack frame must be created here, too). If an FP exception #
# should occur, then we must create an exception stack frame of that #
# type and jump to either _real_snan(), _real_operr(), _real_inex(), #
# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3 #
# emulation is performed in a similar manner. #
# #
#########################################################################
#
# (1) DENORM and UNNORM (unimplemented) data types:
#
# post-instruction
# *****************
# * EA *
# pre-instruction * *
# ***************** *****************
# * 0x0 * 0x0dc * * 0x3 * 0x0dc *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
#
# (2) PACKED format (unsupported) opclasses two and three:
# *****************
# * EA *
# * *
# *****************
# * 0x2 * 0x0dc *
# *****************
# * Next *
# * PC *
# *****************
# * SR *
# *****************
#
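#
# A rough dispatch sketch of the handler below (illustrative only; the
# code itself is the authoritative flow):
#
#	if (opclass == 3)			/* fmove out */
#		goto fu_out;			/* packed move-outs split off inside */
#	else if (opclass == 2 && src fmt == packed)
#		goto fu_in_pack;
#	else	/* UNNORM/DENORM src, opclass 0 or 2 */
#		fix/tag the operands, emulate through tbl_unsupp,
#		then service any enabled exceptions;
#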
global _fpsp_unsupp
_fpsp_unsupp:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # save fp state
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
btst &0x5,EXC_SR(%a6) # user or supervisor mode?
bne.b fu_s
fu_u:
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # save on stack
bra.b fu_cont
# if the exception is an opclass zero or two unimplemented data type
# exception, then the a7' calculated here is wrong since it doesn't
# stack an ea. however, we don't need an a7' for this case anyways.
fu_s:
lea 0x4+EXC_EA(%a6),%a0 # load old a7'
mov.l %a0,EXC_A7(%a6) # save on stack
fu_cont:
# the FPIAR holds the "current PC" of the faulting instruction
# the FPIAR should be set correctly for ALL exceptions passing through
# this point.
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
############################
clr.b SPCOND_FLG(%a6) # clear special condition flag
# Separate opclass three (fpn-to-mem) ops since they have a different
# stack frame and protocol.
btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
bne.w fu_out # yes
# Separate packed opclass two instructions.
bfextu EXC_CMDREG(%a6){&0:&6},%d0
cmpi.b %d0,&0x13
beq.w fu_in_pack
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field
andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
# Opclass two w/ memory-to-fpn operation will have an incorrect extended
# precision format if the src format was single or double and the
# source data type was an INF, NAN, DENORM, or UNNORM
lea FP_SRC(%a6),%a0 # pass ptr to input
bsr.l fix_skewed_ops
# we don't know whether the src operand or the dst operand (or both) is the
# UNNORM or DENORM. call the function that tags the operand type. if the
# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2 # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2:
mov.b %d0,STAG(%a6) # save src optype tag
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
# bit five of the fp extension word separates the monadic and dyadic operations
# at this point
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fu_extract # monadic
cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
beq.b fu_extract # yes, so it's monadic, too
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
fu_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all dyadic ops
# OPERR : fsqrt(-NORM)
# OVFL : all except ftst,fcmp
# UNFL : all except ftst,fcmp
# DZ : fdiv
# INEX2 : all except ftst,fcmp
# INEX1 : none (packed doesn't go through here)
#
# we determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions set
bne.b fu_in_ena # some are enabled
fu_in_cont:
# fcmp and ftst do not store any result.
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
andi.b &0x38,%d0 # extract bits 3-5
cmpi.b %d0,&0x38 # is instr fcmp or ftst?
beq.b fu_in_exit # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l store_fpreg # store the result
fu_in_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
bra.l _fpsp_done
fu_in_ena:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_in_exc # there is at least one set
#
# No exceptions occurred that were also enabled. Now:
#
# if (OVFL && ovfl_disabled && inexact_enabled) {
# branch to _real_inex() (even if the result was exact!);
# } else {
# save the result in the proper fp reg (unless the op is fcmp or ftst);
# return;
# }
#
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.b fu_in_cont # no
fu_in_ovflchk:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.b fu_in_cont # no
bra.w fu_in_exc_ovfl # go insert overflow frame
#
# An exception occurred and that exception was enabled:
#
# shift enabled exception field into lo byte of d0;
# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
# /*
# * this is the case where we must call _real_inex() now or else
# * there will be no other way to pass it the exceptional operand
# */
# call _real_inex();
# } else {
# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
# }
#
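# (for illustration only: if SNAN is the highest-priority exception that is
# both set and enabled, the bfffo in fu_in_ena above returns bit offset 25,
# the subi below reduces that to 1, and tbl_except[1] = 0xe006 -- the same
# fsave status word that fu_snan stuffs for an SNAN.)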
fu_in_exc:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX? (6)
bne.b fu_in_exc_exit # no
# the enabled exception was inexact
btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
bne.w fu_in_exc_unfl # yes
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
bne.w fu_in_exc_ovfl # yes
# here, we insert the correct fsave status value into the fsave frame for the
# corresponding exception. the operand in the fsave frame should be the original
# src operand.
fu_in_exc_exit:
mov.l %d0,-(%sp) # save d0
bsr.l funimp_skew # skew sgl or dbl inputs
mov.l (%sp)+,%d0 # restore d0
mov.w (tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6
bra.l _fpsp_done
tbl_except:
short 0xe000,0xe006,0xe004,0xe005
short 0xe003,0xe002,0xe001,0xe001
fu_in_exc_unfl:
mov.w &0x4,%d0
bra.b fu_in_exc_exit
fu_in_exc_ovfl:
mov.w &0x03,%d0
bra.b fu_in_exc_exit
# If the input operand to this operation was opclass two and a single
# or double precision denorm, inf, or nan, the operand needs to be
# "corrected" in order to have the proper equivalent extended precision
# number.
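# a hedged example (values chosen for illustration): a single-precision
# DENORM arrives with its exponent field reading $3f80 and its j-bit clear.
# fso_sgl_dnrm below normalizes the mantissa (call the shift count k) and
# rewrites the exponent as $3f81 - k, producing the true extended-precision
# equivalent of the operand.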
global fix_skewed_ops
fix_skewed_ops:
bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
beq.b fso_sgl # yes
cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
beq.b fso_dbl # yes
rts # no
fso_sgl:
mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
beq.b fso_sgl_dnrm_zero # yes
cmpi.w %d0,&0x407f # no; is |exp| == $407f?
beq.b fso_infnan # yes
rts # no
fso_sgl_dnrm_zero:
andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
beq.b fso_zero # it's a skewed zero
fso_sgl_dnrm:
# here, we count on norm not to alter a0...
bsr.l norm # normalize mantissa
neg.w %d0 # -shft amt
addi.w &0x3f81,%d0 # adjust new exponent
andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
or.w %d0,LOCAL_EX(%a0) # insert new exponent
rts
fso_zero:
andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
rts
fso_infnan:
andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
rts
fso_dbl:
mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
beq.b fso_dbl_dnrm_zero # yes
cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
beq.b fso_infnan # yes
rts # no
fso_dbl_dnrm_zero:
andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
bne.b fso_dbl_dnrm # it's a skewed denorm
tst.l LOCAL_LO(%a0) # is it a zero?
beq.b fso_zero # yes
fso_dbl_dnrm:
# here, we count on norm not to alter a0...
bsr.l norm # normalize mantissa
neg.w %d0 # -shft amt
addi.w &0x3c01,%d0 # adjust new exponent
andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
or.w %d0,LOCAL_EX(%a0) # insert new exponent
rts
#################################################################
# fmove out took an unimplemented data type exception.
# the src operand is in FP_SRC. Call fout() to write out the result and
# to determine which exceptions, if any, to take.
fu_out:
# Separate packed move outs from the UNNORM and DENORM move outs.
bfextu EXC_CMDREG(%a6){&3:&3},%d0
cmpi.b %d0,&0x3
beq.w fu_out_pack
cmpi.b %d0,&0x7
beq.w fu_out_pack
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field.
# fmove out doesn't affect ccodes.
and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
# call here. just figure out what it is...
mov.w FP_SRC_EX(%a6),%d0 # get exponent
andi.w &0x7fff,%d0 # strip sign
beq.b fu_out_denorm # it's a DENORM
lea FP_SRC(%a6),%a0
bsr.l unnorm_fix # yes; fix it
mov.b %d0,STAG(%a6)
bra.b fu_out_cont
fu_out_denorm:
mov.b &DENORM,STAG(%a6)
fu_out_cont:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
lea FP_SRC(%a6),%a0 # pass ptr to src operand
mov.l (%a6),EXC_A6(%a6) # in case a6 changes
bsr.l fout # call fmove out routine
# Exceptions in order of precedence:
# BSUN : none
# SNAN : none
# OPERR : fmove.{b,w,l} out of large UNNORM
# OVFL : fmove.{s,d}
# UNFL : fmove.{s,d,x}
# DZ : none
# INEX2 : all
# INEX1 : none (packed doesn't travel through here)
# determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_out_ena # some are enabled
fu_out_done:
mov.l EXC_A6(%a6),(%a6) # in case a6 changed
# on extended precision opclass three instructions using pre-decrement or
# post-increment addressing mode, the address register is not updated. if the
# address register was the stack pointer used from user mode, then let's update
# it here. if it was used from supervisor mode, then we have to handle this
# as a special case.
btst &0x5,EXC_SR(%a6)
bne.b fu_out_done_s
mov.l EXC_A7(%a6),%a0 # restore a7
mov.l %a0,%usp
fu_out_done_cont:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
btst &0x7,(%sp) # is trace on?
bne.b fu_out_trace # yes
bra.l _fpsp_done
# is the ea mode pre-decrement of the stack pointer from supervisor mode?
# ("fmov.x fpm,-(a7)") if so,
fu_out_done_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.b fu_out_done_cont
# the extended precision result is still in fp0. but, we need to save it
# somewhere on the stack until we can copy it to its final resting place.
# here, we're counting on the top of the stack to be the old place-holders
# for fp0/fp1 which have already been restored. that way, we can write
# over those destinations with the shifted stack frame.
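# (roughly, as an illustrative view of the copies below: the SR/PC words of
# the exception frame are slid "down" 12 bytes, the 12-byte extended result
# parked in FP_SRC is written over the frame's old location, and the final
# add.l leaves the stack pointer at the shifted frame.)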
fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
btst &0x7,(%sp)
bne.b fu_out_trace
bra.l _fpsp_done
fu_out_ena:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_out_exc # there is at least one set
# no exceptions were set.
# if a disabled overflow occurred and inexact was enabled, then a branch
# to _real_inex() is made (even though the result was exact).
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.w fu_out_done # no
fu_out_ovflchk:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.w fu_out_done # no
bra.w fu_inex # yes
#
# The fp move out that took the "Unimplemented Data Type" exception was
# being traced. Since the stack frames are similar, get the "current" PC
# from FPIAR and put it in the trace stack frame then jump to _real_trace().
#
# UNSUPP FRAME TRACE FRAME
# ***************** *****************
# * EA * * Current *
# * * * PC *
# ***************** *****************
# * 0x3 * 0x0dc * * 0x2 * 0x024 *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
#
fu_out_trace:
mov.w &0x2024,0x6(%sp)
fmov.l %fpiar,0x8(%sp)
bra.l _real_trace
# an exception occurred and that exception was enabled.
fu_out_exc:
subi.l &24,%d0 # fix offset to be 0-8
# we don't mess with the existing fsave frame. just re-insert it and
# jump to the "_real_{}()" handler...
mov.w (tbl_fu_out.b,%pc,%d0.w*2),%d0
jmp (tbl_fu_out.b,%pc,%d0.w*1)
swbeg &0x8
tbl_fu_out:
short tbl_fu_out - tbl_fu_out # BSUN can't happen
short tbl_fu_out - tbl_fu_out # SNAN can't happen
short fu_operr - tbl_fu_out # OPERR
short fu_ovfl - tbl_fu_out # OVFL
short fu_unfl - tbl_fu_out # UNFL
short tbl_fu_out - tbl_fu_out # DZ can't happen
short fu_inex - tbl_fu_out # INEX2
short tbl_fu_out - tbl_fu_out # INEX1 won't make it here
# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
# frestore it.
fu_snan:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
mov.w &0xe006,2+FP_SRC(%a6)
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_snan
fu_operr:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
mov.w &0xe004,2+FP_SRC(%a6)
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_operr
fu_ovfl:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
mov.w &0xe005,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_ovfl
# underflow can happen for extended precision. extended precision opclass
# three instruction exceptions don't update the stack pointer. so, if the
# exception occurred from user mode, then simply update a7 and exit normally.
# if the exception occurred from supervisor mode, check if the <ea> mode was -(a7).
fu_unfl:
mov.l EXC_A6(%a6),(%a6) # restore a6
btst &0x5,EXC_SR(%a6)
bne.w fu_unfl_s
mov.l EXC_A7(%a6),%a0 # restore a7 whether we need
mov.l %a0,%usp # to or not...
fu_unfl_cont:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
mov.w &0xe003,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_unfl
fu_unfl_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
bne.b fu_unfl_cont
# the extended precision result is still in fp0. but, we need to save it
# somewhere on the stack until we can copy it to its final resting place
# (where the exc frame is currently). make sure it's not at the top of the
# frame or it will get overwritten when the exc stack frame is shifted "down".
fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
mov.w &0xe003,2+FP_DST(%a6)
frestore FP_DST(%a6) # restore EXOP
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_unfl
# fmove in and out enter here.
fu_inex:
fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6)
frestore FP_SRC(%a6) # restore EXOP
unlk %a6
bra.l _real_inex
#########################################################################
#########################################################################
fu_in_pack:
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field
andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bsr.l get_packed # fetch packed src operand
lea FP_SRC(%a6),%a0 # pass ptr to src
bsr.l set_tag_x # set src optype tag
mov.b %d0,STAG(%a6) # save src optype tag
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
# bit five of the fp extension word separates the monadic and dyadic operations
# at this point
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b fu_extract_p # monadic
cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
beq.b fu_extract_p # yes, so it's monadic, too
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_done_p # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_done_p:
mov.b %d0,DTAG(%a6) # save dst optype tag
fu_extract_p:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all dyadic ops
# OPERR : fsqrt(-NORM)
# OVFL : all except ftst,fcmp
# UNFL : all except ftst,fcmp
# DZ : fdiv
# INEX2 : all except ftst,fcmp
# INEX1 : all
#
# we determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_in_ena_p # some are enabled
fu_in_cont_p:
# fcmp and ftst do not store any result.
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
andi.b &0x38,%d0 # extract bits 3-5
cmpi.b %d0,&0x38 # is instr fcmp or ftst?
beq.b fu_in_exit_p # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l store_fpreg # store the result
fu_in_exit_p:
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.w fu_in_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_in_exit_cont_p:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
# the exception occurred in supervisor mode. check to see if the
# addressing mode was (a7)+. if so, we'll need to shift the
# stack frame "up".
fu_in_exit_s_p:
btst &mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
beq.b fu_in_exit_cont_p # no
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
# shift the stack frame "up". we don't really care about the <ea> field.
mov.l 0x4(%sp),0x10(%sp)
mov.l 0x0(%sp),0xc(%sp)
add.l &0xc,%sp
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
fu_in_ena_p:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled & set
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b fu_in_exc_p # at least one was set
#
# No exceptions occurred that were also enabled. Now:
#
# if (OVFL && ovfl_disabled && inexact_enabled) {
# branch to _real_inex() (even if the result was exact!);
# } else {
# save the result in the proper fp reg (unless the op is fcmp or ftst);
# return;
# }
#
btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
beq.w fu_in_cont_p # no
fu_in_ovflchk_p:
btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
beq.w fu_in_cont_p # no
bra.w fu_in_exc_ovfl_p # do _real_inex() now
#
# An exception occurred and that exception was enabled:
#
# shift enabled exception field into lo byte of d0;
# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
# /*
# * this is the case where we must call _real_inex() now or else
# * there will be no other way to pass it the exceptional operand
# */
# call _real_inex();
# } else {
# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
# }
#
fu_in_exc_p:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
blt.b fu_in_exc_exit_p # no
# the enabled exception was inexact
btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
bne.w fu_in_exc_unfl_p # yes
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
bne.w fu_in_exc_ovfl_p # yes
# here, we insert the correct fsave status value into the fsave frame for the
# corresponding exception. the operand in the fsave frame should be the original
# src operand.
# as a reminder for future predicted pain and agony, we are passing in fsave the
# "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
# this is INCORRECT for enabled SNAN which would give the user the skewed SNAN!!!
fu_in_exc_exit_p:
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.w fu_in_exc_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_in_exc_exit_cont_p:
mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6
btst &0x7,(%sp) # is trace enabled?
bne.w fu_trace_p # yes
bra.l _fpsp_done
tbl_except_p:
short 0xe000,0xe006,0xe004,0xe005
short 0xe003,0xe002,0xe001,0xe001
fu_in_exc_ovfl_p:
mov.w &0x3,%d0
bra.w fu_in_exc_exit_p
fu_in_exc_unfl_p:
mov.w &0x4,%d0
bra.w fu_in_exc_exit_p
fu_in_exc_exit_s_p:
btst &mia7_bit,SPCOND_FLG(%a6)
beq.b fu_in_exc_exit_cont_p
mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore src op
unlk %a6 # unravel stack frame
# shift stack frame "up". who cares about <ea> field.
mov.l 0x4(%sp),0x10(%sp)
mov.l 0x0(%sp),0xc(%sp)
add.l &0xc,%sp
btst &0x7,(%sp) # is trace on?
bne.b fu_trace_p # yes
bra.l _fpsp_done # exit to os
#
# The opclass two PACKED instruction that took an "Unimplemented Data Type"
# exception was being traced. Make the "current" PC the FPIAR and put it in the
# trace stack frame then jump to _real_trace().
#
# UNSUPP FRAME TRACE FRAME
# ***************** *****************
# * EA * * Current *
# * * * PC *
# ***************** *****************
# * 0x2 * 0x0dc * * 0x2 * 0x024 *
# ***************** *****************
# * Next * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
fu_trace_p:
mov.w &0x2024,0x6(%sp)
fmov.l %fpiar,0x8(%sp)
bra.l _real_trace
#########################################################
#########################################################
fu_out_pack:
# I'm not sure at this point what FPSR bits are valid for this instruction.
# so, since the emulation routines re-create them anyways, zero exception field.
# fmove out doesn't affect ccodes.
and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l load_fpn1
# unlike other opclass 3 unimplemented data type exceptions, packed must be
# able to detect all operand types.
lea FP_SRC(%a6),%a0
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b fu_op2_p # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
fu_op2_p:
mov.b %d0,STAG(%a6) # save src optype tag
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
lea FP_SRC(%a6),%a0 # pass ptr to src operand
mov.l (%a6),EXC_A6(%a6) # in case a6 changes
bsr.l fout # call fmove out routine
# Exceptions in order of precedence:
# BSUN : no
# SNAN : yes
# OPERR : if ((k_factor > +17) || (dec. exp exceeds 3 digits))
# OVFL : no
# UNFL : no
# DZ : no
# INEX2 : yes
# INEX1 : no
# determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w fu_out_ena_p # some are enabled
fu_out_exit_p:
mov.l EXC_A6(%a6),(%a6) # restore a6
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.b fu_out_exit_s_p # supervisor
mov.l EXC_A7(%a6),%a0 # update user a7
mov.l %a0,%usp
fu_out_exit_cont_p:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel stack frame
btst &0x7,(%sp) # is trace on?
bne.w fu_trace_p # yes
bra.l _fpsp_done # exit to os
# the exception occurred in supervisor mode. check to see if the
# addressing mode was -(a7). if so, we'll need to shift the
# stack frame "down".
fu_out_exit_s_p:
btst &mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
beq.b fu_out_exit_cont_p # no
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
# now, copy the result to the proper place on the stack
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
add.l &LOCAL_SIZE-0x8,%sp
btst &0x7,(%sp)
bne.w fu_trace_p
bra.l _fpsp_done
fu_out_ena_p:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
bfffo %d0{&24:&8},%d0 # find highest priority exception
beq.w fu_out_exit_p
mov.l EXC_A6(%a6),(%a6) # restore a6
# an exception occurred and that exception was enabled.
# the only exceptions possible on packed move out are INEX, OPERR, and SNAN.
fu_out_exc_p:
cmpi.b %d0,&0x1a
bgt.w fu_inex_p2
beq.w fu_operr_p
fu_snan_p:
btst &0x5,EXC_SR(%a6)
bne.b fu_snan_s_p
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_snan
fu_snan_s_p:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_snan
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
	mov.w		&0x30d8,EXC_VOFF(%a6)	# vector offset = 0xd8
mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_snan
fu_operr_p:
btst &0x5,EXC_SR(%a6)
bne.w fu_operr_p_s
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_operr
fu_operr_p_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_operr
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_operr
fu_inex_p2:
btst &0x5,EXC_SR(%a6)
bne.w fu_inex_s_p2
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
bra.w fu_inex
fu_inex_s_p2:
cmpi.b SPCOND_FLG(%a6),&mda7_flg
bne.w fu_inex
# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
# the strategy is to move the exception frame "down" 12 bytes. then, we
# can store the default result where the exception frame was.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
frestore FP_SRC(%a6) # restore src operand
mov.l (%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
# now, we copy the default result to its proper location
mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_inex
#########################################################################
#
# if we're stuffing a source operand back into an fsave frame then we
# have to make sure that, for single or double source operands, the
# format stuffed is as weird as the hardware usually makes it.
#
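# a hedged example of the "skew" applied here (illustrative values): a
# single-precision DENORM source with a nonzero exponent at or below $3f80
# gets its mantissa shifted right by ($3f81 - exp), its j-bit set, and its
# exponent field forced to $3f80 -- i.e. the inverse of the correction that
# fix_skewed_ops applies on the way in.
#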
global funimp_skew
funimp_skew:
bfextu EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
cmpi.b %d0,&0x1 # was src sgl?
beq.b funimp_skew_sgl # yes
cmpi.b %d0,&0x5 # was src dbl?
beq.b funimp_skew_dbl # yes
rts
funimp_skew_sgl:
mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
andi.w &0x7fff,%d0 # strip sign
beq.b funimp_skew_sgl_not
cmpi.w %d0,&0x3f80
bgt.b funimp_skew_sgl_not
neg.w %d0 # make exponent negative
addi.w &0x3f81,%d0 # find amt to shift
mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
lsr.l %d0,%d1 # shift it
bset &31,%d1 # set j-bit
mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
funimp_skew_sgl_not:
rts
funimp_skew_dbl:
mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
andi.w &0x7fff,%d0 # strip sign
beq.b funimp_skew_dbl_not
cmpi.w %d0,&0x3c00
bgt.b funimp_skew_dbl_not
tst.b FP_SRC_EX(%a6) # make "internal format"
smi.b 0x2+FP_SRC(%a6)
mov.w %d0,FP_SRC_EX(%a6) # insert exponent with cleared sign
clr.l %d0 # clear g,r,s
lea FP_SRC(%a6),%a0 # pass ptr to src op
mov.w &0x3c01,%d1 # pass denorm threshold
bsr.l dnrm_lp # denorm it
mov.w &0x3c00,%d0 # new exponent
tst.b 0x2+FP_SRC(%a6) # is sign set?
beq.b fss_dbl_denorm_done # no
bset &15,%d0 # set sign
fss_dbl_denorm_done:
bset &0x7,FP_SRC_HI(%a6) # set j-bit
mov.w %d0,FP_SRC_EX(%a6) # insert new exponent
funimp_skew_dbl_not:
rts
#########################################################################
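# _mem_write2() below is (as far as this handler is concerned) assumed to be
# a small helper used by fout(): from user mode it simply tail-branches to
# _dmem_write(), while from supervisor mode the 12 bytes at (a0) are parked
# in FP_DST instead and later copied onto the final frame by the
# supervisor-mode "-(a7)" paths above.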
global _mem_write2
_mem_write2:
btst &0x5,EXC_SR(%a6)
beq.l _dmem_write
mov.l 0x0(%a0),FP_DST_EX(%a6)
mov.l 0x4(%a0),FP_DST_HI(%a6)
mov.l 0x8(%a0),FP_DST_LO(%a6)
clr.l %d1
rts
#########################################################################
# XDEF **************************************************************** #
# _fpsp_effadd(): 060FPSP entry point for FP "Unimplemented #
# effective address" exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Unimplemented Effective Address exception in an operating #
# system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
# decbin() - convert packed data to FP binary data #
# _real_fpu_disabled() - "callout" for "FPU disabled" exception #
# _real_access() - "callout" for access error exception #
# _mem_read() - read extended immediate operand from memory #
# _fpsp_done() - "callout" for exit; work all done #
# _real_trace() - "callout" for Trace enabled exception #
# fmovm_dynamic() - emulate dynamic fmovm instruction #
# fmovm_ctrl() - emulate fmovm control instruction #
# #
# INPUT *************************************************************** #
# - The system stack contains the "Unimplemented <ea>" stk frame #
# #
# OUTPUT ************************************************************** #
# If access error: #
# - The system stack is changed to an access error stack frame #
# If FPU disabled: #
# - The system stack is changed to an FPU disabled stack frame #
# If Trace exception enabled: #
# - The system stack is changed to a Trace exception stack frame #
# Else: (normal case) #
# - None (correct result has been stored as appropriate) #
# #
# ALGORITHM *********************************************************** #
# This exception handles 3 types of operations: #
# (1) FP Instructions using extended precision or packed immediate #
# addressing mode. #
# (2) The "fmovm.x" instruction w/ dynamic register specification. #
# (3) The "fmovm.l" instruction w/ 2 or 3 control registers. #
# #
# For immediate data operations, the data is read in w/ a #
# _mem_read() "callout", converted to FP binary (if packed), and used #
# as the source operand to the instruction specified by the instruction #
#	word. If no FP exception should be reported as a result of the	#
# emulation, then the result is stored to the destination register and #
# the handler exits through _fpsp_done(). If an enabled exc has been #
# signalled as a result of emulation, then an fsave state frame #
# corresponding to the FP exception type must be entered into the 060 #
# FPU before exiting. In either the enabled or disabled cases, we #
# must also check if a Trace exception is pending, in which case, we #
# must create a Trace exception stack frame from the current exception #
# stack frame. If no Trace is pending, we simply exit through #
# _fpsp_done(). #
# For "fmovm.x", call the routine fmovm_dynamic() which will #
# decode and emulate the instruction. No FP exceptions can be pending #
# as a result of this operation emulation. A Trace exception can be #
# pending, though, which means the current stack frame must be changed #
# to a Trace stack frame and an exit made through _real_trace(). #
# For the case of "fmovm.x Dn,-(a7)", where the offending instruction #
# was executed from supervisor mode, this handler must store the FP #
# register file values to the system stack by itself since #
# fmovm_dynamic() can't handle this. A normal exit is made through #
# fpsp_done(). #
# For "fmovm.l", fmovm_ctrl() is used to emulate the instruction. #
# Again, a Trace exception may be pending and an exit made through #
# _real_trace(). Else, a normal exit is made through _fpsp_done(). #
# #
# Before any of the above is attempted, it must be checked to #
# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken #
# before the "FPU disabled" exception, but the "FPU disabled" exception #
# has higher priority, we check the disabled bit in the PCR. If set, #
# then we must create an 8 word "FPU disabled" exception stack frame #
# from the current 4 word exception stack frame. This includes #
# reproducing the effective address of the instruction to put on the #
# new stack frame. #
# #
# In the process of all emulation work, if a _mem_read() #
# "callout" returns a failing result indicating an access error, then #
# we must create an access error stack frame from the current stack #
# frame. This information includes a faulting address and a fault- #
# status-longword. These are created within this handler. #
# #
#########################################################################
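# a rough dispatch sketch (illustrative only; the code below is authoritative):
#
#	if (FPU disabled bit set in the PCR)
#		goto iea_disabled;	/* build the 8-word "FPU disabled" frame */
#	else if (opword is fmovem)
#		goto iea_fmovm;		/* ctrl-reg or dynamic data-reg form */
#	else
#		goto iea_op;		/* extended or packed immediate operand */
#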
global _fpsp_effadd
_fpsp_effadd:
# This exception type takes priority over the "Line F Emulator"
# exception. Therefore, the FPU could be disabled when entering here.
# So, we must check to see if it's disabled and handle that case separately.
mov.l %d0,-(%sp) # save d0
movc %pcr,%d0 # load proc cr
btst &0x1,%d0 # is FPU disabled?
bne.w iea_disabled # yes
mov.l (%sp)+,%d0 # restore d0
link %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# PC of instruction that took the exception is the PC in the frame
mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
#########################################################################
tst.w %d0 # is operation fmovem?
bmi.w iea_fmovm # yes
#
# here, we will have:
# fabs fdabs fsabs facos fmod
# fadd fdadd fsadd fasin frem
# fcmp fatan fscale
# fdiv fddiv fsdiv fatanh fsin
# fint fcos fsincos
# fintrz fcosh fsinh
# fmove fdmove fsmove fetox ftan
# fmul fdmul fsmul fetoxm1 ftanh
# fneg fdneg fsneg fgetexp ftentox
# fsgldiv fgetman ftwotox
# fsglmul flog10
# fsqrt flog2
# fsub fdsub fssub flogn
# ftst flognp1
# which can all use f<op>.{x,p}
# so, now it's immediate data extended precision AND PACKED FORMAT!
#
iea_op:
andi.l &0x00ff00ff,USER_FPSR(%a6)
btst &0xa,%d0 # is src fmt x or p?
bne.b iea_op_pack # packed
mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
lea FP_SRC(%a6),%a1 # pass: ptr to super addr
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _imem_read # read extended immediate
tst.l %d1 # did ifetch fail?
bne.w iea_iacc # yes
bra.b iea_op_setsrc
iea_op_pack:
mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
lea FP_SRC(%a6),%a1 # pass: ptr to super dst
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _imem_read # read packed operand
tst.l %d1 # did ifetch fail?
bne.w iea_iacc # yes
# The packed operand is an INF or a NAN if the exponent field is all ones.
bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
cmpi.w %d0,&0x7fff # INF or NAN?
beq.b iea_op_setsrc # operand is an INF or NAN
# The packed operand is a zero if the mantissa is all zero, else it's
# a normal packed op.
mov.b 3+FP_SRC(%a6),%d0 # get byte 4
andi.b &0x0f,%d0 # clear all but last nybble
bne.b iea_op_gp_not_spec # not a zero
tst.l FP_SRC_HI(%a6) # is lw 2 zero?
bne.b iea_op_gp_not_spec # not a zero
tst.l FP_SRC_LO(%a6) # is lw 3 zero?
beq.b iea_op_setsrc # operand is a ZERO
iea_op_gp_not_spec:
lea FP_SRC(%a6),%a0 # pass: ptr to packed op
bsr.l decbin # convert to extended
fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
iea_op_setsrc:
addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
# FP_SRC now holds the src operand.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # could be ANYTHING!!!
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b iea_op_getdst # no
bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
mov.b %d0,STAG(%a6) # set new optype tag
iea_op_getdst:
clr.b STORE_FLG(%a6) # clear "store result" boolean
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b iea_op_extract # monadic
btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
bne.b iea_op_spec # yes
iea_op_loaddst:
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
bsr.l load_fpn2 # load dst operand
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
mov.b %d0,DTAG(%a6) # could be ANYTHING!!!
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b iea_op_extract # no
bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
mov.b %d0,DTAG(%a6) # set new optype tag
bra.b iea_op_extract
# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
iea_op_spec:
btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
beq.b iea_op_extract # yes
# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
# store a result. then, only fcmp will branch back and pick up a dst operand.
st STORE_FLG(%a6) # don't store a final result
btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
beq.b iea_op_loaddst # yes
iea_op_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass: rnd mode,prec
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
fmov.l &0x0,%fpcr
fmov.l &0x0,%fpsr
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
#
# Exceptions in order of precedence:
# BSUN : none
# SNAN : all operations
# OPERR : all reg-reg or mem-reg operations that can normally operr
# OVFL : same as OPERR
# UNFL : same as OPERR
# DZ : same as OPERR
# INEX2 : same as OPERR
# INEX1 : all packed immediate operations
#
# we determine the highest priority exception(if any) set by the
# emulation routine that has also been enabled by the user.
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.b iea_op_ena # some are enabled
# now, we save the result, unless, of course, the operation was ftst or fcmp.
# these don't save results.
iea_op_save:
tst.b STORE_FLG(%a6) # does this op store a result?
bne.b iea_op_exit1 # exit with no frestore
iea_op_store:
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
bsr.l store_fpreg # store the result
iea_op_exit1:
mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6 # unravel the frame
btst &0x7,(%sp) # is trace on?
bne.w iea_op_trace # yes
bra.l _fpsp_done # exit to os
iea_op_ena:
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enable and set
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b iea_op_exc # at least one was set
# no exception occurred. now, did a disabled, exact overflow occur with inexact
# enabled? if so, then we have to stuff an overflow frame into the FPU.
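# in the style of the sketches above (illustrative only):
#
#	if (OVFL && ovfl_disabled && inexact_enabled) {
#		insert an overflow frame into the FPU (iea_op_exc_ovfl);
#	} else {
#		save the result and exit;
#	}
#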
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
beq.b iea_op_save
iea_op_ovfl:
btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
beq.b iea_op_store # no
bra.b iea_op_exc_ovfl # yes
# an enabled exception occurred. we have to insert the exception type back into
# the machine.
iea_op_exc:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX?
bne.b iea_op_exc_force # no
# the enabled exception was inexact. so, if it occurs with an overflow
# or underflow that was disabled, then we have to force an overflow or
# underflow frame.
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
bne.b iea_op_exc_ovfl # yes
btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
bne.b iea_op_exc_unfl # yes
iea_op_exc_force:
mov.w (tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
bra.b iea_op_exit2 # exit with frestore
tbl_iea_except:
short 0xe002, 0xe006, 0xe004, 0xe005
short 0xe003, 0xe002, 0xe001, 0xe001
iea_op_exc_ovfl:
mov.w &0xe005,2+FP_SRC(%a6)
bra.b iea_op_exit2
iea_op_exc_unfl:
mov.w &0xe003,2+FP_SRC(%a6)
iea_op_exit2:
mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore exceptional state
unlk %a6 # unravel the frame
btst &0x7,(%sp) # is trace on?
bne.b iea_op_trace # yes
bra.l _fpsp_done # exit to os
#
# The opclass two instruction that took an "Unimplemented Effective Address"
# exception was being traced. Make the "current" PC the FPIAR and put it in
# the trace stack frame then jump to _real_trace().
#
# UNIMP EA FRAME TRACE FRAME
# ***************** *****************
# * 0x0 * 0x0f0 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x024 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# *****************
# * SR *
# *****************
iea_op_trace:
mov.l (%sp),-(%sp) # shift stack frame "down"
mov.w 0x8(%sp),0x4(%sp)
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
bra.l _real_trace
#########################################################################
iea_fmovm:
btst &14,%d0 # ctrl or data reg
beq.w iea_fmovm_ctrl
iea_fmovm_data:
btst &0x5,EXC_SR(%a6) # user or supervisor mode
bne.b iea_fmovm_data_s
iea_fmovm_data_u:
mov.l %usp,%a0
mov.l %a0,EXC_A7(%a6) # store current a7
bsr.l fmovm_dynamic # do dynamic fmovm
mov.l EXC_A7(%a6),%a0 # load possibly new a7
mov.l %a0,%usp # update usp
bra.w iea_fmovm_exit
iea_fmovm_data_s:
clr.b SPCOND_FLG(%a6)
lea 0x2+EXC_VOFF(%a6),%a0
mov.l %a0,EXC_A7(%a6)
bsr.l fmovm_dynamic # do dynamic fmovm
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.w iea_fmovm_data_predec
cmpi.b SPCOND_FLG(%a6),&mia7_flg
bne.w iea_fmovm_exit
# right now, d0 = the size.
# the data has been fetched from the supervisor stack, but we have not
# incremented the stack pointer by the appropriate number of bytes.
# do it here.
iea_fmovm_data_postinc:
btst &0x7,EXC_SR(%a6)
bne.b iea_fmovm_data_pi_trace
mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
mov.l EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
lea (EXC_SR,%a6,%d0),%a0
mov.l %a0,EXC_SR(%a6)
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp)+,%sp
bra.l _fpsp_done
iea_fmovm_data_pi_trace:
mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
lea (EXC_SR-0x4,%a6,%d0),%a0
mov.l %a0,EXC_SR(%a6)
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp)+,%sp
bra.l _real_trace
# right now, d0 = the size and d1 = the strg.
iea_fmovm_data_predec:
mov.b %d1,EXC_VOFF(%a6) # store strg
mov.b %d0,0x1+EXC_VOFF(%a6) # store size
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
mov.l (%a6),-(%sp) # make a copy of a6
mov.l %d0,-(%sp) # save d0
mov.l %d1,-(%sp) # save d1
mov.l EXC_EXTWPTR(%a6),-(%sp) # make a copy of Next PC
clr.l %d0
mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
neg.l %d0 # get negative of size
btst &0x7,EXC_SR(%a6) # is trace enabled?
beq.b iea_fmovm_data_p2
mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
pea (%a6,%d0) # create final sp
bra.b iea_fmovm_data_p3
iea_fmovm_data_p2:
mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
mov.l (%sp)+,(EXC_PC,%a6,%d0)
mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
pea (0x4,%a6,%d0) # create final sp
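# the unrolled sequence below walks the dynamic register list (fetched into
# d1; fp0 in the msb) one bit at a time. roughly: for each of the eight bits,
# if set, store that register's 12-byte extended value onto the new frame and
# advance the offset in d0 by 12 (an illustrative summary -- see the code).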
iea_fmovm_data_p3:
clr.l %d1
mov.b EXC_VOFF(%a6),%d1 # fetch strg
tst.b %d1
bpl.b fm_1
fmovm.x &0x80,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_1:
lsl.b &0x1,%d1
bpl.b fm_2
fmovm.x &0x40,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_2:
lsl.b &0x1,%d1
bpl.b fm_3
fmovm.x &0x20,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_3:
lsl.b &0x1,%d1
bpl.b fm_4
fmovm.x &0x10,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_4:
lsl.b &0x1,%d1
bpl.b fm_5
fmovm.x &0x08,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_5:
lsl.b &0x1,%d1
bpl.b fm_6
fmovm.x &0x04,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_6:
lsl.b &0x1,%d1
bpl.b fm_7
fmovm.x &0x02,(0x4+0x8,%a6,%d0)
addi.l &0xc,%d0
fm_7:
lsl.b &0x1,%d1
bpl.b fm_end
fmovm.x &0x01,(0x4+0x8,%a6,%d0)
fm_end:
mov.l 0x4(%sp),%d1
mov.l 0x8(%sp),%d0
mov.l 0xc(%sp),%a6
mov.l (%sp)+,%sp
btst &0x7,(%sp) # is trace enabled?
beq.l _fpsp_done
bra.l _real_trace
#########################################################################
iea_fmovm_ctrl:
bsr.l fmovm_ctrl # load ctrl regs
iea_fmovm_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
btst &0x7,EXC_SR(%a6) # is trace on?
bne.b iea_fmovm_trace # yes
mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
unlk %a6 # unravel the frame
bra.l _fpsp_done # exit to os
#
# The control reg instruction that took an "Unimplemented Effective Address"
# exception was being traced. The "Current PC" for the trace frame is the
# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
# After fixing the stack frame, jump to _real_trace().
#
# UNIMP EA FRAME TRACE FRAME
# ***************** *****************
# * 0x0 * 0x0f0 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x024 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# *****************
# * SR *
# *****************
# this ain't a pretty solution, but it works:
# -restore a6 (not with unlk)
# -shift stack frame down over where old a6 used to be
# -add LOCAL_SIZE to stack pointer
iea_fmovm_trace:
mov.l (%a6),%a6 # restore frame pointer
mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
add.l &LOCAL_SIZE,%sp # clear stack frame
bra.l _real_trace
#########################################################################
# The FPU is disabled and so we should really have taken the "Line
# F Emulator" exception. So, here we create an 8-word stack frame
# from our 4-word stack frame. This means we must calculate the length
# of the faulting instruction to get the "next PC". This is trivial for
# immediate operands but requires some extra work for fmovm dynamic
# which can use most addressing modes.
iea_disabled:
mov.l (%sp)+,%d0 # restore d0
link %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
# PC of instruction that took the exception is the PC in the frame
mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
tst.w %d0 # is instr fmovm?
bmi.b iea_dis_fmovm # yes
# instruction is using an extended precision immediate operand. Therefore,
# the total instruction length is 16 bytes.
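# (that 16 breaks down as 2 bytes of opword + 2 bytes of command extension
# word + 12 bytes of extended or packed immediate data -- consistent with
# the &0x10 loaded just below.)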
iea_dis_immed:
mov.l &0x10,%d0 # 16 bytes of instruction
bra.b iea_dis_cont
iea_dis_fmovm:
btst &0xe,%d0 # is instr fmovm ctrl
bne.b iea_dis_fmovm_data # no
# the instruction is an fmovm.l with 2 or 3 registers.
bfextu %d0{&19:&3},%d1
mov.l &0xc,%d0
cmpi.b %d1,&0x7 # move all regs?
bne.b iea_dis_cont
addq.l &0x4,%d0
bra.b iea_dis_cont
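# illustration: an fmovm.l of two immediates to %fpcr/%fpsr is 2 (opword) +
# 2 (ext word) + two immediate longwords = 0xc bytes; naming all three control
# registers (field = 7) adds one more longword for 0x10, hence the addq above.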
# the instruction is an fmovm.x dynamic which can use many addressing
# modes and thus can have several different total instruction lengths.
# call fmovm_calc_ea which will go through the ea calc process and,
# as a by-product, will tell us how long the instruction is.
iea_dis_fmovm_data:
clr.l %d0
bsr.l fmovm_calc_ea
mov.l EXC_EXTWPTR(%a6),%d0
sub.l EXC_PC(%a6),%d0
iea_dis_cont:
mov.w %d0,EXC_VOFF(%a6) # store stack shift value
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
# here, we actually create the 8-word frame from the 4-word frame,
# with the "next PC" as additional info.
# the <ea> field is left undefined.
subq.l &0x8,%sp # make room for new stack
mov.l %d0,-(%sp) # save d0
mov.w 0xc(%sp),0x4(%sp) # move SR
mov.l 0xe(%sp),0x6(%sp) # move Current PC
clr.l %d0
mov.w 0x12(%sp),%d0
mov.l 0x6(%sp),0x10(%sp) # move Current PC
add.l %d0,0x6(%sp) # make Next PC
mov.w &0x402c,0xa(%sp) # insert offset,frame format
mov.l (%sp)+,%d0 # restore d0
bra.l _real_fpu_disabled
##########
iea_iacc:
movc %pcr,%d0
btst &0x1,%d0
bne.b iea_iacc_cont
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
iea_iacc_cont:
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
subq.w &0x8,%sp # make stack frame bigger
mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
mov.w &0x4008,0x6(%sp) # store voff
mov.l 0x2(%sp),0x8(%sp) # store ea
mov.l &0x09428001,0xc(%sp) # store fslw
iea_acc_done:
btst &0x5,(%sp) # user or supervisor mode?
beq.b iea_acc_done2 # user
bset &0x2,0xd(%sp) # set supervisor TM bit
iea_acc_done2:
bra.l _real_access
iea_dacc:
lea -LOCAL_SIZE(%a6),%sp
movc %pcr,%d1
btst &0x1,%d1
bne.b iea_dacc_cont
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
fmovm.l LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
iea_dacc_cont:
mov.l (%a6),%a6
mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
add.w &LOCAL_SIZE-0x4,%sp
bra.b iea_acc_done
#########################################################################
# XDEF **************************************************************** #
# _fpsp_operr(): 060FPSP entry point for FP Operr exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Operand Error exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# _real_operr() - "callout" to operating system operr handler #
# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
# facc_out_{b,w,l}() - store to memory took access error (opcl 3) #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Operr exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# No access error: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP Operr exception is enabled, the goal #
# is to get to the handler specified at _real_operr(). But, on the 060, #
# for opclass zero and two instructions taking this exception, the #
# input operand in the fsave frame may be incorrect for some cases #
# and needs to be corrected. This handler calls fix_skewed_ops() to #
# do just this and then exits through _real_operr(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# operr result out to memory or data register file as it should. #
# This code must emulate the move out before finally exiting through #
# _real_operr(). The move out, if to memory, is performed using #
# _mem_write() "callout" routines that may return a failing result. #
# In this special case, the handler must exit through facc_out() #
# which creates an access error stack frame from the current operr #
# stack frame. #
# #
#########################################################################
global _fpsp_operr
_fpsp_operr:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.b foperr_out # fmove out
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source infinity or
# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
# cause an operr so we don't need to check for them here.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
foperr_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_operr
########################################################################
#
# the hardware does not save the default result to memory on enabled
# operand error exceptions. we do this here before passing control to
# the user operand error handler.
#
# byte, word, and long destination format operations can pass
# through here. we simply need to test the sign of the src
# operand and save the appropriate minimum or maximum integer value
# to the destination pointed to by the stacked effective address.
#
# although packed opclass three operations can take operand error
# exceptions, they won't pass through here since they are caught
# first by the unsupported data format exception handler. that handler
# sends them directly to _real_operr() if necessary.
#
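# worked example (illustration only): "fmove.l %fp0,(%a0)" with fp0 = +inf
# writes the default result 0x7fffffff to (%a0); with fp0 = -inf it writes
# 0x80000000. the word and byte paths below pick up 0x7fff/0x8000 and
# 0x7f/0x80 from the top of L_SCR1.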
foperr_out:
mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
andi.w &0x7fff,%d1
cmpi.w %d1,&0x7fff
bne.b foperr_out_not_qnan
# the operand is either an infinity or a QNAN.
tst.l FP_SRC_LO(%a6)
bne.b foperr_out_qnan
mov.l FP_SRC_HI(%a6),%d1
andi.l &0x7fffffff,%d1
beq.b foperr_out_not_qnan
foperr_out_qnan:
mov.l FP_SRC_HI(%a6),L_SCR1(%a6)
bra.b foperr_out_jmp
foperr_out_not_qnan:
mov.l &0x7fffffff,%d1
tst.b FP_SRC_EX(%a6)
bpl.b foperr_out_not_qnan2
addq.l &0x1,%d1
foperr_out_not_qnan2:
mov.l %d1,L_SCR1(%a6)
foperr_out_jmp:
bfextu %d0{&19:&3},%d0 # extract dst format field
mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
mov.w (tbl_operr.b,%pc,%d0.w*2),%a0
jmp (tbl_operr.b,%pc,%a0)
tbl_operr:
short foperr_out_l - tbl_operr # long word integer
short tbl_operr - tbl_operr # sgl prec shouldn't happen
short tbl_operr - tbl_operr # ext prec shouldn't happen
short foperr_exit - tbl_operr # packed won't enter here
short foperr_out_w - tbl_operr # word integer
short tbl_operr - tbl_operr # dbl prec shouldn't happen
short foperr_out_b - tbl_operr # byte integer
short tbl_operr - tbl_operr # packed won't enter here
foperr_out_b:
mov.b L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_b_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_byte # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
bra.w foperr_exit
foperr_out_b_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_b # store result to regfile
bra.w foperr_exit
foperr_out_w:
mov.w L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_w_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_word # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
bra.w foperr_exit
foperr_out_w_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_w # store result to regfile
bra.w foperr_exit
foperr_out_l:
mov.l L_SCR1(%a6),%d0 # load positive default result
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b foperr_out_l_save_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w foperr_exit
foperr_out_l_save_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w foperr_exit
#########################################################################
# XDEF **************************************************************** #
# _fpsp_snan(): 060FPSP entry point for FP SNAN exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Signalling NAN exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# _real_snan() - "callout" to operating system SNAN handler #
# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
# facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3) #
# _calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea> #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP SNAN exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# No access error: #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP SNAN exception is enabled, the goal #
# is to get to the handler specified at _real_snan(). But, on the 060, #
# for opclass zero and two instructions taking this exception, the #
# input operand in the fsave frame may be incorrect for some cases #
# and needs to be corrected. This handler calls fix_skewed_ops() to #
# do just this and then exits through _real_snan(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# SNAN result out to memory or data register file as it should. #
# This code must emulate the move out before finally exiting through #
# _real_snan(). The move out, if to memory, is performed using #
# _mem_write() "callout" routines that may return a failing result. #
# In this special case, the handler must exit through facc_out() #
# which creates an access error stack frame from the current SNAN #
# stack frame. #
# For the case of an extended precision opclass 3 instruction, #
# if the effective addressing mode was -() or ()+, then the address #
# register must get updated by calling _calc_ea_fout(). If the <ea> #
# was -(a7) from supervisor mode, then the exception frame currently #
# on the system stack must be carefully moved "down" to make room #
# for the operand being moved. #
# #
#########################################################################
global _fpsp_snan
_fpsp_snan:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.w fsnan_out # fmove out
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source infinity or
# denorm operand in the sgl or dbl format. NANs also become skewed and must be
# fixed here.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
fsnan_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_snan
########################################################################
#
# the hardware does not save the default result to memory on enabled
# snan exceptions. we do this here before passing control to
# the user snan handler.
#
# byte, word, long, sgl, dbl, ext, and packed destination format operations
# can pass through here. since packed format operations were already handled
# by fpsp_unsupp(), we need do nothing else for them here.
# for the other formats, we store the SNAN itself -- with its SNAN bit forced
# on so the stored value is non-signalling -- to the destination pointed to
# by the stacked effective address (or to the data register file).
#
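# worked example (illustration only): an SNAN whose high mantissa longword is
# 0x00000001 yields 0x40 for a byte destination, 0x4000 for a word and
# 0x40000001 for a longword -- the bset of the SNAN bit below quiets the NAN
# in whichever slice of the mantissa gets written out.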
fsnan_out:
bfextu %d0{&19:&3},%d0 # extract dst format field
mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
mov.w (tbl_snan.b,%pc,%d0.w*2),%a0
jmp (tbl_snan.b,%pc,%a0)
tbl_snan:
short fsnan_out_l - tbl_snan # long word integer
short fsnan_out_s - tbl_snan # sgl prec
short fsnan_out_x - tbl_snan # ext prec
short tbl_snan - tbl_snan # packed needs no help
short fsnan_out_w - tbl_snan # word integer
short fsnan_out_d - tbl_snan # dbl prec
short fsnan_out_b - tbl_snan # byte integer
short tbl_snan - tbl_snan # packed needs no help
fsnan_out_b:
mov.b FP_SRC_HI(%a6),%d0 # load upper byte of SNAN
bset &6,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_b_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_byte # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
bra.w fsnan_exit
fsnan_out_b_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_b # store result to regfile
bra.w fsnan_exit
fsnan_out_w:
mov.w FP_SRC_HI(%a6),%d0 # load upper word of SNAN
bset &14,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_w_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_word # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
bra.w fsnan_exit
fsnan_out_w_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_w # store result to regfile
bra.w fsnan_exit
fsnan_out_l:
mov.l FP_SRC_HI(%a6),%d0 # load upper longword of SNAN
bset &30,%d0 # set SNAN bit
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_l_dn # yes
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w fsnan_exit
fsnan_out_l_dn:
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w fsnan_exit
fsnan_out_s:
cmpi.b %d1,&0x7 # is <ea> mode a data reg?
ble.b fsnan_out_d_dn # yes
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
mov.l FP_SRC_HI(%a6),%d1 # load mantissa
lsr.l &0x8,%d1 # shift mantissa for sgl
or.l %d1,%d0 # create sgl SNAN
mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
bsr.l _dmem_write_long # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.w fsnan_exit
fsnan_out_d_dn:
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
mov.l %d1,-(%sp)
mov.l FP_SRC_HI(%a6),%d1 # load mantissa
lsr.l &0x8,%d1 # shift mantissa for sgl
or.l %d1,%d0 # create sgl SNAN
mov.l (%sp)+,%d1
andi.w &0x0007,%d1
bsr.l store_dreg_l # store result to regfile
bra.w fsnan_exit
fsnan_out_d:
mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
andi.l &0x80000000,%d0 # keep sign
ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
mov.l %d0,FP_SCR0_EX(%a6) # store to temp space
mov.l &11,%d0 # load shift amt
lsr.l %d0,%d1
or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
andi.l &0x000007ff,%d1
ror.l %d0,%d1
mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
lsr.l %d0,%d1
or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l EXC_EA(%a6),%a1 # pass: dst addr
movq.l &0x8,%d0 # pass: size of 8 bytes
bsr.l _dmem_write # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
bra.w fsnan_exit
# for extended precision, if the addressing mode is pre-decrement or
# post-increment, then the address register did not get updated.
# in addition, for pre-decrement, the stacked <ea> is incorrect.
fsnan_out_x:
clr.b SPCOND_FLG(%a6) # clear special case flag
mov.w FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
clr.w 2+FP_SCR0(%a6)
mov.l FP_SRC_HI(%a6),%d0
bset &30,%d0
mov.l %d0,FP_SCR0_HI(%a6)
mov.l FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
btst &0x5,EXC_SR(%a6) # supervisor mode exception?
bne.b fsnan_out_x_s # yes
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # save on stack for calc_ea()
mov.l (%a6),EXC_A6(%a6)
bsr.l _calc_ea_fout # find the correct ea,update An
mov.l %a0,%a1
mov.l %a0,EXC_EA(%a6) # stack correct <ea>
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp # restore user stack pointer
mov.l EXC_A6(%a6),(%a6)
fsnan_out_x_save:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
movq.l &0xc,%d0 # pass: size of extended
bsr.l _dmem_write # write the default result
tst.l %d1 # did dstore fail?
bne.l facc_out_x # yes
bra.w fsnan_exit
fsnan_out_x_s:
mov.l (%a6),EXC_A6(%a6)
bsr.l _calc_ea_fout # find the correct ea,update An
mov.l %a0,%a1
mov.l %a0,EXC_EA(%a6) # stack correct <ea>
mov.l EXC_A6(%a6),(%a6)
cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
bne.b fsnan_out_x_save # no
# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
mov.l EXC_A6(%a6),%a6 # restore frame pointer
mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
mov.l LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
mov.l LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
add.l &LOCAL_SIZE-0x8,%sp
bra.l _real_snan
#########################################################################
# XDEF **************************************************************** #
# _fpsp_inex(): 060FPSP entry point for FP Inexact exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Inexact exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword #
# fix_skewed_ops() - adjust src operand in fsave frame #
# set_tag_x() - determine optype of src/dst operands #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# unnorm_fix() - change UNNORM operands to NORM or ZERO #
# load_fpn2() - load dst operand from FP regfile #
# smovcr() - emulate an "fmovcr" instruction #
# fout() - emulate an opclass 3 instruction #
# tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
# _real_inex() - "callout" to operating system inexact handler #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP Inexact exception frame #
# - The fsave frame contains the source operand #
# #
# OUTPUT ************************************************************** #
# - The system stack is unchanged #
# - The fsave frame contains the adjusted src op for opclass 0,2 #
# #
# ALGORITHM *********************************************************** #
# In a system where the FP Inexact exception is enabled, the goal #
# is to get to the handler specified at _real_inex(). But, on the 060, #
# for opclass zero and two instructions taking this exception, the #
# hardware doesn't store the correct result to the destination FP #
# register as did the '040 and '881/2. This handler must emulate the #
# instruction in order to get this value and then store it to the #
# correct register before calling _real_inex(). #
# For opclass 3 instructions, the 060 doesn't store the default #
# inexact result out to memory or data register file as it should. #
# This code must emulate the move out by calling fout() before finally #
# exiting through _real_inex(). #
# #
#########################################################################
global _fpsp_inex
_fpsp_inex:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
btst &13,%d0 # is instr an fmove out?
bne.w finex_out # fmove out
# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
# longword integer directly into the upper longword of the mantissa along
# w/ an exponent value of 0x401e. we convert this to extended precision here.
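# illustration: a longword source of +1 arrives here as exponent 0x401e with
# hi mantissa 0x00000001; the fmov.l/fmov.x pair below re-normalizes it to the
# true extended value +1.0 (exponent 0x3fff, mantissa 0x80000000 00000000).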
bfextu %d0{&19:&3},%d0 # fetch instr size
bne.b finex_cont # instr size is not long
cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
bne.b finex_cont # no
fmov.l &0x0,%fpcr
fmov.l FP_SRC_HI(%a6),%fp0 # load integer src
fmov.x %fp0,FP_SRC(%a6) # store integer as extended precision
mov.w &0xe001,0x2+FP_SRC(%a6)
finex_cont:
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
# Here, we zero the ccode and exception byte field since we're going to
# emulate the whole instruction. Notice, though, that we don't kill the
# INEX1 bit. This is because a packed op has long since been converted
# to extended before arriving here. Therefore, we need to retain the
# INEX1 bit from when the operand was first converted.
andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
fmov.l &0x0,%fpcr # zero current control regs
fmov.l &0x0,%fpsr
bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
cmpi.b %d1,&0x17 # is op an fmovecr?
beq.w finex_fmovcr # yes
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l set_tag_x # tag the operand type
mov.b %d0,STAG(%a6) # maybe NORM,DENORM
# bits four and five of the fp extension word separate the monadic and dyadic
# operations that can pass through fpsp_inex(). remember that fcmp and ftst
# will never take this exception, but fsincos will.
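# e.g. (illustration) fadd/fmul (extensions $22/$23) have bit five set and are
# dyadic; fsin ($0e) does not and is monadic; fsincos ($30-$37) sets bits four
# and five, so the bit four test below sends it down the monadic path (no dst
# operand is loaded).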
btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
beq.b finex_extract # monadic
btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
bne.b finex_extract # yes
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
bsr.l load_fpn2 # load dst into FP_DST
lea FP_DST(%a6),%a0 # pass: ptr to dst op
bsr.l set_tag_x # tag the operand type
cmpi.b %d0,&UNNORM # is operand an UNNORM?
bne.b finex_op2_done # no
bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
finex_op2_done:
mov.b %d0,DTAG(%a6) # save dst optype tag
finex_extract:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x007f,%d1 # extract extension
lea FP_SRC(%a6),%a0
lea FP_DST(%a6),%a1
mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
jsr (tbl_unsupp.l,%pc,%d1.l*1)
# the operation has been emulated. the result is in fp0.
finex_save:
bfextu EXC_CMDREG(%a6){&6:&3},%d0
bsr.l store_fpreg
finex_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_inex
finex_fmovcr:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.l &0x0000007f,%d1 # pass rom offset
bsr.l smovcr
bra.b finex_save
########################################################################
#
# the hardware does not save the default result to memory on enabled
# inexact exceptions. we do this here before passing control to
# the user inexact handler.
#
# byte, word, and long destination format operations can pass
# through here. so can double and single precision.
# although packed opclass three operations can take inexact
# exceptions, they won't pass through here since they are caught
# first by the unsupported data format exception handler. that handler
# sends them directly to _real_inex() if necessary.
#
finex_out:
mov.b &NORM,STAG(%a6) # src is a NORM
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
lea FP_SRC(%a6),%a0 # pass ptr to src operand
bsr.l fout # store the default result
bra.b finex_exit
#########################################################################
# XDEF **************************************************************** #
# _fpsp_dz(): 060FPSP entry point for FP DZ exception. #
# #
# This handler should be the first code executed upon taking #
# the FP DZ exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_long() - read instruction longword from memory #
# fix_skewed_ops() - adjust fsave operand #
# _real_dz() - "callout" exit point from FP DZ handler #
# #
# INPUT *************************************************************** #
# - The system stack contains the FP DZ exception stack. #
# - The fsave frame contains the source operand. #
# #
# OUTPUT ************************************************************** #
# - The system stack contains the FP DZ exception stack. #
# - The fsave frame contains the adjusted source operand. #
# #
# ALGORITHM *********************************************************** #
# In a system where the DZ exception is enabled, the goal is to #
# get to the handler specified at _real_dz(). But, on the 060, when the #
# exception is taken, the input operand in the fsave state frame may #
# be incorrect for some cases and need to be adjusted. So, this package #
# adjusts the operand using fix_skewed_ops() and then branches to #
# _real_dz(). #
# #
#########################################################################
global _fpsp_dz
_fpsp_dz:
link.w %a6,&-LOCAL_SIZE # init stack frame
fsave FP_SRC(%a6) # grab the "busy" frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
# the FPIAR holds the "current PC" of the faulting instruction
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
##############################################################################
# here, we simply see if the operand in the fsave frame needs to be "unskewed".
# this would be the case for opclass two operations with a source zero
# in the sgl or dbl format.
lea FP_SRC(%a6),%a0 # pass: ptr to src op
bsr.l fix_skewed_ops # fix src op
fdz_exit:
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6)
unlk %a6
bra.l _real_dz
#########################################################################
# XDEF **************************************************************** #
# _fpsp_fline(): 060FPSP entry point for "Line F emulator" exc. #
# #
# This handler should be the first code executed upon taking the #
# "Line F Emulator" exception in an operating system. #
# #
# XREF **************************************************************** #
# _fpsp_unimp() - handle "FP Unimplemented" exceptions #
# _real_fpu_disabled() - handle "FPU disabled" exceptions #
# _real_fline() - handle "FLINE" exceptions #
# _imem_read_long() - read instruction longword #
# #
# INPUT *************************************************************** #
# - The system stack contains a "Line F Emulator" exception #
# stack frame. #
# #
# OUTPUT ************************************************************** #
# - The system stack is unchanged #
# #
# ALGORITHM *********************************************************** #
# When a "Line F Emulator" exception occurs, there are 3 possible #
# exception types, denoted by the exception stack frame format number: #
# (1) FPU unimplemented instruction (6 word stack frame) #
# (2) FPU disabled (8 word stack frame) #
# (3) Line F (4 word stack frame) #
# #
# This module determines which and forks the flow off to the #
# appropriate "callout" (for "disabled" and "Line F") or to the #
# correct emulation code (for "FPU unimplemented"). #
# This code also must check for "fmovecr" instructions w/ a #
# non-zero <ea> field. These may get flagged as "Line F" but should #
# really be flagged as "FPU Unimplemented". (This is a "feature" on #
# the '060.) #
# #
#########################################################################
global _fpsp_fline
_fpsp_fline:
# check to see if this exception is a "FP Unimplemented Instruction"
# exception. if so, branch directly to that handler's entry point.
cmpi.w 0x6(%sp),&0x202c
beq.l _fpsp_unimp
# check to see if the FPU is disabled. if so, jump to the OS entry
# point for that condition.
cmpi.w 0x6(%sp),&0x402c
beq.l _real_fpu_disabled
# the exception was an "F-Line Illegal" exception. we check to see
# if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
# so, convert the F-Line exception stack frame to an FP Unimplemented
# Instruction exception stack frame else branch to the OS entry
# point for the F-Line exception handler.
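# illustration: fmovecr opwords lie in the range 0xf200-0xf23f, so the upper
# ten bits of the fetched longword are 0x3c8 and the extension word's upper
# six bits are 0x17 (binary 010111) -- exactly the two compares below. a
# non-zero low six bits in the opword is the bogus <ea> that brought us here.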
link.w %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch instruction words
bfextu %d0{&0:&10},%d1 # is it an fmovecr?
cmpi.w %d1,&0x03c8
bne.b fline_fline # no
bfextu %d0{&16:&6},%d1 # is it an fmovecr?
cmpi.b %d1,&0x17
bne.b fline_fline # no
# it's an fmovecr w/ a non-zero <ea> that has entered through
# the F-Line Illegal exception.
# so, we need to convert the F-Line exception stack frame into an
# FP Unimplemented Instruction stack frame and jump to that entry
# point.
#
# but, if the FPU is disabled, then we need to jump to the FPU disabled
# entry point.
movc %pcr,%d0
btst &0x1,%d0
beq.b fline_fmovcr
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
sub.l &0x8,%sp # make room for "Next PC", <ea>
mov.w 0x8(%sp),(%sp)
mov.l 0xa(%sp),0x2(%sp) # move "Current PC"
mov.w &0x402c,0x6(%sp)
mov.l 0x2(%sp),0xc(%sp)
addq.l &0x4,0x2(%sp) # set "Next PC"
bra.l _real_fpu_disabled
fline_fmovcr:
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
fmov.l 0x2(%sp),%fpiar # set current PC
addq.l &0x4,0x2(%sp) # set Next PC
mov.l (%sp),-(%sp)
mov.l 0x8(%sp),0x4(%sp)
mov.b &0x20,0x6(%sp)
bra.l _fpsp_unimp
fline_fline:
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
bra.l _real_fline
#########################################################################
# XDEF **************************************************************** #
# _fpsp_unimp(): 060FPSP entry point for FP "Unimplemented #
# Instruction" exception. #
# #
# This handler should be the first code executed upon taking the #
# FP Unimplemented Instruction exception in an operating system. #
# #
# XREF **************************************************************** #
# _imem_read_{word,long}() - read instruction word/longword #
# load_fop() - load src/dst ops from memory and/or FP regfile #
# store_fpreg() - store opclass 0 or 2 result to FP regfile #
# tbl_trans - addr of table of emulation routines for trnscndls #
# _real_access() - "callout" for access error exception #
# _fpsp_done() - "callout" for exit; work all done #
# _real_trace() - "callout" for Trace enabled exception #
# smovcr() - emulate "fmovecr" instruction #
# funimp_skew() - adjust fsave src ops to "incorrect" value #
# _ftrapcc() - emulate an "ftrapcc" instruction #
# _fdbcc() - emulate an "fdbcc" instruction #
# _fscc() - emulate an "fscc" instruction #
# _real_trap() - "callout" for Trap exception #
# _real_bsun() - "callout" for enabled Bsun exception #
# #
# INPUT *************************************************************** #
# - The system stack contains the "Unimplemented Instr" stk frame #
# #
# OUTPUT ************************************************************** #
# If access error: #
# - The system stack is changed to an access error stack frame #
# If Trace exception enabled: #
# - The system stack is changed to a Trace exception stack frame #
# Else: (normal case) #
# - Correct result has been stored as appropriate #
# #
# ALGORITHM *********************************************************** #
# There are two main cases of instructions that may enter here to #
# be emulated: (1) the FPgen instructions, most of which were also #
# unimplemented on the 040, and (2) "ftrapcc", "fscc", and "fdbcc". #
# For the first set, this handler calls the routine load_fop() #
# to load the source and destination (for dyadic) operands to be used #
# for instruction emulation. The correct emulation routine is then #
# chosen by decoding the instruction type and indexing into an #
# emulation subroutine index table. After emulation returns, this #
# handler checks to see if an exception should occur as a result of the #
# FP instruction emulation. If so, then an FP exception of the correct #
# type is inserted into the FPU state frame using the "frestore" #
# instruction before exiting through _fpsp_done(). In either the #
# exceptional or non-exceptional cases, we must check to see if the #
# Trace exception is enabled. If so, then we must create a Trace #
# exception frame from the current exception frame and exit through #
# _real_trace(). #
# For "fdbcc", "ftrapcc", and "fscc", the emulation subroutines #
# _fdbcc(), _ftrapcc(), and _fscc() respectively are used. All three #
# may flag that a BSUN exception should be taken. If so, then the #
# current exception stack frame is converted into a BSUN exception #
# stack frame and an exit is made through _real_bsun(). If the #
# instruction was "ftrapcc" and a Trap exception should result, a Trap #
# exception stack frame is created from the current frame and an exit #
# is made through _real_trap(). If a Trace exception is pending, then #
# a Trace exception frame is created from the current frame and a jump #
# is made to _real_trace(). Finally, if none of these conditions exist, #
# then the handler exits through the callout _fpsp_done(). #
# #
# In any of the above scenarios, if a _mem_read() or _mem_write() #
# "callout" returns a failing value, then an access error stack frame #
# is created from the current stack frame and an exit is made through #
# _real_access(). #
# #
#########################################################################
#
# FP UNIMPLEMENTED INSTRUCTION STACK FRAME:
#
# *****************
# * * => <ea> of fp unimp instr.
# - EA -
# * *
# *****************
# * 0x2 * 0x02c * => frame format and vector offset(vector #11)
# *****************
# * *
# - Next PC - => PC of instr to execute after exc handling
# * *
# *****************
# * SR * => SR at the time the exception was taken
# *****************
#
# Note: the !NULL bit does not get set in the fsave frame when the
# machine encounters an fp unimp exception. Therefore, it must be set
# before leaving this handler.
#
global _fpsp_unimp
_fpsp_unimp:
link.w %a6,&-LOCAL_SIZE # init stack frame
movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1
btst &0x5,EXC_SR(%a6) # user mode exception?
bne.b funimp_s # no; supervisor mode
# save the value of the user stack pointer onto the stack frame
funimp_u:
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # store in stack frame
bra.b funimp_cont
# store the value of the supervisor stack pointer BEFORE the exc occurred.
# old_sp is address just above stacked effective address.
funimp_s:
lea 4+EXC_EA(%a6),%a0 # load old a7'
mov.l %a0,EXC_A7(%a6) # store a7'
mov.l %a0,OLD_A7(%a6) # make a copy
funimp_cont:
# the FPIAR holds the "current PC" of the faulting instruction.
mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch the instruction words
mov.l %d0,EXC_OPWORD(%a6)
############################################################################
fmov.l &0x0,%fpcr # clear FPCR
fmov.l &0x0,%fpsr # clear FPSR
clr.b SPCOND_FLG(%a6) # clear "special case" flag
# Divide the fp instructions into 8 types based on the TYPE field in
# bits 6-8 of the opword (classes 6,7 are undefined).
# (for the '060, only two types can take this exception)
# bftst %d0{&7:&3} # test TYPE
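# illustration: general FPgen opwords are 0xf200 + <ea>, so opword bit six
# (bit 22 of the fetched longword) is clear; fdb<cc>/fs<cc>/ftrap<cc> opwords
# are 0xf240 and above, so that bit is set.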
btst &22,%d0 # type 0 or 1 ?
bne.w funimp_misc # type 1
#########################################
# TYPE == 0: General instructions #
#########################################
funimp_gen:
clr.b STORE_FLG(%a6) # clear "store result" flag
# clear the ccode byte and exception status byte
andi.l &0x00ff00ff,USER_FPSR(%a6)
bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg
cmpi.b %d1,&0x17 # is op an fmovecr?
beq.w funimp_fmovcr # yes
funimp_gen_op:
bsr.l _load_fop # load
clr.l %d0
mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode
mov.b 1+EXC_CMDREG(%a6),%d1
andi.w &0x003f,%d1 # extract extension bits
lsl.w &0x3,%d1 # shift left 3 bits
or.b STAG(%a6),%d1 # insert src optag bits
lea FP_DST(%a6),%a1 # pass dst ptr in a1
lea FP_SRC(%a6),%a0 # pass src ptr in a0
mov.w (tbl_trans.w,%pc,%d1.w*2),%d1
jsr (tbl_trans.w,%pc,%d1.w*1) # emulate
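# dispatch illustration: the index built above is (extension & 0x3f)*8 + STAG.
# e.g. fsin of a NORM source gives ($0e << 3) + 0 = 0x70, and the word entry at
# tbl_trans + 2*0x70 selects ssin (the "$0e-0 fsin norm" slot). (this assumes
# NORM tags as zero, per set_tag_x.)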
funimp_fsave:
mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
bne.w funimp_ena # some are enabled
funimp_store:
bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch Dn
bsr.l store_fpreg # store result to fp regfile
funimp_gen_exit:
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
funimp_gen_exit_cmp:
cmpi.b SPCOND_FLG(%a6),&mia7_flg # was the ea mode (sp)+ ?
beq.b funimp_gen_exit_a7 # yes
cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the ea mode -(sp) ?
beq.b funimp_gen_exit_a7 # yes
funimp_gen_exit_cont:
unlk %a6
funimp_gen_exit_cont2:
btst &0x7,(%sp) # is trace on?
beq.l _fpsp_done # no
# this catches a problem with the case where an exception will be re-inserted
# into the machine. the frestore has already been executed...so, the fmov.l
# alone of the control register would trigger an unwanted exception.
# until I feel like fixing this, we'll sidestep the exception.
fsave -(%sp)
fmov.l %fpiar,0x14(%sp) # "Current PC" is in FPIAR
frestore (%sp)+
mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x24
bra.l _real_trace
funimp_gen_exit_a7:
btst &0x5,EXC_SR(%a6) # supervisor or user mode?
bne.b funimp_gen_exit_a7_s # supervisor
mov.l %a0,-(%sp)
mov.l EXC_A7(%a6),%a0
mov.l %a0,%usp
mov.l (%sp)+,%a0
bra.b funimp_gen_exit_cont
# if the instruction was executed from supervisor mode and the addressing
# mode was (a7)+, then the stack frame for the rte must be shifted "up"
# "n" bytes where "n" is the size of the src operand type.
# f<op>.{b,w,l,s,d,x,p}
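# illustration: "fadd.x (%a7)+,%fp0" executed in supervisor mode advances a7'
# by 12 bytes, so the code below copies the SR/PC/format words 12 bytes higher
# and then bumps sp by the stored increment after the unlk.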
funimp_gen_exit_a7_s:
mov.l %d0,-(%sp) # save d0
mov.l EXC_A7(%a6),%d0 # load new a7'
sub.l OLD_A7(%a6),%d0 # subtract old a7'
mov.l 0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
mov.l EXC_SR(%a6),(EXC_SR,%a6,%d0) # shift stack frame
mov.w %d0,EXC_SR(%a6) # store incr number
mov.l (%sp)+,%d0 # restore d0
unlk %a6
add.w (%sp),%sp # stack frame shifted
bra.b funimp_gen_exit_cont2
######################
# fmovecr.x #ccc,fpn #
######################
funimp_fmovcr:
clr.l %d0
mov.b FPCR_MODE(%a6),%d0
mov.b 1+EXC_CMDREG(%a6),%d1
andi.l &0x0000007f,%d1 # pass rom offset in d1
bsr.l smovcr
bra.w funimp_fsave
#########################################################################
#
# the user has enabled some exceptions. we figure not to see this too
# often so that's why it gets lower priority.
#
funimp_ena:
# was an exception set that was also enabled?
and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
bfffo %d0{&24:&8},%d0 # find highest priority exception
bne.b funimp_exc # at least one was set
# no exception that was enabled was set BUT if we got an exact overflow
# and overflow wasn't enabled but inexact was (yech!) then this is
# an inexact exception; otherwise, return to normal non-exception flow.
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
beq.w funimp_store # no; return to normal flow
# the overflow w/ exact result happened but was inexact set in the FPCR?
funimp_ovfl:
btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
beq.w funimp_store # no; return to normal flow
bra.b funimp_exc_ovfl # yes
# some exception happened that was actually enabled.
# we'll insert this new exception into the FPU and then return.
funimp_exc:
subi.l &24,%d0 # fix offset to be 0-8
cmpi.b %d0,&0x6 # is exception INEX?
bne.b funimp_exc_force # no
# the enabled exception was inexact. so, if it occurs with an overflow
# or underflow that was disabled, then we have to force an overflow or
# underflow frame. the eventual overflow or underflow handler will see that
# it's actually an inexact and act appropriately. this is the only easy
# way to have the EXOP available for the enabled inexact handler when
# a disabled overflow or underflow has also happened.
btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
bne.b funimp_exc_ovfl # yes
btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
bne.b funimp_exc_unfl # yes
# force the fsave exception status bits to signal an exception of the
# appropriate type. don't forget to "skew" the source operand in case we
# "unskewed" the one the hardware initially gave us.
funimp_exc_force:
mov.l %d0,-(%sp) # save d0
bsr.l funimp_skew # check for special case
mov.l (%sp)+,%d0 # restore d0
mov.w (tbl_funimp_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
bra.b funimp_gen_exit2 # exit with frestore
tbl_funimp_except:
short 0xe002, 0xe006, 0xe004, 0xe005
short 0xe003, 0xe002, 0xe001, 0xe001
# insert an overflow frame
funimp_exc_ovfl:
bsr.l funimp_skew # check for special case
mov.w &0xe005,2+FP_SRC(%a6)
bra.b funimp_gen_exit2
# insert an underflow frame
funimp_exc_unfl:
bsr.l funimp_skew # check for special case
mov.w &0xe003,2+FP_SRC(%a6)
# this is the general exit point for an enabled exception that will be
# restored into the machine for the instruction just emulated.
funimp_gen_exit2:
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # insert exceptional status
bra.w funimp_gen_exit_cmp
############################################################################
#
# TYPE == 1: FDB<cc>, FS<cc>, FTRAP<cc>
#
# These instructions were implemented on the '881/2 and '040 in hardware but
# are emulated in software on the '060.
#
funimp_misc:
bfextu %d0{&10:&3},%d1 # extract mode field
cmpi.b %d1,&0x1 # is it an fdb<cc>?
beq.w funimp_fdbcc # yes
cmpi.b %d1,&0x7 # is it an fs<cc>?
bne.w funimp_fscc # yes
bfextu %d0{&13:&3},%d1
cmpi.b %d1,&0x2 # is it an fs<cc>?
blt.w funimp_fscc # yes
#########################
# ftrap<cc> #
# ftrap<cc>.w #<data> #
# ftrap<cc>.l #<data> #
#########################
funimp_ftrapcc:
bsr.l _ftrapcc # FTRAP<cc>()
cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
beq.w funimp_bsun # yes
cmpi.b SPCOND_FLG(%a6),&ftrapcc_flg # should a trap occur?
bne.w funimp_done # no
# FP UNIMP FRAME TRAP FRAME
# ***************** *****************
# ** <EA> ** ** Current PC **
# ***************** *****************
# * 0x2 * 0x02c * * 0x2 * 0x01c *
# ***************** *****************
# ** Next PC ** ** Next PC **
# ***************** *****************
# * SR * * SR *
# ***************** *****************
# (6 words) (6 words)
#
# the ftrapcc instruction should take a trap. so, here we must create a
# trap stack frame from an unimplemented fp instruction stack frame and
# jump to the user supplied entry point for the trap exception
funimp_ftrapcc_tp:
mov.l USER_FPIAR(%a6),EXC_EA(%a6) # Address = Current PC
mov.w &0x201c,EXC_VOFF(%a6) # Vector Offset = 0x01c
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
bra.l _real_trap
#########################
# fdb<cc> Dn,<label> #
#########################
funimp_fdbcc:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # read displacement
tst.l %d1 # did ifetch fail?
bne.w funimp_iacc # yes
ext.l %d0 # sign extend displacement
bsr.l _fdbcc # FDB<cc>()
cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
beq.w funimp_bsun
bra.w funimp_done # branch to finish
#################
# fs<cc>.b <ea> #
#################
funimp_fscc:
bsr.l _fscc # FS<cc>()
# I am assuming here that an "fs<cc>.b -(An)" or "fs<cc>.b (An)+" instruction
# does not need to update "An" before taking a bsun exception.
cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
beq.w funimp_bsun
btst &0x5,EXC_SR(%a6) # yes; is it a user mode exception?
bne.b funimp_fscc_s # no
funimp_fscc_u:
mov.l EXC_A7(%a6),%a0 # yes; set new USP
mov.l %a0,%usp
bra.w funimp_done # branch to finish
# remember, I'm assuming that post-increment is bogus...(it IS!!!)
# so, the least significant WORD of the stacked effective address got
# overwritten by the "fs<cc> -(An)". We must shift the stack frame "down"
# so that the rte will work correctly without destroying the result.
# even though the operation size is byte, the stack ptr is decr by 2.
#
# remember, also, this instruction may be traced.
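# illustration: "fsgt.b -(%a7)" from supervisor mode leaves its byte result
# where the low word of the stacked <ea> used to be; the copies below slide
# SR/PC (and, when tracing, the new format/voff) two bytes lower so that the
# rte path does not clobber that result.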
funimp_fscc_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg # was a7 modified?
bne.w funimp_done # no
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
btst &0x7,(%sp) # is trace enabled?
bne.b funimp_fscc_s_trace # yes
subq.l &0x2,%sp
mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
mov.l 0x6(%sp),0x4(%sp) # shift lo(PC),voff "down"
bra.l _fpsp_done
funimp_fscc_s_trace:
subq.l &0x2,%sp
mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
mov.w 0x6(%sp),0x4(%sp) # shift lo(PC)
mov.w &0x2024,0x6(%sp) # fmt/voff = $2024
fmov.l %fpiar,0x8(%sp) # insert "current PC"
bra.l _real_trace
#
# The ftrap<cc>, fs<cc>, or fdb<cc> is to take an enabled bsun. we must convert
# the fp unimplemented instruction exception stack frame into a bsun stack frame,
# restore a bsun exception into the machine, and branch to the user
# supplied bsun hook.
#
# FP UNIMP FRAME BSUN FRAME
# ***************** *****************
# ** <EA> ** * 0x0 * 0x0c0 *
# ***************** *****************
# * 0x2 * 0x02c * ** Current PC **
# ***************** *****************
# ** Next PC ** * SR *
# ***************** *****************
# * SR * (4 words)
# *****************
# (6 words)
#
funimp_bsun:
mov.w &0x00c0,2+EXC_EA(%a6) # Fmt = 0x0; Vector Offset = 0x0c0
mov.l USER_FPIAR(%a6),EXC_VOFF(%a6) # PC = Current PC
mov.w EXC_SR(%a6),2+EXC_PC(%a6) # shift SR "up"
mov.w &0xe000,2+FP_SRC(%a6) # bsun exception enabled
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
frestore FP_SRC(%a6) # restore bsun exception
unlk %a6
addq.l &0x4,%sp # erase sludge
bra.l _real_bsun # branch to user bsun hook
#
# all ftrapcc/fscc/fdbcc processing has been completed. unwind the stack frame
# and return.
#
# as usual, we have to check for trace mode being on here. since instructions
# modifying the supervisor stack frame don't pass through here, this is a
# relatively easy task.
#
funimp_done:
fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
btst &0x7,(%sp) # is trace enabled?
bne.b funimp_trace # yes
bra.l _fpsp_done
# FP UNIMP FRAME TRACE FRAME
# ***************** *****************
# ** <EA> ** ** Current PC **
# ***************** *****************
# * 0x2 * 0x02c * * 0x2 * 0x024 *
# ***************** *****************
# ** Next PC ** ** Next PC **
# ***************** *****************
# * SR * * SR *
# ***************** *****************
# (6 words) (6 words)
#
# the fscc instruction should take a trace trap. so, here we must create a
# trace stack frame from an unimplemented fp instruction stack frame and
# jump to the user supplied entry point for the trace exception
funimp_trace:
fmov.l %fpiar,0x8(%sp) # current PC is in fpiar
mov.b &0x24,0x7(%sp) # vector offset = 0x024
bra.l _real_trace
################################################################
global tbl_trans
swbeg &0x1c0
tbl_trans:
short tbl_trans - tbl_trans # $00-0 fmovecr all
short tbl_trans - tbl_trans # $00-1 fmovecr all
short tbl_trans - tbl_trans # $00-2 fmovecr all
short tbl_trans - tbl_trans # $00-3 fmovecr all
short tbl_trans - tbl_trans # $00-4 fmovecr all
short tbl_trans - tbl_trans # $00-5 fmovecr all
short tbl_trans - tbl_trans # $00-6 fmovecr all
short tbl_trans - tbl_trans # $00-7 fmovecr all
short tbl_trans - tbl_trans # $01-0 fint norm
short tbl_trans - tbl_trans # $01-1 fint zero
short tbl_trans - tbl_trans # $01-2 fint inf
short tbl_trans - tbl_trans # $01-3 fint qnan
short tbl_trans - tbl_trans # $01-5 fint denorm
short tbl_trans - tbl_trans # $01-4 fint snan
short tbl_trans - tbl_trans # $01-6 fint unnorm
short tbl_trans - tbl_trans # $01-7 ERROR
short ssinh - tbl_trans # $02-0 fsinh norm
short src_zero - tbl_trans # $02-1 fsinh zero
short src_inf - tbl_trans # $02-2 fsinh inf
short src_qnan - tbl_trans # $02-3 fsinh qnan
short ssinhd - tbl_trans # $02-5 fsinh denorm
short src_snan - tbl_trans # $02-4 fsinh snan
short tbl_trans - tbl_trans # $02-6 fsinh unnorm
short tbl_trans - tbl_trans # $02-7 ERROR
short tbl_trans - tbl_trans # $03-0 fintrz norm
short tbl_trans - tbl_trans # $03-1 fintrz zero
short tbl_trans - tbl_trans # $03-2 fintrz inf
short tbl_trans - tbl_trans # $03-3 fintrz qnan
short tbl_trans - tbl_trans # $03-5 fintrz denorm
short tbl_trans - tbl_trans # $03-4 fintrz snan
short tbl_trans - tbl_trans # $03-6 fintrz unnorm
short tbl_trans - tbl_trans # $03-7 ERROR
short tbl_trans - tbl_trans # $04-0 fsqrt norm
short tbl_trans - tbl_trans # $04-1 fsqrt zero
short tbl_trans - tbl_trans # $04-2 fsqrt inf
short tbl_trans - tbl_trans # $04-3 fsqrt qnan
short tbl_trans - tbl_trans # $04-5 fsqrt denorm
short tbl_trans - tbl_trans # $04-4 fsqrt snan
short tbl_trans - tbl_trans # $04-6 fsqrt unnorm
short tbl_trans - tbl_trans # $04-7 ERROR
short tbl_trans - tbl_trans # $05-0 ERROR
short tbl_trans - tbl_trans # $05-1 ERROR
short tbl_trans - tbl_trans # $05-2 ERROR
short tbl_trans - tbl_trans # $05-3 ERROR
short tbl_trans - tbl_trans # $05-4 ERROR
short tbl_trans - tbl_trans # $05-5 ERROR
short tbl_trans - tbl_trans # $05-6 ERROR
short tbl_trans - tbl_trans # $05-7 ERROR
short slognp1 - tbl_trans # $06-0 flognp1 norm
short src_zero - tbl_trans # $06-1 flognp1 zero
short sopr_inf - tbl_trans # $06-2 flognp1 inf
short src_qnan - tbl_trans # $06-3 flognp1 qnan
short slognp1d - tbl_trans # $06-5 flognp1 denorm
short src_snan - tbl_trans # $06-4 flognp1 snan
short tbl_trans - tbl_trans # $06-6 flognp1 unnorm
short tbl_trans - tbl_trans # $06-7 ERROR
short tbl_trans - tbl_trans # $07-0 ERROR
short tbl_trans - tbl_trans # $07-1 ERROR
short tbl_trans - tbl_trans # $07-2 ERROR
short tbl_trans - tbl_trans # $07-3 ERROR
short tbl_trans - tbl_trans # $07-4 ERROR
short tbl_trans - tbl_trans # $07-5 ERROR
short tbl_trans - tbl_trans # $07-6 ERROR
short tbl_trans - tbl_trans # $07-7 ERROR
short setoxm1 - tbl_trans # $08-0 fetoxm1 norm
short src_zero - tbl_trans # $08-1 fetoxm1 zero
short setoxm1i - tbl_trans # $08-2 fetoxm1 inf
short src_qnan - tbl_trans # $08-3 fetoxm1 qnan
short setoxm1d - tbl_trans # $08-5 fetoxm1 denorm
short src_snan - tbl_trans # $08-4 fetoxm1 snan
short tbl_trans - tbl_trans # $08-6 fetoxm1 unnorm
short tbl_trans - tbl_trans # $08-7 ERROR
short stanh - tbl_trans # $09-0 ftanh norm
short src_zero - tbl_trans # $09-1 ftanh zero
short src_one - tbl_trans # $09-2 ftanh inf
short src_qnan - tbl_trans # $09-3 ftanh qnan
short stanhd - tbl_trans # $09-5 ftanh denorm
short src_snan - tbl_trans # $09-4 ftanh snan
short tbl_trans - tbl_trans # $09-6 ftanh unnorm
short tbl_trans - tbl_trans # $09-7 ERROR
short satan - tbl_trans # $0a-0 fatan norm
short src_zero - tbl_trans # $0a-1 fatan zero
short spi_2 - tbl_trans # $0a-2 fatan inf
short src_qnan - tbl_trans # $0a-3 fatan qnan
short satand - tbl_trans # $0a-5 fatan denorm
short src_snan - tbl_trans # $0a-4 fatan snan
short tbl_trans - tbl_trans # $0a-6 fatan unnorm
short tbl_trans - tbl_trans # $0a-7 ERROR
short tbl_trans - tbl_trans # $0b-0 ERROR
short tbl_trans - tbl_trans # $0b-1 ERROR
short tbl_trans - tbl_trans # $0b-2 ERROR
short tbl_trans - tbl_trans # $0b-3 ERROR
short tbl_trans - tbl_trans # $0b-4 ERROR
short tbl_trans - tbl_trans # $0b-5 ERROR
short tbl_trans - tbl_trans # $0b-6 ERROR
short tbl_trans - tbl_trans # $0b-7 ERROR
short sasin - tbl_trans # $0c-0 fasin norm
short src_zero - tbl_trans # $0c-1 fasin zero
short t_operr - tbl_trans # $0c-2 fasin inf
short src_qnan - tbl_trans # $0c-3 fasin qnan
short sasind - tbl_trans # $0c-5 fasin denorm
short src_snan - tbl_trans # $0c-4 fasin snan
short tbl_trans - tbl_trans # $0c-6 fasin unnorm
short tbl_trans - tbl_trans # $0c-7 ERROR
short satanh - tbl_trans # $0d-0 fatanh norm
short src_zero - tbl_trans # $0d-1 fatanh zero
short t_operr - tbl_trans # $0d-2 fatanh inf
short src_qnan - tbl_trans # $0d-3 fatanh qnan
short satanhd - tbl_trans # $0d-5 fatanh denorm
short src_snan - tbl_trans # $0d-4 fatanh snan
short tbl_trans - tbl_trans # $0d-6 fatanh unnorm
short tbl_trans - tbl_trans # $0d-7 ERROR
short ssin - tbl_trans # $0e-0 fsin norm
short src_zero - tbl_trans # $0e-1 fsin zero
short t_operr - tbl_trans # $0e-2 fsin inf
short src_qnan - tbl_trans # $0e-3 fsin qnan
short ssind - tbl_trans # $0e-5 fsin denorm
short src_snan - tbl_trans # $0e-4 fsin snan
short tbl_trans - tbl_trans # $0e-6 fsin unnorm
short tbl_trans - tbl_trans # $0e-7 ERROR
short stan - tbl_trans # $0f-0 ftan norm
short src_zero - tbl_trans # $0f-1 ftan zero
short t_operr - tbl_trans # $0f-2 ftan inf
short src_qnan - tbl_trans # $0f-3 ftan qnan
short stand - tbl_trans # $0f-5 ftan denorm
short src_snan - tbl_trans # $0f-4 ftan snan
short tbl_trans - tbl_trans # $0f-6 ftan unnorm
short tbl_trans - tbl_trans # $0f-7 ERROR
short setox - tbl_trans # $10-0 fetox norm
short ld_pone - tbl_trans # $10-1 fetox zero
short szr_inf - tbl_trans # $10-2 fetox inf
short src_qnan - tbl_trans # $10-3 fetox qnan
short setoxd - tbl_trans # $10-5 fetox denorm
short src_snan - tbl_trans # $10-4 fetox snan
short tbl_trans - tbl_trans # $10-6 fetox unnorm
short tbl_trans - tbl_trans # $10-7 ERROR
short stwotox - tbl_trans # $11-0 ftwotox norm
short ld_pone - tbl_trans # $11-1 ftwotox zero
short szr_inf - tbl_trans # $11-2 ftwotox inf
short src_qnan - tbl_trans # $11-3 ftwotox qnan
short stwotoxd - tbl_trans # $11-5 ftwotox denorm
short src_snan - tbl_trans # $11-4 ftwotox snan
short tbl_trans - tbl_trans # $11-6 ftwotox unnorm
short tbl_trans - tbl_trans # $11-7 ERROR
short stentox - tbl_trans # $12-0 ftentox norm
short ld_pone - tbl_trans # $12-1 ftentox zero
short szr_inf - tbl_trans # $12-2 ftentox inf
short src_qnan - tbl_trans # $12-3 ftentox qnan
short stentoxd - tbl_trans # $12-5 ftentox denorm
short src_snan - tbl_trans # $12-4 ftentox snan
short tbl_trans - tbl_trans # $12-6 ftentox unnorm
short tbl_trans - tbl_trans # $12-7 ERROR
short tbl_trans - tbl_trans # $13-0 ERROR
short tbl_trans - tbl_trans # $13-1 ERROR
short tbl_trans - tbl_trans # $13-2 ERROR
short tbl_trans - tbl_trans # $13-3 ERROR
short tbl_trans - tbl_trans # $13-4 ERROR
short tbl_trans - tbl_trans # $13-5 ERROR
short tbl_trans - tbl_trans # $13-6 ERROR
short tbl_trans - tbl_trans # $13-7 ERROR
short slogn - tbl_trans # $14-0 flogn norm
short t_dz2 - tbl_trans # $14-1 flogn zero
short sopr_inf - tbl_trans # $14-2 flogn inf
short src_qnan - tbl_trans # $14-3 flogn qnan
short slognd - tbl_trans # $14-5 flogn denorm
short src_snan - tbl_trans # $14-4 flogn snan
short tbl_trans - tbl_trans # $14-6 flogn unnorm
short tbl_trans - tbl_trans # $14-7 ERROR
short slog10 - tbl_trans # $15-0 flog10 norm
short t_dz2 - tbl_trans # $15-1 flog10 zero
short sopr_inf - tbl_trans # $15-2 flog10 inf
short src_qnan - tbl_trans # $15-3 flog10 qnan
short slog10d - tbl_trans # $15-5 flog10 denorm
short src_snan - tbl_trans # $15-4 flog10 snan
short tbl_trans - tbl_trans # $15-6 flog10 unnorm
short tbl_trans - tbl_trans # $15-7 ERROR
short slog2 - tbl_trans # $16-0 flog2 norm
short t_dz2 - tbl_trans # $16-1 flog2 zero
short sopr_inf - tbl_trans # $16-2 flog2 inf
short src_qnan - tbl_trans # $16-3 flog2 qnan
short slog2d - tbl_trans # $16-5 flog2 denorm
short src_snan - tbl_trans # $16-4 flog2 snan
short tbl_trans - tbl_trans # $16-6 flog2 unnorm
short tbl_trans - tbl_trans # $16-7 ERROR
short tbl_trans - tbl_trans # $17-0 ERROR
short tbl_trans - tbl_trans # $17-1 ERROR
short tbl_trans - tbl_trans # $17-2 ERROR
short tbl_trans - tbl_trans # $17-3 ERROR
short tbl_trans - tbl_trans # $17-4 ERROR
short tbl_trans - tbl_trans # $17-5 ERROR
short tbl_trans - tbl_trans # $17-6 ERROR
short tbl_trans - tbl_trans # $17-7 ERROR
short tbl_trans - tbl_trans # $18-0 fabs norm
short tbl_trans - tbl_trans # $18-1 fabs zero
short tbl_trans - tbl_trans # $18-2 fabs inf
short tbl_trans - tbl_trans # $18-3 fabs qnan
short tbl_trans - tbl_trans # $18-5 fabs denorm
short tbl_trans - tbl_trans # $18-4 fabs snan
short tbl_trans - tbl_trans # $18-6 fabs unnorm
short tbl_trans - tbl_trans # $18-7 ERROR
short scosh - tbl_trans # $19-0 fcosh norm
short ld_pone - tbl_trans # $19-1 fcosh zero
short ld_pinf - tbl_trans # $19-2 fcosh inf
short src_qnan - tbl_trans # $19-3 fcosh qnan
short scoshd - tbl_trans # $19-5 fcosh denorm
short src_snan - tbl_trans # $19-4 fcosh snan
short tbl_trans - tbl_trans # $19-6 fcosh unnorm
short tbl_trans - tbl_trans # $19-7 ERROR
short tbl_trans - tbl_trans # $1a-0 fneg norm
short tbl_trans - tbl_trans # $1a-1 fneg zero
short tbl_trans - tbl_trans # $1a-2 fneg inf
short tbl_trans - tbl_trans # $1a-3 fneg qnan
short tbl_trans - tbl_trans # $1a-5 fneg denorm
short tbl_trans - tbl_trans # $1a-4 fneg snan
short tbl_trans - tbl_trans # $1a-6 fneg unnorm
short tbl_trans - tbl_trans # $1a-7 ERROR
short tbl_trans - tbl_trans # $1b-0 ERROR
short tbl_trans - tbl_trans # $1b-1 ERROR
short tbl_trans - tbl_trans # $1b-2 ERROR
short tbl_trans - tbl_trans # $1b-3 ERROR
short tbl_trans - tbl_trans # $1b-4 ERROR
short tbl_trans - tbl_trans # $1b-5 ERROR
short tbl_trans - tbl_trans # $1b-6 ERROR
short tbl_trans - tbl_trans # $1b-7 ERROR
short sacos - tbl_trans # $1c-0 facos norm
short ld_ppi2 - tbl_trans # $1c-1 facos zero
short t_operr - tbl_trans # $1c-2 facos inf
short src_qnan - tbl_trans # $1c-3 facos qnan
short sacosd - tbl_trans # $1c-5 facos denorm
short src_snan - tbl_trans # $1c-4 facos snan
short tbl_trans - tbl_trans # $1c-6 facos unnorm
short tbl_trans - tbl_trans # $1c-7 ERROR
short scos - tbl_trans # $1d-0 fcos norm
short ld_pone - tbl_trans # $1d-1 fcos zero
short t_operr - tbl_trans # $1d-2 fcos inf
short src_qnan - tbl_trans # $1d-3 fcos qnan
short scosd - tbl_trans # $1d-5 fcos denorm
short src_snan - tbl_trans # $1d-4 fcos snan
short tbl_trans - tbl_trans # $1d-6 fcos unnorm
short tbl_trans - tbl_trans # $1d-7 ERROR
short sgetexp - tbl_trans # $1e-0 fgetexp norm
short src_zero - tbl_trans # $1e-1 fgetexp zero
short t_operr - tbl_trans # $1e-2 fgetexp inf
short src_qnan - tbl_trans # $1e-3 fgetexp qnan
short sgetexpd - tbl_trans # $1e-5 fgetexp denorm
short src_snan - tbl_trans # $1e-4 fgetexp snan
short tbl_trans - tbl_trans # $1e-6 fgetexp unnorm
short tbl_trans - tbl_trans # $1e-7 ERROR
short sgetman - tbl_trans # $1f-0 fgetman norm
short src_zero - tbl_trans # $1f-1 fgetman zero
short t_operr - tbl_trans # $1f-2 fgetman inf
short src_qnan - tbl_trans # $1f-3 fgetman qnan
short sgetmand - tbl_trans # $1f-5 fgetman denorm
short src_snan - tbl_trans # $1f-4 fgetman snan
short tbl_trans - tbl_trans # $1f-6 fgetman unnorm
short tbl_trans - tbl_trans # $1f-7 ERROR
short tbl_trans - tbl_trans # $20-0 fdiv norm
short tbl_trans - tbl_trans # $20-1 fdiv zero
short tbl_trans - tbl_trans # $20-2 fdiv inf
short tbl_trans - tbl_trans # $20-3 fdiv qnan
short tbl_trans - tbl_trans # $20-5 fdiv denorm
short tbl_trans - tbl_trans # $20-4 fdiv snan
short tbl_trans - tbl_trans # $20-6 fdiv unnorm
short tbl_trans - tbl_trans # $20-7 ERROR
short smod_snorm - tbl_trans # $21-0 fmod norm
short smod_szero - tbl_trans # $21-1 fmod zero
short smod_sinf - tbl_trans # $21-2 fmod inf
short sop_sqnan - tbl_trans # $21-3 fmod qnan
short smod_sdnrm - tbl_trans # $21-5 fmod denorm
short sop_ssnan - tbl_trans # $21-4 fmod snan
short tbl_trans - tbl_trans # $21-6 fmod unnorm
short tbl_trans - tbl_trans # $21-7 ERROR
short tbl_trans - tbl_trans # $22-0 fadd norm
short tbl_trans - tbl_trans # $22-1 fadd zero
short tbl_trans - tbl_trans # $22-2 fadd inf
short tbl_trans - tbl_trans # $22-3 fadd qnan
short tbl_trans - tbl_trans # $22-5 fadd denorm
short tbl_trans - tbl_trans # $22-4 fadd snan
short tbl_trans - tbl_trans # $22-6 fadd unnorm
short tbl_trans - tbl_trans # $22-7 ERROR
short tbl_trans - tbl_trans # $23-0 fmul norm
short tbl_trans - tbl_trans # $23-1 fmul zero
short tbl_trans - tbl_trans # $23-2 fmul inf
short tbl_trans - tbl_trans # $23-3 fmul qnan
short tbl_trans - tbl_trans # $23-5 fmul denorm
short tbl_trans - tbl_trans # $23-4 fmul snan
short tbl_trans - tbl_trans # $23-6 fmul unnorm
short tbl_trans - tbl_trans # $23-7 ERROR
short tbl_trans - tbl_trans # $24-0 fsgldiv norm
short tbl_trans - tbl_trans # $24-1 fsgldiv zero
short tbl_trans - tbl_trans # $24-2 fsgldiv inf
short tbl_trans - tbl_trans # $24-3 fsgldiv qnan
short tbl_trans - tbl_trans # $24-5 fsgldiv denorm
short tbl_trans - tbl_trans # $24-4 fsgldiv snan
short tbl_trans - tbl_trans # $24-6 fsgldiv unnorm
short tbl_trans - tbl_trans # $24-7 ERROR
short srem_snorm - tbl_trans # $25-0 frem norm
short srem_szero - tbl_trans # $25-1 frem zero
short srem_sinf - tbl_trans # $25-2 frem inf
short sop_sqnan - tbl_trans # $25-3 frem qnan
short srem_sdnrm - tbl_trans # $25-5 frem denorm
short sop_ssnan - tbl_trans # $25-4 frem snan
short tbl_trans - tbl_trans # $25-6 frem unnorm
short tbl_trans - tbl_trans # $25-7 ERROR
short sscale_snorm - tbl_trans # $26-0 fscale norm
short sscale_szero - tbl_trans # $26-1 fscale zero
short sscale_sinf - tbl_trans # $26-2 fscale inf
short sop_sqnan - tbl_trans # $26-3 fscale qnan
short sscale_sdnrm - tbl_trans # $26-5 fscale denorm
short sop_ssnan - tbl_trans # $26-4 fscale snan
short tbl_trans - tbl_trans # $26-6 fscale unnorm
short tbl_trans - tbl_trans # $26-7 ERROR
short tbl_trans - tbl_trans # $27-0 fsglmul norm
short tbl_trans - tbl_trans # $27-1 fsglmul zero
short tbl_trans - tbl_trans # $27-2 fsglmul inf
short tbl_trans - tbl_trans # $27-3 fsglmul qnan
short tbl_trans - tbl_trans # $27-5 fsglmul denorm
short tbl_trans - tbl_trans # $27-4 fsglmul snan
short tbl_trans - tbl_trans # $27-6 fsglmul unnorm
short tbl_trans - tbl_trans # $27-7 ERROR
short tbl_trans - tbl_trans # $28-0 fsub norm
short tbl_trans - tbl_trans # $28-1 fsub zero
short tbl_trans - tbl_trans # $28-2 fsub inf
short tbl_trans - tbl_trans # $28-3 fsub qnan
short tbl_trans - tbl_trans # $28-5 fsub denorm
short tbl_trans - tbl_trans # $28-4 fsub snan
short tbl_trans - tbl_trans # $28-6 fsub unnorm
short tbl_trans - tbl_trans # $28-7 ERROR
short tbl_trans - tbl_trans # $29-0 ERROR
short tbl_trans - tbl_trans # $29-1 ERROR
short tbl_trans - tbl_trans # $29-2 ERROR
short tbl_trans - tbl_trans # $29-3 ERROR
short tbl_trans - tbl_trans # $29-4 ERROR
short tbl_trans - tbl_trans # $29-5 ERROR
short tbl_trans - tbl_trans # $29-6 ERROR
short tbl_trans - tbl_trans # $29-7 ERROR
short tbl_trans - tbl_trans # $2a-0 ERROR
short tbl_trans - tbl_trans # $2a-1 ERROR
short tbl_trans - tbl_trans # $2a-2 ERROR
short tbl_trans - tbl_trans # $2a-3 ERROR
short tbl_trans - tbl_trans # $2a-4 ERROR
short tbl_trans - tbl_trans # $2a-5 ERROR
short tbl_trans - tbl_trans # $2a-6 ERROR
short tbl_trans - tbl_trans # $2a-7 ERROR
short tbl_trans - tbl_trans # $2b-0 ERROR
short tbl_trans - tbl_trans # $2b-1 ERROR
short tbl_trans - tbl_trans # $2b-2 ERROR
short tbl_trans - tbl_trans # $2b-3 ERROR
short tbl_trans - tbl_trans # $2b-4 ERROR
short tbl_trans - tbl_trans # $2b-5 ERROR
short tbl_trans - tbl_trans # $2b-6 ERROR
short tbl_trans - tbl_trans # $2b-7 ERROR
short tbl_trans - tbl_trans # $2c-0 ERROR
short tbl_trans - tbl_trans # $2c-1 ERROR
short tbl_trans - tbl_trans # $2c-2 ERROR
short tbl_trans - tbl_trans # $2c-3 ERROR
short tbl_trans - tbl_trans # $2c-4 ERROR
short tbl_trans - tbl_trans # $2c-5 ERROR
short tbl_trans - tbl_trans # $2c-6 ERROR
short tbl_trans - tbl_trans # $2c-7 ERROR
short tbl_trans - tbl_trans # $2d-0 ERROR
short tbl_trans - tbl_trans # $2d-1 ERROR
short tbl_trans - tbl_trans # $2d-2 ERROR
short tbl_trans - tbl_trans # $2d-3 ERROR
short tbl_trans - tbl_trans # $2d-4 ERROR
short tbl_trans - tbl_trans # $2d-5 ERROR
short tbl_trans - tbl_trans # $2d-6 ERROR
short tbl_trans - tbl_trans # $2d-7 ERROR
short tbl_trans - tbl_trans # $2e-0 ERROR
short tbl_trans - tbl_trans # $2e-1 ERROR
short tbl_trans - tbl_trans # $2e-2 ERROR
short tbl_trans - tbl_trans # $2e-3 ERROR
short tbl_trans - tbl_trans # $2e-4 ERROR
short tbl_trans - tbl_trans # $2e-5 ERROR
short tbl_trans - tbl_trans # $2e-6 ERROR
short tbl_trans - tbl_trans # $2e-7 ERROR
short tbl_trans - tbl_trans # $2f-0 ERROR
short tbl_trans - tbl_trans # $2f-1 ERROR
short tbl_trans - tbl_trans # $2f-2 ERROR
short tbl_trans - tbl_trans # $2f-3 ERROR
short tbl_trans - tbl_trans # $2f-4 ERROR
short tbl_trans - tbl_trans # $2f-5 ERROR
short tbl_trans - tbl_trans # $2f-6 ERROR
short tbl_trans - tbl_trans # $2f-7 ERROR
short ssincos - tbl_trans # $30-0 fsincos norm
short ssincosz - tbl_trans # $30-1 fsincos zero
short ssincosi - tbl_trans # $30-2 fsincos inf
short ssincosqnan - tbl_trans # $30-3 fsincos qnan
short ssincosd - tbl_trans # $30-5 fsincos denorm
short ssincossnan - tbl_trans # $30-4 fsincos snan
short tbl_trans - tbl_trans # $30-6 fsincos unnorm
short tbl_trans - tbl_trans # $30-7 ERROR
short ssincos - tbl_trans # $31-0 fsincos norm
short ssincosz - tbl_trans # $31-1 fsincos zero
short ssincosi - tbl_trans # $31-2 fsincos inf
short ssincosqnan - tbl_trans # $31-3 fsincos qnan
short ssincosd - tbl_trans # $31-5 fsincos denorm
short ssincossnan - tbl_trans # $31-4 fsincos snan
short tbl_trans - tbl_trans # $31-6 fsincos unnorm
short tbl_trans - tbl_trans # $31-7 ERROR
short ssincos - tbl_trans # $32-0 fsincos norm
short ssincosz - tbl_trans # $32-1 fsincos zero
short ssincosi - tbl_trans # $32-2 fsincos inf
short ssincosqnan - tbl_trans # $32-3 fsincos qnan
short ssincosd - tbl_trans # $32-5 fsincos denorm
short ssincossnan - tbl_trans # $32-4 fsincos snan
short tbl_trans - tbl_trans # $32-6 fsincos unnorm
short tbl_trans - tbl_trans # $32-7 ERROR
short ssincos - tbl_trans # $33-0 fsincos norm
short ssincosz - tbl_trans # $33-1 fsincos zero
short ssincosi - tbl_trans # $33-2 fsincos inf
short ssincosqnan - tbl_trans # $33-3 fsincos qnan
short ssincosd - tbl_trans # $33-5 fsincos denorm
short ssincossnan - tbl_trans # $33-4 fsincos snan
short tbl_trans - tbl_trans # $33-6 fsincos unnorm
short tbl_trans - tbl_trans # $33-7 ERROR
short ssincos - tbl_trans # $34-0 fsincos norm
short ssincosz - tbl_trans # $34-1 fsincos zero
short ssincosi - tbl_trans # $34-2 fsincos inf
short ssincosqnan - tbl_trans # $34-3 fsincos qnan
short ssincosd - tbl_trans # $34-5 fsincos denorm
short ssincossnan - tbl_trans # $34-4 fsincos snan
short tbl_trans - tbl_trans # $34-6 fsincos unnorm
short tbl_trans - tbl_trans # $34-7 ERROR
short ssincos - tbl_trans # $35-0 fsincos norm
short ssincosz - tbl_trans # $35-1 fsincos zero
short ssincosi - tbl_trans # $35-2 fsincos inf
short ssincosqnan - tbl_trans # $35-3 fsincos qnan
short ssincosd - tbl_trans # $35-5 fsincos denorm
short ssincossnan - tbl_trans # $35-4 fsincos snan
short tbl_trans - tbl_trans # $35-6 fsincos unnorm
short tbl_trans - tbl_trans # $35-7 ERROR
short ssincos - tbl_trans # $36-0 fsincos norm
short ssincosz - tbl_trans # $36-1 fsincos zero
short ssincosi - tbl_trans # $36-2 fsincos inf
short ssincosqnan - tbl_trans # $36-3 fsincos qnan
short ssincosd - tbl_trans # $36-5 fsincos denorm
short ssincossnan - tbl_trans # $36-4 fsincos snan
short tbl_trans - tbl_trans # $36-6 fsincos unnorm
short tbl_trans - tbl_trans # $36-7 ERROR
short ssincos - tbl_trans # $37-0 fsincos norm
short ssincosz - tbl_trans # $37-1 fsincos zero
short ssincosi - tbl_trans # $37-2 fsincos inf
short ssincosqnan - tbl_trans # $37-3 fsincos qnan
short ssincosd - tbl_trans # $37-5 fsincos denorm
short ssincossnan - tbl_trans # $37-4 fsincos snan
short tbl_trans - tbl_trans # $37-6 fsincos unnorm
short tbl_trans - tbl_trans # $37-7 ERROR
##########
# the instruction fetch access for the displacement word for the
# fdbcc emulation failed. here, we create an access error frame
# from the current frame and branch to _real_access().
funimp_iacc:
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
unlk %a6
mov.l (%sp),-(%sp) # store SR,hi(PC)
mov.w 0x8(%sp),0x4(%sp) # store lo(PC)
mov.w &0x4008,0x6(%sp) # store voff
mov.l 0x2(%sp),0x8(%sp) # store EA
mov.l &0x09428001,0xc(%sp) # store FSLW
btst &0x5,(%sp) # user or supervisor mode?
beq.b funimp_iacc_end # user
bset &0x2,0xd(%sp) # set supervisor TM bit
funimp_iacc_end:
bra.l _real_access
#########################################################################
# ssin(): computes the sine of a normalized input #
# ssind(): computes the sine of a denormalized input #
# scos(): computes the cosine of a normalized input #
# scosd(): computes the cosine of a denormalized input #
# ssincos(): computes the sine and cosine of a normalized input #
# ssincosd(): computes the sine and cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = sin(X) or cos(X) #
# #
# For ssincos(X): #
# fp0 = sin(X) #
# fp1 = cos(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 1 ulp in 64 significant bits, i.e. #
# within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# SIN and COS: #
# 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
# #
# 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7. #
# #
# 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 4, so in particular, k = 0,1,2,or 3. #
# Overwrite k by k := k + AdjN. #
# #
# 4. If k is even, go to 6. #
# #
# 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. #
# Return sgn*cos(r) where cos(r) is approximated by an #
# even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)), #
# s = r*r. #
# Exit. #
# #
# 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r) #
# where sin(r) is approximated by an odd polynomial in r #
# r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r. #
# Exit. #
# #
# 7. If |X| > 1, go to 9. #
# #
# 8. (|X|<2**(-40)) If SIN is invoked, return X; #
# otherwise return 1. #
# #
# 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
# go back to 3. #
# #
# SINCOS: #
# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
# #
# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 4, so in particular, k = 0,1,2,or 3. #
# #
# 3. If k is even, go to 5. #
# #
# 4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), ie. #
# j1 exclusive or with the l.s.b. of k. #
# sgn1 := (-1)**j1, sgn2 := (-1)**j2. #
# SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where #
# sin(r) and cos(r) are computed as odd and even #
# polynomials in r, respectively. Exit #
# #
# 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1. #
# SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where #
# sin(r) and cos(r) are computed as odd and even #
# polynomials in r, respectively. Exit #
# #
# 6. If |X| > 1, go to 8. #
# #
# 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit. #
# #
# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
# go back to 2. #
# #
#########################################################################
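#
# The steps above can be hard to follow in the assembly below, so here is a
# hedged, double-precision C sketch of the SIN/COS flow (steps 1-6).  It is
# an illustration only: it leans on remainder()/lround() from libm for the
# argument reduction and on truncated Taylor coefficients, whereas the code
# below uses the PITBL look-up, extended precision, and minimax coefficients.
#
#   #include <math.h>
#
#   /* adjn = 0 computes sin(x), adjn = 1 computes cos(x); the tiny and
#    * huge argument paths (steps 7-9) are omitted for brevity.        */
#   static double sincos_core(double x, int adjn)
#   {
#       double r = remainder(x, M_PI_2);      /* x = n*(pi/2) + r, |r| <= pi/4 */
#       long   n = lround((x - r) / M_PI_2);  /* quotient n                    */
#       long   k = ((n % 4) + 4 + adjn) % 4;  /* k = (n mod 4) + adjn          */
#       double s = r * r;
#
#       if (k % 2) {                          /* k odd: return sgn*cos(r)      */
#           double sgn = (((k - 1) / 2) % 2) ? -1.0 : 1.0;
#           return sgn * (1.0 + s*(-0.5 + s*(1.0/24 - s/720)));
#       } else {                              /* k even: return sgn*sin(r)     */
#           double sgn = ((k / 2) % 2) ? -1.0 : 1.0;
#           return sgn * (r + r*s*(-1.0/6 + s*(1.0/120 - s/5040)));
#       }
#   }
#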
SINA7: long 0xBD6AAA77,0xCCC994F5
SINA6: long 0x3DE61209,0x7AAE8DA1
SINA5: long 0xBE5AE645,0x2A118AE4
SINA4: long 0x3EC71DE3,0xA5341531
SINA3: long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
SINA2: long 0x3FF80000,0x88888888,0x888859AF,0x00000000
SINA1: long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
COSB8: long 0x3D2AC4D0,0xD6011EE3
COSB7: long 0xBDA9396F,0x9F45AC19
COSB6: long 0x3E21EED9,0x0612C972
COSB5: long 0xBE927E4F,0xB79D9FCF
COSB4: long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
COSB3: long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
COSB2: long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
COSB1: long 0xBF000000
set INARG,FP_SCR0
set X,FP_SCR0
# set XDCARE,X+2
set XFRAC,X+4
set RPRIME,FP_SCR0
set SPRIME,FP_SCR1
set POSNEG1,L_SCR1
set TWOTO63,L_SCR1
set ENDFLAG,L_SCR2
set INT,L_SCR2
set ADJN,L_SCR3
############################################
global ssin
ssin:
mov.l &0,ADJN(%a6) # yes; SET ADJN TO 0
bra.b SINBGN
############################################
global scos
scos:
mov.l &1,ADJN(%a6) # yes; SET ADJN TO 1
############################################
SINBGN:
#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6) # save input at X
# "COMPACTIFY" X
mov.l (%a0),%d1 # put exp in hi word
mov.w 4(%a0),%d1 # fetch hi(man)
and.l &0x7FFFFFFF,%d1 # strip sign
cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
bge.b SOK1 # yes
bra.w SINSM # no; input is very small
SOK1:
cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
blt.b SINMAIN # yes
bra.w SREDUCEX # no; input is very large
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
SINMAIN:
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
mov.l INT(%a6),%d1 # make a copy of N
asl.l &4,%d1 # N *= 16
add.l %d1,%a1 # tbl_addr = a1 + (N*16)
# A1 IS THE ADDRESS OF N*PIBY2
# ...WHICH IS IN TWO PIECES Y1 & Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # fp0 = R = (X-Y1)-Y2
SINCONT:
#--continuation from REDUCEX
#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
mov.l INT(%a6),%d1
add.l ADJN(%a6),%d1 # SEE IF D0 IS ODD OR EVEN
ror.l &1,%d1 # D0 WAS ODD IFF D0 IS NEGATIVE
cmp.l %d1,&0
blt.w COSPOLY
#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
#--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
#--WHERE T=S*S.
#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
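#
# A hedged C rendering of the split evaluation described above (a1..a7 stand
# for the SINA1..SINA7 constants); the point of the split is that the two
# bracketed chains are independent of each other, which is what the
# interleaved fadd/fmul pairs below exploit:
#
#   /* rp = sgn*r, s = r*r, t = s*s */
#   static double sin_poly(double rp, double s, double t,
#                          double a1, double a2, double a3, double a4,
#                          double a5, double a6, double a7)
#   {
#       double odd_chain  = a1 + t*(a3 + t*(a5 + t*a7));
#       double even_chain = s*(a2 + t*(a4 + t*a6));
#       return rp + rp*s*(odd_chain + even_chain);
#   }
#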
SINPOLY:
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.x %fp0,X(%a6) # X IS R
fmul.x %fp0,%fp0 # FP0 IS S
fmov.d SINA7(%pc),%fp3
fmov.d SINA6(%pc),%fp2
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS T
ror.l &1,%d1
and.l &0x80000000,%d1
# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
fmul.x %fp1,%fp3 # TA7
fmul.x %fp1,%fp2 # TA6
fadd.d SINA5(%pc),%fp3 # A5+TA7
fadd.d SINA4(%pc),%fp2 # A4+TA6
fmul.x %fp1,%fp3 # T(A5+TA7)
fmul.x %fp1,%fp2 # T(A4+TA6)
fadd.d SINA3(%pc),%fp3 # A3+T(A5+TA7)
fadd.x SINA2(%pc),%fp2 # A2+T(A4+TA6)
fmul.x %fp3,%fp1 # T(A3+T(A5+TA7))
fmul.x %fp0,%fp2 # S(A2+T(A4+TA6))
fadd.x SINA1(%pc),%fp1 # A1+T(A3+T(A5+TA7))
fmul.x X(%a6),%fp0 # R'*S
fadd.x %fp2,%fp1 # [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
fmul.x %fp1,%fp0 # SIN(R')-R'
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.x X(%a6),%fp0 # last inst - possible exception set
bra t_inx2
#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
#--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
#--WHERE T=S*S.
#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
#--AND IS THEREFORE STORED AS SINGLE PRECISION.
COSPOLY:
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmul.x %fp0,%fp0 # FP0 IS S
fmov.d COSB8(%pc),%fp2
fmov.d COSB7(%pc),%fp3
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS T
fmov.x %fp0,X(%a6) # X IS S
ror.l &1,%d1
and.l &0x80000000,%d1
# ...LEAST SIG. BIT OF D0 IN SIGN POSITION
fmul.x %fp1,%fp2 # TB8
eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
and.l &0x80000000,%d1
fmul.x %fp1,%fp3 # TB7
or.l &0x3F800000,%d1 # D0 IS SGN IN SINGLE
mov.l %d1,POSNEG1(%a6)
fadd.d COSB6(%pc),%fp2 # B6+TB8
fadd.d COSB5(%pc),%fp3 # B5+TB7
fmul.x %fp1,%fp2 # T(B6+TB8)
fmul.x %fp1,%fp3 # T(B5+TB7)
fadd.d COSB4(%pc),%fp2 # B4+T(B6+TB8)
fadd.x COSB3(%pc),%fp3 # B3+T(B5+TB7)
fmul.x %fp1,%fp2 # T(B4+T(B6+TB8))
fmul.x %fp3,%fp1 # T(B3+T(B5+TB7))
fadd.x COSB2(%pc),%fp2 # B2+T(B4+T(B6+TB8))
fadd.s COSB1(%pc),%fp1 # B1+T(B3+T(B5+TB7))
fmul.x %fp2,%fp0 # S(B2+T(B4+T(B6+TB8)))
fadd.x %fp1,%fp0
fmul.x X(%a6),%fp0
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.s POSNEG1(%a6),%fp0 # last inst - possible exception set
bra t_inx2
##############################################
# SINe: Big OR Small?
#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
#--IF |X| < 2**(-40), RETURN X OR 1.
SINBORS:
cmp.l %d1,&0x3FFF8000
bgt.l SREDUCEX
SINSM:
mov.l ADJN(%a6),%d1
cmp.l %d1,&0
bgt.b COSTINY
# here, the operation may underflow iff the precision is sgl or dbl.
# extended denorms are handled through another entry point.
SINTINY:
# mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
fmov.l %d0,%fpcr # restore users round mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
COSTINY:
fmov.s &0x3F800000,%fp0 # fp0 = 1.0
fmov.l %d0,%fpcr # restore users round mode,prec
fadd.s &0x80800000,%fp0 # last inst - possible exception set
bra t_pinx2
################################################
global ssind
#--SIN(X) = X FOR DENORMALIZED X
ssind:
bra t_extdnrm
############################################
global scosd
#--COS(X) = 1 FOR DENORMALIZED X
scosd:
fmov.s &0x3F800000,%fp0 # fp0 = 1.0
bra t_pinx2
##################################################
global ssincos
ssincos:
#--SET ADJN TO 4
mov.l &4,ADJN(%a6)
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6)
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
bge.b SCOK1
bra.w SCSM
SCOK1:
cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
blt.b SCMAIN
bra.w SREDUCEX
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
SCMAIN:
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
mov.l INT(%a6),%d1
asl.l &4,%d1
add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
SCCONT:
#--continuation point from REDUCEX
mov.l INT(%a6),%d1
ror.l &1,%d1
cmp.l %d1,&0 # D0 < 0 IFF N IS ODD
bge.w NEVEN
SNODD:
#--REGISTERS SAVED SO FAR: D0, A0, FP2.
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,RPRIME(%a6)
fmul.x %fp0,%fp0 # FP0 IS S = R*R
fmov.d SINA7(%pc),%fp1 # A7
fmov.d COSB8(%pc),%fp2 # B8
fmul.x %fp0,%fp1 # SA7
fmul.x %fp0,%fp2 # SB8
mov.l %d2,-(%sp)
mov.l %d1,%d2
ror.l &1,%d2
and.l &0x80000000,%d2
eor.l %d1,%d2
and.l &0x80000000,%d2
fadd.d SINA6(%pc),%fp1 # A6+SA7
fadd.d COSB7(%pc),%fp2 # B7+SB8
fmul.x %fp0,%fp1 # S(A6+SA7)
eor.l %d2,RPRIME(%a6)
mov.l (%sp)+,%d2
fmul.x %fp0,%fp2 # S(B7+SB8)
ror.l &1,%d1
and.l &0x80000000,%d1
mov.l &0x3F800000,POSNEG1(%a6)
eor.l %d1,POSNEG1(%a6)
fadd.d SINA5(%pc),%fp1 # A5+S(A6+SA7)
fadd.d COSB6(%pc),%fp2 # B6+S(B7+SB8)
fmul.x %fp0,%fp1 # S(A5+S(A6+SA7))
fmul.x %fp0,%fp2 # S(B6+S(B7+SB8))
fmov.x %fp0,SPRIME(%a6)
fadd.d SINA4(%pc),%fp1 # A4+S(A5+S(A6+SA7))
eor.l %d1,SPRIME(%a6)
fadd.d COSB5(%pc),%fp2 # B5+S(B6+S(B7+SB8))
fmul.x %fp0,%fp1 # S(A4+...)
fmul.x %fp0,%fp2 # S(B5+...)
fadd.d SINA3(%pc),%fp1 # A3+S(A4+...)
fadd.d COSB4(%pc),%fp2 # B4+S(B5+...)
fmul.x %fp0,%fp1 # S(A3+...)
fmul.x %fp0,%fp2 # S(B4+...)
fadd.x SINA2(%pc),%fp1 # A2+S(A3+...)
fadd.x COSB3(%pc),%fp2 # B3+S(B4+...)
fmul.x %fp0,%fp1 # S(A2+...)
fmul.x %fp0,%fp2 # S(B3+...)
fadd.x SINA1(%pc),%fp1 # A1+S(A2+...)
fadd.x COSB2(%pc),%fp2 # B2+S(B3+...)
fmul.x %fp0,%fp1 # S(A1+...)
fmul.x %fp2,%fp0 # S(B2+...)
fmul.x RPRIME(%a6),%fp1 # R'S(A1+...)
fadd.s COSB1(%pc),%fp0 # B1+S(B2...)
fmul.x SPRIME(%a6),%fp0 # S'(B1+S(B2+...))
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr
fadd.x RPRIME(%a6),%fp1 # COS(X)
bsr sto_cos # store cosine result
fadd.s POSNEG1(%a6),%fp0 # SIN(X)
bra t_inx2
NEVEN:
#--REGISTERS SAVED SO FAR: FP2.
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,RPRIME(%a6)
fmul.x %fp0,%fp0 # FP0 IS S = R*R
fmov.d COSB8(%pc),%fp1 # B8
fmov.d SINA7(%pc),%fp2 # A7
fmul.x %fp0,%fp1 # SB8
fmov.x %fp0,SPRIME(%a6)
fmul.x %fp0,%fp2 # SA7
ror.l &1,%d1
and.l &0x80000000,%d1
fadd.d COSB7(%pc),%fp1 # B7+SB8
fadd.d SINA6(%pc),%fp2 # A6+SA7
eor.l %d1,RPRIME(%a6)
eor.l %d1,SPRIME(%a6)
fmul.x %fp0,%fp1 # S(B7+SB8)
or.l &0x3F800000,%d1
mov.l %d1,POSNEG1(%a6)
fmul.x %fp0,%fp2 # S(A6+SA7)
fadd.d COSB6(%pc),%fp1 # B6+S(B7+SB8)
fadd.d SINA5(%pc),%fp2 # A5+S(A6+SA7)
fmul.x %fp0,%fp1 # S(B6+S(B7+SB8))
fmul.x %fp0,%fp2 # S(A5+S(A6+SA7))
fadd.d COSB5(%pc),%fp1 # B5+S(B6+S(B7+SB8))
fadd.d SINA4(%pc),%fp2 # A4+S(A5+S(A6+SA7))
fmul.x %fp0,%fp1 # S(B5+...)
fmul.x %fp0,%fp2 # S(A4+...)
fadd.d COSB4(%pc),%fp1 # B4+S(B5+...)
fadd.d SINA3(%pc),%fp2 # A3+S(A4+...)
fmul.x %fp0,%fp1 # S(B4+...)
fmul.x %fp0,%fp2 # S(A3+...)
fadd.x COSB3(%pc),%fp1 # B3+S(B4+...)
fadd.x SINA2(%pc),%fp2 # A2+S(A3+...)
fmul.x %fp0,%fp1 # S(B3+...)
fmul.x %fp0,%fp2 # S(A2+...)
fadd.x COSB2(%pc),%fp1 # B2+S(B3+...)
fadd.x SINA1(%pc),%fp2 # A1+S(A2+...)
fmul.x %fp0,%fp1 # S(B2+...)
fmul.x %fp2,%fp0 # s(a1+...)
fadd.s COSB1(%pc),%fp1 # B1+S(B2...)
fmul.x RPRIME(%a6),%fp0 # R'S(A1+...)
fmul.x SPRIME(%a6),%fp1 # S'(B1+S(B2+...))
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr
fadd.s POSNEG1(%a6),%fp1 # COS(X)
bsr sto_cos # store cosine result
fadd.x RPRIME(%a6),%fp0 # SIN(X)
bra t_inx2
################################################
SCBORS:
cmp.l %d1,&0x3FFF8000
bgt.w SREDUCEX
################################################
SCSM:
# mov.w &0x0000,XDCARE(%a6)
fmov.s &0x3F800000,%fp1
fmov.l %d0,%fpcr
fsub.s &0x00800000,%fp1
bsr sto_cos # store cosine result
fmov.l %fpcr,%d0 # d0 must have fpcr,too
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0
bra t_catch
##############################################
global ssincosd
#--SIN AND COS OF X FOR DENORMALIZED X
ssincosd:
mov.l %d0,-(%sp) # save d0
fmov.s &0x3F800000,%fp1
bsr sto_cos # store cosine result
mov.l (%sp)+,%d0 # restore d0
bra t_extdnrm
############################################
#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
SREDUCEX:
fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
mov.l %d2,-(%sp) # save d2
fmov.s &0x00000000,%fp1 # fp1 = 0
#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
#--there is a danger of unwanted overflow in first LOOP iteration. In this
#--case, reduce argument by one remainder step to make subsequent reduction
#--safe.
cmp.l %d1,&0x7ffeffff # is arg dangerously large?
bne.b SLOOP # no
# yes; create 2**16383*PI/2
mov.w &0x7ffe,FP_SCR0_EX(%a6)
mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
# create low half of 2**16383*PI/2 at FP_SCR1
mov.w &0x7fdc,FP_SCR1_EX(%a6)
mov.l &0x85a308d3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
ftest.x %fp0 # test sign of argument
fblt.w sred_neg
or.b &0x80,FP_SCR0_EX(%a6) # positive arg
or.b &0x80,FP_SCR1_EX(%a6)
sred_neg:
fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
fmov.x %fp0,%fp1 # save high result in fp1
fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
fsub.x %fp0,%fp1 # determine low component of result
fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
#--integer quotient will be stored in N
#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
SLOOP:
fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
mov.w INARG(%a6),%d1
mov.l %d1,%a1 # save a copy of D0
and.l &0x00007FFF,%d1
sub.l &0x00003FFF,%d1 # d0 = K
cmp.l %d1,&28
ble.b SLASTLOOP
SCONTLOOP:
sub.l &27,%d1 # d0 = L := K-27
mov.b &0,ENDFLAG(%a6)
bra.b SWORK
SLASTLOOP:
clr.l %d1 # d0 = L := 0
mov.b &1,ENDFLAG(%a6)
SWORK:
#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
#--2**L * (PIby2_1), 2**L * (PIby2_2)
mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
mov.l &0x4E44152A,FP_SCR0_LO(%a6)
mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
fmov.x %fp0,%fp2
fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
#--US THE DESIRED VALUE IN FLOATING POINT.
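#
# A hedged double-precision analogue of the 2**63 trick described above:
# adding and then subtracting a constant whose lsb has weight 1 forces the
# fraction to be rounded away, leaving the nearest integer still in
# floating-point format.  For doubles the constant is 2**52; the sketch
# assumes |x| < 2**51, round-to-nearest mode, and no FP reassociation.
#
#   #include <math.h>
#
#   static double round_via_bias(double x)
#   {
#       const double bias = 0x1p52;       /* 2**52 */
#       double b = copysign(bias, x);     /* match the argument's sign, as the
#                                            code here uses SIGN(INARG)*2**63 */
#       return (x + b) - b;
#   }
#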
mov.l %a1,%d2
swap %d2
and.l &0x80000000,%d2
or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
mov.l %d2,TWOTO63(%a6)
fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
fsub.s TWOTO63(%a6),%fp2 # fp2 = N
# fint.x %fp2
#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
mov.l %d1,%d2 # d2 = L
add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
mov.w %d2,FP_SCR0_EX(%a6)
mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
add.l &0x00003FDD,%d1
mov.w %d1,FP_SCR1_EX(%a6)
mov.l &0x85A308D3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
mov.b ENDFLAG(%a6),%d1
#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
#--P2 = 2**(L) * Piby2_2
fmov.x %fp2,%fp4 # fp4 = N
fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
fmov.x %fp2,%fp5 # fp5 = N
fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
fmov.x %fp4,%fp3 # fp3 = W = N*P1
#--we want P+p = W+w but |p| <= half ulp of P
#--Then, we need to compute A := R-P and a := r-p
fadd.x %fp5,%fp3 # fp3 = P
fsub.x %fp3,%fp4 # fp4 = W-P
fsub.x %fp3,%fp0 # fp0 = A := R - P
fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
fmov.x %fp0,%fp3 # fp3 = A
fsub.x %fp4,%fp1 # fp1 = a := r - p
#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
#--|r| <= half ulp of R.
fadd.x %fp1,%fp0 # fp0 = R := A+a
#--No need to calculate r if this is the last loop
cmp.b %d1,&0
bgt.w SRESTORE
#--Need to calculate r
fsub.x %fp0,%fp3 # fp3 = A-R
fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
bra.w SLOOP
SRESTORE:
fmov.l %fp2,INT(%a6)
mov.l (%sp)+,%d2 # restore d2
fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
mov.l ADJN(%a6),%d1
cmp.l %d1,&4
blt.w SINCONT
bra.w SCCONT
#########################################################################
# stan(): computes the tangent of a normalized input #
# stand(): computes the tangent of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = tan(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bits, i.e. #
# within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
# #
# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
# k = N mod 2, so in particular, k = 0 or 1. #
# #
# 3. If k is odd, go to 5. #
# #
# 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a #
# rational function U/V where #
# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r. #
# Exit. #
# #
# 5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
# a rational function U/V where #
# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r, #
# -Cot(r) = -V/U. Exit. #
# #
# 6. If |X| > 1, go to 8. #
# #
# 7. (|X|<2**(-40)) Tan(X) = X. Exit. #
# #
# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back #
# to 2. #
# #
#########################################################################
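#
# A hedged C sketch of steps 1-5 above, using remainder()/lround() for the
# reduction and a low-order Pade approximant in place of the TANP/TANQ
# coefficients used by the assembly below; the interesting part is the
# k = N mod 2 decision between tan(r) = U/V and -cot(r) = -V/U.
#
#   #include <math.h>
#
#   static double tan_core(double x)      /* tiny/huge paths omitted */
#   {
#       double r = remainder(x, M_PI_2);  /* x = n*(pi/2) + r, |r| <= pi/4 */
#       long   n = lround((x - r) / M_PI_2);
#       double s = r * r;
#       double u = r * (15.0 - s);        /* tan(r) ~= u/v (Pade [3/2])    */
#       double v = 15.0 - 6.0*s;
#       if (n % 2 != 0)                   /* k odd:  tan(x) = -cot(r)      */
#           return -v / u;
#       return u / v;                     /* k even: tan(x) = tan(r)       */
#   }
#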
TANQ4:
long 0x3EA0B759,0xF50F8688
TANP3:
long 0xBEF2BAA5,0xA8924F04
TANQ3:
long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
TANP2:
long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
TANQ2:
long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
TANP1:
long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
TANQ1:
long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
INVTWOPI:
long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
TWOPI1:
long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
TWOPI2:
long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
#--MOST 69 BITS LONG.
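#
# The entries below keep N*PI/2 in two pieces so that R = (X-Y1)-Y2 stays
# accurate to roughly 69 bits.  A hedged double-double style analogue:
#
#   /* y1 holds the leading bits of n*pi/2 and y2 the trailing bits; the
#    * first subtraction is (nearly) exact because x and y1 are close,
#    * and the second removes the remaining error term.               */
#   static double reduce_two_piece(double x, double y1, double y2)
#   {
#       return (x - y1) - y2;
#   }
#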
# global PITBL
PITBL:
long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
long 0xC0040000,0x90836524,0x88034B96,0x20B00000
long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
long 0x40040000,0x90836524,0x88034B96,0xA0B00000
long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
set INARG,FP_SCR0
set TWOTO63,L_SCR1
set INT,L_SCR1
set ENDFLAG,L_SCR2
global stan
stan:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
bge.b TANOK1
bra.w TANSM
TANOK1:
cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
blt.b TANMAIN
bra.w REDUCEX
TANMAIN:
#--THIS IS THE USUAL CASE, |X| <= 15 PI.
#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
fmov.x %fp0,%fp1
fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
fmov.l %fp1,%d1 # CONVERT TO INTEGER
asl.l &4,%d1
add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
fsub.x (%a1)+,%fp0 # X-Y1
fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
ror.l &5,%d1
and.l &0x80000000,%d1 # D0 WAS ODD IFF D0 < 0
TANCONT:
fmovm.x &0x0c,-(%sp) # save fp2,fp3
cmp.l %d1,&0
blt.w NODD
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # S = R*R
fmov.d TANQ4(%pc),%fp3
fmov.d TANP3(%pc),%fp2
fmul.x %fp1,%fp3 # SQ4
fmul.x %fp1,%fp2 # SP3
fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
fadd.x TANP2(%pc),%fp2 # P2+SP3
fmul.x %fp1,%fp3 # S(Q3+SQ4)
fmul.x %fp1,%fp2 # S(P2+SP3)
fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
fmul.x %fp1,%fp3 # S(Q2+S(Q3+SQ4))
fmul.x %fp1,%fp2 # S(P1+S(P2+SP3))
fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
fmul.x %fp0,%fp2 # RS(P1+S(P2+SP3))
fmul.x %fp3,%fp1 # S(Q1+S(Q2+S(Q3+SQ4)))
fadd.x %fp2,%fp0 # R+RS(P1+S(P2+SP3))
fadd.s &0x3F800000,%fp1 # 1+S(Q1+...)
fmovm.x (%sp)+,&0x30 # restore fp2,fp3
fmov.l %d0,%fpcr # restore users round mode,prec
fdiv.x %fp1,%fp0 # last inst - possible exception set
bra t_inx2
NODD:
fmov.x %fp0,%fp1
fmul.x %fp0,%fp0 # S = R*R
fmov.d TANQ4(%pc),%fp3
fmov.d TANP3(%pc),%fp2
fmul.x %fp0,%fp3 # SQ4
fmul.x %fp0,%fp2 # SP3
fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
fadd.x TANP2(%pc),%fp2 # P2+SP3
fmul.x %fp0,%fp3 # S(Q3+SQ4)
fmul.x %fp0,%fp2 # S(P2+SP3)
fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
fmul.x %fp0,%fp3 # S(Q2+S(Q3+SQ4))
fmul.x %fp0,%fp2 # S(P1+S(P2+SP3))
fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
fmul.x %fp1,%fp2 # RS(P1+S(P2+SP3))
fmul.x %fp3,%fp0 # S(Q1+S(Q2+S(Q3+SQ4)))
fadd.x %fp2,%fp1 # R+RS(P1+S(P2+SP3))
fadd.s &0x3F800000,%fp0 # 1+S(Q1+...)
fmovm.x (%sp)+,&0x30 # restore fp2,fp3
fmov.x %fp1,-(%sp)
eor.l &0x80000000,(%sp)
fmov.l %d0,%fpcr # restore users round mode,prec
fdiv.x (%sp)+,%fp0 # last inst - possible exception set
bra t_inx2
TANBORS:
#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
#--IF |X| < 2**(-40), RETURN X.
cmp.l %d1,&0x3FFF8000
bgt.b REDUCEX
TANSM:
fmov.x %fp0,-(%sp)
fmov.l %d0,%fpcr # restore users round mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%sp)+,%fp0 # last inst - possible exception set
bra t_catch
global stand
#--TAN(X) = X FOR DENORMALIZED X
stand:
bra t_extdnrm
#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
REDUCEX:
fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
mov.l %d2,-(%sp) # save d2
fmov.s &0x00000000,%fp1 # fp1 = 0
#--If compact form of abs(arg) in d0=$7ffeffff, argument is so large that
#--there is a danger of unwanted overflow in first LOOP iteration. In this
#--case, reduce argument by one remainder step to make subsequent reduction
#--safe.
cmp.l %d1,&0x7ffeffff # is arg dangerously large?
bne.b LOOP # no
# yes; create 2**16383*PI/2
mov.w &0x7ffe,FP_SCR0_EX(%a6)
mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
# create low half of 2**16383*PI/2 at FP_SCR1
mov.w &0x7fdc,FP_SCR1_EX(%a6)
mov.l &0x85a308d3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
ftest.x %fp0 # test sign of argument
fblt.w red_neg
or.b &0x80,FP_SCR0_EX(%a6) # positive arg
or.b &0x80,FP_SCR1_EX(%a6)
red_neg:
fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
fmov.x %fp0,%fp1 # save high result in fp1
fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
fsub.x %fp0,%fp1 # determine low component of result
fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
#--integer quotient will be stored in N
#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
LOOP:
fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
mov.w INARG(%a6),%d1
mov.l %d1,%a1 # save a copy of D0
and.l &0x00007FFF,%d1
sub.l &0x00003FFF,%d1 # d0 = K
cmp.l %d1,&28
ble.b LASTLOOP
CONTLOOP:
sub.l &27,%d1 # d0 = L := K-27
mov.b &0,ENDFLAG(%a6)
bra.b WORK
LASTLOOP:
clr.l %d1 # d0 = L := 0
mov.b &1,ENDFLAG(%a6)
WORK:
#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
#--2**L * (PIby2_1), 2**L * (PIby2_2)
mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
mov.l &0x4E44152A,FP_SCR0_LO(%a6)
mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
fmov.x %fp0,%fp2
fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
#--US THE DESIRED VALUE IN FLOATING POINT.
mov.l %a1,%d2
swap %d2
and.l &0x80000000,%d2
or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
mov.l %d2,TWOTO63(%a6)
fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
fsub.s TWOTO63(%a6),%fp2 # fp2 = N
# fintrz.x %fp2,%fp2
#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
mov.l %d1,%d2 # d2 = L
add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
mov.w %d2,FP_SCR0_EX(%a6)
mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
add.l &0x00003FDD,%d1
mov.w %d1,FP_SCR1_EX(%a6)
mov.l &0x85A308D3,FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
mov.b ENDFLAG(%a6),%d1
#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
#--P2 = 2**(L) * Piby2_2
fmov.x %fp2,%fp4 # fp4 = N
fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
fmov.x %fp2,%fp5 # fp5 = N
fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
fmov.x %fp4,%fp3 # fp3 = W = N*P1
#--we want P+p = W+w but |p| <= half ulp of P
#--Then, we need to compute A := R-P and a := r-p
fadd.x %fp5,%fp3 # fp3 = P
fsub.x %fp3,%fp4 # fp4 = W-P
fsub.x %fp3,%fp0 # fp0 = A := R - P
fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
fmov.x %fp0,%fp3 # fp3 = A
fsub.x %fp4,%fp1 # fp1 = a := r - p
#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
#--|r| <= half ulp of R.
fadd.x %fp1,%fp0 # fp0 = R := A+a
#--No need to calculate r if this is the last loop
cmp.b %d1,&0
bgt.w RESTORE
#--Need to calculate r
fsub.x %fp0,%fp3 # fp3 = A-R
fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
bra.w LOOP
RESTORE:
fmov.l %fp2,INT(%a6)
mov.l (%sp)+,%d2 # restore d2
fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
mov.l INT(%a6),%d1
ror.l &1,%d1
bra.w TANCONT
#########################################################################
# satan(): computes the arctangent of a normalized number #
# satand(): computes the arctangent of a denormalized number #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arctan(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bits, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5. #
# #
# Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. #
# Note that k = -4, -3,..., or 3. #
# Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 #
# significant bits of X with a bit-1 attached at the 6-th #
# bit position. Define u to be u = (X-F) / (1 + X*F). #
# #
# Step 3. Approximate arctan(u) by a polynomial poly. #
# #
# Step 4. Return arctan(F) + poly, arctan(F) is fetched from a #
# table of values calculated beforehand. Exit. #
# #
# Step 5. If |X| >= 16, go to Step 7. #
# #
# Step 6. Approximate arctan(X) by an odd polynomial in X. Exit. #
# #
# Step 7. Define X' = -1/X. Approximate arctan(X') by an odd #
# polynomial in X'. #
# Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit. #
# #
#########################################################################
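#
# A hedged double-precision sketch of steps 2-4 above.  F keeps the sign,
# exponent and leading fraction bits of X and sets the next bit below them,
# so u = (X-F)/(1+X*F) is small.  The assembly below fetches atan(|F|) from
# ATANTBL; here libm's atan() stands in for that table, and a two-term odd
# polynomial stands in for the ATANA* polynomial.
#
#   #include <math.h>
#   #include <stdint.h>
#   #include <string.h>
#
#   static double atan_by_table(double x)  /* assumes 1/16 <= |x| < 16 */
#   {
#       uint64_t bits;
#       memcpy(&bits, &x, sizeof bits);
#       bits &= 0xFFFF800000000000ULL;     /* sign, exponent, 5 fraction bits */
#       bits |= 0x0000400000000000ULL;     /* set the next fraction bit       */
#       double f;
#       memcpy(&f, &bits, sizeof f);
#       double u = (x - f) / (1.0 + x*f);
#       return atan(f) + (u - u*u*u/3.0);  /* atan(F) + poly(u)               */
#   }
#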
ATANA3: long 0xBFF6687E,0x314987D8
ATANA2: long 0x4002AC69,0x34A26DB3
ATANA1: long 0xBFC2476F,0x4E1DA28E
ATANB6: long 0x3FB34444,0x7F876989
ATANB5: long 0xBFB744EE,0x7FAF45DB
ATANB4: long 0x3FBC71C6,0x46940220
ATANB3: long 0xBFC24924,0x921872F9
ATANB2: long 0x3FC99999,0x99998FA9
ATANB1: long 0xBFD55555,0x55555555
ATANC5: long 0xBFB70BF3,0x98539E6A
ATANC4: long 0x3FBC7187,0x962D1D7D
ATANC3: long 0xBFC24924,0x827107B8
ATANC2: long 0x3FC99999,0x9996263E
ATANC1: long 0xBFD55555,0x55555536
PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
ATANTBL:
long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set XFRACLO,X+8
set ATANF,FP_SCR1
set ATANFHI,ATANF+4
set ATANFLO,ATANF+8
global satan
#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
satan:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
bge.b ATANOK1
bra.w ATANSM
ATANOK1:
cmp.l %d1,&0x4002FFFF # |X| < 16 ?
ble.b ATANMAIN
bra.w ATANBIG
#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
#--WILL INVOLVE A VERY LONG POLYNOMIAL.
#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
#--WE CHOSE F TO BE +-2^K * 1.BBBB1
#--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
ATANMAIN:
and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
fmov.x %fp0,%fp1 # FP1 IS X
fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
fsub.x X(%a6),%fp0 # FP0 IS X-F
fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
fdiv.x %fp1,%fp0 # FP0 IS U = (X-F)/(1+X*F)
#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
#--SAVE REGISTERS FP2.
mov.l %d2,-(%sp) # SAVE d2 TEMPORARILY
mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
and.l &0x7FFF0000,%d2 # EXPONENT OF F
sub.l &0x3FFB0000,%d2 # K+4
asr.l &1,%d2
add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
lea ATANTBL(%pc),%a1
add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
mov.l (%a1)+,ATANF(%a6)
mov.l (%a1)+,ATANFHI(%a6)
mov.l (%a1)+,ATANFLO(%a6) # ATANF IS NOW ATAN(|F|)
mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
and.l &0x80000000,%d1 # SIGN(F)
or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
mov.l (%sp)+,%d2 # RESTORE d2
#--THAT'S ALL I HAVE TO DO FOR NOW,
#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
#--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
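#
# The two forms are algebraically identical; a hedged C illustration with
# a1,a2,a3 standing for the "natural" coefficients (so ATANA1 holds a3,
# ATANA2 holds a1/a3 and ATANA3 holds a2/a3 in the rearranged form):
#
#   /* natural:    u + u*v*(a1 + v*(a2 + v*a3))
#    * rearranged: u + a3*u*v*(a1/a3 + v*(a2/a3 + v))
#    * both expand to u + a1*u*v + a2*u*v*v + a3*u*v*v*v              */
#   static double atan_tail(double u, double v,
#                           double a1, double a2, double a3)
#   {
#       return u + a3*u*v*(a1/a3 + v*(a2/a3 + v));
#   }
#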
fmovm.x &0x04,-(%sp) # save fp2
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1
fmov.d ATANA3(%pc),%fp2
fadd.x %fp1,%fp2 # A3+V
fmul.x %fp1,%fp2 # V*(A3+V)
fmul.x %fp0,%fp1 # U*V
fadd.d ATANA2(%pc),%fp2 # A2+V*(A3+V)
fmul.d ATANA1(%pc),%fp1 # A1*U*V
fmul.x %fp2,%fp1 # A1*U*V*(A2+V*(A3+V))
fadd.x %fp1,%fp0 # ATAN(U), FP1 RELEASED
fmovm.x (%sp)+,&0x20 # restore fp2
fmov.l %d0,%fpcr # restore users rnd mode,prec
fadd.x ATANF(%a6),%fp0 # ATAN(X)
bra t_inx2
ATANBORS:
#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
cmp.l %d1,&0x3FFF8000
bgt.w ATANBIG # I.E. |X| >= 16
ATANSM:
#--|X| <= 1/16
#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6)] )
#--WHERE Y = X*X, AND Z = Y*Y.
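#--ILLUSTRATIVE NOTE (not part of the original commentary): the split form
#--above is only a regrouping of the straight Horner form; a stand-alone C
#--sketch with stand-in values for B1..B6 (the real minimax coefficients
#--live in the ATANB1..ATANB6 tables earlier in this file) shows the two
#--forms produce the same value up to rounding:
#--
#	#include <stdio.h>
#	int main(void)
#	{
#		double b[6] = { -0.333, 0.2, -0.143, 0.111, -0.091, 0.077 };
#		double x = 0.05, y = x*x, z = y*y;
#		double horner = x + x*y*(b[0] + y*(b[1] + y*(b[2] +
#				y*(b[3] + y*(b[4] + y*b[5])))));
#		double split  = x + x*y*((b[0] + z*(b[2] + z*b[4]))
#				+ y*(b[1] + z*(b[3] + z*b[5])));
#		printf("%.17g %.17g\n", horner, split);
#		return 0;
#	}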
cmp.l %d1,&0x3FD78000
blt.w ATANTINY
#--COMPUTE POLYNOMIAL
fmovm.x &0x0c,-(%sp) # save fp2/fp3
	fmul.x		%fp0,%fp0	# FP0 IS Y = X*X
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
fmov.d ATANB6(%pc),%fp2
fmov.d ATANB5(%pc),%fp3
fmul.x %fp1,%fp2 # Z*B6
fmul.x %fp1,%fp3 # Z*B5
fadd.d ATANB4(%pc),%fp2 # B4+Z*B6
fadd.d ATANB3(%pc),%fp3 # B3+Z*B5
fmul.x %fp1,%fp2 # Z*(B4+Z*B6)
fmul.x %fp3,%fp1 # Z*(B3+Z*B5)
fadd.d ATANB2(%pc),%fp2 # B2+Z*(B4+Z*B6)
fadd.d ATANB1(%pc),%fp1 # B1+Z*(B3+Z*B5)
fmul.x %fp0,%fp2 # Y*(B2+Z*(B4+Z*B6))
fmul.x X(%a6),%fp0 # X*Y
fadd.x %fp2,%fp1 # [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
fmul.x %fp1,%fp0 # X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users rnd mode,prec
fadd.x X(%a6),%fp0
bra t_inx2
ATANTINY:
#--|X| < 2^(-40), ATAN(X) = X
fmov.l %d0,%fpcr # restore users rnd mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
ATANBIG:
#--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
cmp.l %d1,&0x40638000
bgt.w ATANHUGE
#--APPROXIMATE ATAN(-1/X) BY
#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
#--THIS CAN BE RE-WRITTEN AS
#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
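#--ILLUSTRATIVE NOTE (not part of the original commentary): the reduction
#--ATAN(X) = SIGN(X)*PI/2 + ATAN(-1/X) used here can be checked with a
#--tiny C sketch (libm atan() stands in for the C1..C5 polynomial):
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = -20.0;		/* any 16 <= |x| <= 2^100 */
#		double pio2 = acos(0.0);
#		printf("%.17g %.17g\n", atan(x),
#			copysign(pio2, x) + atan(-1.0/x));
#		return 0;
#	}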
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.s &0xBF800000,%fp1 # LOAD -1
fdiv.x %fp0,%fp1 # FP1 IS -1/X
#--DIVIDE IS STILL CRANKING
fmov.x %fp1,%fp0 # FP0 IS X'
fmul.x %fp0,%fp0 # FP0 IS Y = X'*X'
fmov.x %fp1,X(%a6) # X IS REALLY X'
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
fmov.d ATANC5(%pc),%fp3
fmov.d ATANC4(%pc),%fp2
fmul.x %fp1,%fp3 # Z*C5
	fmul.x		%fp1,%fp2	# Z*C4
fadd.d ATANC3(%pc),%fp3 # C3+Z*C5
fadd.d ATANC2(%pc),%fp2 # C2+Z*C4
fmul.x %fp3,%fp1 # Z*(C3+Z*C5), FP3 RELEASED
fmul.x %fp0,%fp2 # Y*(C2+Z*C4)
fadd.d ATANC1(%pc),%fp1 # C1+Z*(C3+Z*C5)
fmul.x X(%a6),%fp0 # X'*Y
fadd.x %fp2,%fp1 # [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
	fmul.x		%fp1,%fp0	# X'*Y*([C1+Z*(C3+Z*C5)]
#					#	...	+[Y*(C2+Z*C4)])
fadd.x X(%a6),%fp0
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
fmov.l %d0,%fpcr # restore users rnd mode,prec
tst.b (%a0)
bpl.b pos_big
neg_big:
fadd.x NPIBY2(%pc),%fp0
bra t_minx2
pos_big:
fadd.x PPIBY2(%pc),%fp0
bra t_pinx2
ATANHUGE:
#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
tst.b (%a0)
bpl.b pos_huge
neg_huge:
fmov.x NPIBY2(%pc),%fp0
fmov.l %d0,%fpcr
fadd.x PTINY(%pc),%fp0
bra t_minx2
pos_huge:
fmov.x PPIBY2(%pc),%fp0
fmov.l %d0,%fpcr
fadd.x NTINY(%pc),%fp0
bra t_pinx2
global satand
#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
satand:
bra t_extdnrm
#########################################################################
# sasin(): computes the inverse sine of a normalized input #
# sasind(): computes the inverse sine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arcsin(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ASIN #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate asin(X) by #
# z := sqrt( [1-X][1+X] ) #
# asin(X) = atan( x / z ). #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
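#--ILLUSTRATIVE NOTE (not part of the original header): the main-case
#--formula of step 2 can be checked against libm with a stand-alone C
#--sketch:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = 0.6;				/* any |x| < 1 */
#		double z = sqrt((1.0 - x) * (1.0 + x));
#		printf("%.17g %.17g\n", asin(x), atan(x / z));
#		return 0;
#	}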
global sasin
sasin:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ASINBIG
# This catch is added here for the '060 QSP. Originally, the call to
# satan() would handle this case by causing the exception which would
# not be caught until gen_except(). Now, with the exceptions being
# detected inside of satan(), the exception would have been handled there
# instead of inside sasin() as expected.
cmp.l %d1,&0x3FD78000
blt.w ASINTINY
#--THIS IS THE USUAL CASE, |X| < 1
#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
ASINMAIN:
fmov.s &0x3F800000,%fp1
fsub.x %fp0,%fp1 # 1-X
fmovm.x &0x4,-(%sp) # {fp2}
fmov.s &0x3F800000,%fp2
fadd.x %fp0,%fp2 # 1+X
fmul.x %fp2,%fp1 # (1+X)(1-X)
fmovm.x (%sp)+,&0x20 # {fp2}
fsqrt.x %fp1 # SQRT([1-X][1+X])
fdiv.x %fp1,%fp0 # X/SQRT([1-X][1+X])
fmovm.x &0x01,-(%sp) # save X/SQRT(...)
lea (%sp),%a0 # pass ptr to X/SQRT(...)
bsr satan
add.l &0xc,%sp # clear X/SQRT(...) from stack
bra t_inx2
ASINBIG:
fabs.x %fp0 # |X|
fcmp.s %fp0,&0x3F800000
fbgt t_operr # cause an operr exception
#--|X| = 1, ASIN(X) = +- PI/2.
ASINONE:
fmov.x PIBY2(%pc),%fp0
mov.l (%a0),%d1
and.l &0x80000000,%d1 # SIGN BIT OF X
or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
fmov.l %d0,%fpcr
fmul.s (%sp)+,%fp0
bra t_inx2
#--|X| < 2^(-40), ASIN(X) = X
ASINTINY:
fmov.l %d0,%fpcr # restore users rnd mode,prec
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%a0),%fp0 # last inst - possible exception
bra t_catch
global sasind
#--ASIN(X) = X FOR DENORMALIZED X
sasind:
bra t_extdnrm
#########################################################################
# sacos(): computes the inverse cosine of a normalized input #
# sacosd(): computes the inverse cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arccos(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ACOS #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate acos(X) by #
# z := (1-X) / (1+X) #
# acos(X) = 2 * atan( sqrt(z) ). #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
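#--ILLUSTRATIVE NOTE (not part of the original header): step 2 of the
#--algorithm, checked against libm in a stand-alone C sketch:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = -0.3;			/* any |x| < 1 */
#		double z = (1.0 - x) / (1.0 + x);
#		printf("%.17g %.17g\n", acos(x), 2.0 * atan(sqrt(z)));
#		return 0;
#	}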
global sacos
sacos:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ACOSBIG
#--THIS IS THE USUAL CASE, |X| < 1
#--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
ACOSMAIN:
fmov.s &0x3F800000,%fp1
fadd.x %fp0,%fp1 # 1+X
fneg.x %fp0 # -X
fadd.s &0x3F800000,%fp0 # 1-X
fdiv.x %fp1,%fp0 # (1-X)/(1+X)
fsqrt.x %fp0 # SQRT((1-X)/(1+X))
mov.l %d0,-(%sp) # save original users fpcr
clr.l %d0
fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
lea (%sp),%a0 # pass ptr to sqrt
bsr satan # ATAN(SQRT([1-X]/[1+X]))
add.l &0xc,%sp # clear SQRT(...) from stack
fmov.l (%sp)+,%fpcr # restore users round prec,mode
fadd.x %fp0,%fp0 # 2 * ATAN( STUFF )
bra t_pinx2
ACOSBIG:
fabs.x %fp0
fcmp.s %fp0,&0x3F800000
fbgt t_operr # cause an operr exception
#--|X| = 1, ACOS(X) = 0 OR PI
tst.b (%a0) # is X positive or negative?
bpl.b ACOSP1
#--X = -1
# Return PI and an inexact exception
ACOSM1:
fmov.x PI(%pc),%fp0 # load PI
fmov.l %d0,%fpcr # load round mode,prec
fadd.s &0x00800000,%fp0 # add a small value
bra t_pinx2
ACOSP1:
bra ld_pzero # answer is positive zero
global sacosd
#--ACOS(X) = PI/2 FOR DENORMALIZED X
sacosd:
fmov.l %d0,%fpcr # load user's rnd mode/prec
fmov.x PIBY2(%pc),%fp0
bra t_pinx2
#########################################################################
# setox(): computes the exponential for a normalized input #
# setoxd(): computes the exponential for a denormalized input #
# setoxm1(): computes the exponential minus 1 for a normalized input #
# setoxm1d(): computes the exponential minus 1 for a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = exp(X) or exp(X)-1 #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 0.85 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM and IMPLEMENTATION **************************************** #
# #
# setoxd #
# ------ #
# Step 1. Set ans := 1.0 #
# #
# Step 2. Return ans := ans + sign(X)*2^(-126). Exit. #
# Notes: This will always generate one exception -- inexact. #
# #
# #
# setox #
# ----- #
# #
# Step 1. Filter out extreme cases of input argument. #
# 1.1 If |X| >= 2^(-65), go to Step 1.3. #
# 1.2 Go to Step 7. #
# 1.3 If |X| < 16380 log(2), go to Step 2. #
# 1.4 Go to Step 8. #
# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
# To avoid the use of floating-point comparisons, a #
# compact representation of |X| is used. This format is a #
# 32-bit integer, the upper (more significant) 16 bits #
# are the sign and biased exponent field of |X|; the #
# lower 16 bits are the 16 most significant fraction #
# (including the explicit bit) bits of |X|. Consequently, #
# the comparisons in Steps 1.1 and 1.3 can be performed #
# by integer comparison. Note also that the constant #
# 16380 log(2) used in Step 1.3 is also in the compact #
# form. Thus taking the branch to Step 2 guarantees #
# |X| < 16380 log(2). There is no harm to have a small #
# number of cases where |X| is less than, but close to, #
# 16380 log(2) and the branch to Step 9 is taken. #
# #
# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
# 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
# was taken) #
# 2.2 N := round-to-nearest-integer( X * 64/log2 ). #
# 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
# or 63. #
# 2.4 Calculate M = (N - J)/64; so N = 64M + J. #
# 2.5 Calculate the address of the stored value of #
# 2^(J/64). #
# 2.6 Create the value Scale = 2^M. #
# Notes: The calculation in 2.2 is really performed by #
# Z := X * constant #
# N := round-to-nearest-integer(Z) #
# where #
# constant := single-precision( 64/log 2 ). #
# #
# Using a single-precision constant avoids memory #
# access. Another effect of using a single-precision #
# "constant" is that the calculated value Z is #
# #
# Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24). #
# #
# This error has to be considered later in Steps 3 and 4. #
# #
# Step 3. Calculate X - N*log2/64. #
# 3.1 R := X + N*L1, #
# where L1 := single-precision(-log2/64). #
# 3.2 R := R + N*L2, #
# L2 := extended-precision(-log2/64 - L1).#
# Notes: a) The way L1 and L2 are chosen ensures L1+L2 #
# approximate the value -log2/64 to 88 bits of accuracy. #
# b) N*L1 is exact because N is no longer than 22 bits #
# and L1 is no longer than 24 bits. #
# c) The calculation X+N*L1 is also exact due to #
# cancellation. Thus, R is practically X+N(L1+L2) to full #
# 64 bits. #
# d) It is important to estimate how large can |R| be #
# after Step 3.2. #
# #
# N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24) #
# X*64/log2 (1+eps) = N + f, |f| <= 0.5 #
# X*64/log2 - N = f - eps*X 64/log2 #
# X - N*log2/64 = f*log2/64 - eps*X #
# #
# #
# Now |X| <= 16446 log2, thus #
# #
# |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64 #
# <= 0.57 log2/64. #
# This bound will be used in Step 4. #
# #
# Step 4. Approximate exp(R)-1 by a polynomial #
# p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5)))) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: A1 (which is 1/2), A4 #
# and A5 are single precision; A2 and A3 are double #
# precision. #
# b) Even with the restrictions above, #
# |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062. #
# Note that 0.0062 is slightly bigger than 0.57 log2/64. #
# c) To fully utilize the pipeline, p is separated into #
# two independent pieces of roughly equal complexities #
# p = [ R + R*S*(A2 + S*A4) ] + #
# [ S*(A1 + S*(A3 + S*A5)) ] #
# where S = R*R. #
# #
# Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by #
# ans := T + ( T*p + t) #
# where T and t are the stored values for 2^(J/64). #
# Notes: 2^(J/64) is stored as T and t where T+t approximates #
# 2^(J/64) to roughly 85 bits; T is in extended precision #
# and t is in single precision. Note also that T is #
# rounded to 62 bits so that the last two bits of T are #
# zero. The reason for such a special form is that T-1, #
# T-2, and T-8 will all be exact --- a property that will #
# give much more accurate computation of the function #
# EXPM1. #
# #
# Step 6. Reconstruction of exp(X) #
# exp(X) = 2^M * 2^(J/64) * exp(R). #
# 6.1 If AdjFlag = 0, go to 6.3 #
# 6.2 ans := ans * AdjScale #
# 6.3 Restore the user FPCR #
# 6.4 Return ans := ans * Scale. Exit. #
# Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
# |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will #
# neither overflow nor underflow. If AdjFlag = 1, that #
# means that #
# X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380. #
# Hence, exp(X) may overflow or underflow or neither. #
# When that is the case, AdjScale = 2^(M1) where M1 is #
# approximately M. Thus 6.2 will never cause #
# over/underflow. Possible exception in 6.4 is overflow #
# or underflow. The inexact exception is not generated in #
#		6.4. Although one can argue that the inexact flag	#
#		should always be raised, simulating that exception	#
#		would cost more than the flag is worth in practice.	#
# #
# Step 7. Return 1 + X. #
# 7.1 ans := X #
# 7.2 Restore user FPCR. #
# 7.3 Return ans := 1 + ans. Exit #
# Notes: For non-zero X, the inexact exception will always be #
# raised by 7.3. That is the only exception raised by 7.3.#
# Note also that we use the FMOVEM instruction to move X #
# in Step 7.1 to avoid unnecessary trapping. (Although #
# the FMOVEM may not seem relevant since X is normalized, #
# the precaution will be useful in the library version of #
# this code where the separate entry for denormalized #
# inputs will be done away with.) #
# #
# Step 8. Handle exp(X) where |X| >= 16380log2. #
# 8.1 If |X| > 16480 log2, go to Step 9. #
# (mimic 2.2 - 2.6) #
# 8.2 N := round-to-integer( X * 64/log2 ) #
# 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
# 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, #
# AdjFlag := 1. #
# 8.5 Calculate the address of the stored value #
# 2^(J/64). #
# 8.6 Create the values Scale = 2^M, AdjScale = 2^M1. #
# 8.7 Go to Step 3. #
# Notes: Refer to notes for 2.2 - 2.6. #
# #
# Step 9. Handle exp(X), |X| > 16480 log2. #
# 9.1 If X < 0, go to 9.3 #
# 9.2 ans := Huge, go to 9.4 #
# 9.3 ans := Tiny. #
# 9.4 Restore user FPCR. #
# 9.5 Return ans := ans * ans. Exit. #
# Notes: Exp(X) will surely overflow or underflow, depending on #
# X's sign. "Huge" and "Tiny" are respectively large/tiny #
# extended-precision numbers whose square over/underflow #
# with an inexact result. Thus, 9.5 always raises the #
# inexact together with either overflow or underflow. #
# #
# setoxm1d #
# -------- #
# #
# Step 1. Set ans := 0 #
# #
# Step 2. Return ans := X + ans. Exit. #
# Notes: This will return X with the appropriate rounding #
# precision prescribed by the user FPCR. #
# #
# setoxm1 #
# ------- #
# #
# Step 1. Check |X| #
# 1.1 If |X| >= 1/4, go to Step 1.3. #
# 1.2 Go to Step 7. #
# 1.3 If |X| < 70 log(2), go to Step 2. #
# 1.4 Go to Step 10. #
# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
# However, it is conceivable |X| can be small very often #
# because EXPM1 is intended to evaluate exp(X)-1 #
# accurately when |X| is small. For further details on #
# the comparisons, see the notes on Step 1 of setox. #
# #
# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
# 2.1 N := round-to-nearest-integer( X * 64/log2 ). #
# 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
# or 63. #
# 2.3 Calculate M = (N - J)/64; so N = 64M + J. #
# 2.4 Calculate the address of the stored value of #
# 2^(J/64). #
# 2.5 Create the values Sc = 2^M and #
# OnebySc := -2^(-M). #
# Notes: See the notes on Step 2 of setox. #
# #
# Step 3. Calculate X - N*log2/64. #
# 3.1 R := X + N*L1, #
# where L1 := single-precision(-log2/64). #
# 3.2 R := R + N*L2, #
# L2 := extended-precision(-log2/64 - L1).#
# Notes: Applying the analysis of Step 3 of setox in this case #
# shows that |R| <= 0.0055 (note that |X| <= 70 log2 in #
# this case). #
# #
# Step 4. Approximate exp(R)-1 by a polynomial #
# p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6))))) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: A1 (which is 1/2), A5 #
# and A6 are single precision; A2, A3 and A4 are double #
# precision. #
# b) Even with the restriction above, #
# |p - (exp(R)-1)| < |R| * 2^(-72.7) #
# for all |R| <= 0.0055. #
# c) To fully utilize the pipeline, p is separated into #
# two independent pieces of roughly equal complexity #
# p = [ R*S*(A2 + S*(A4 + S*A6)) ] + #
# [ R + S*(A1 + S*(A3 + S*A5)) ] #
# where S = R*R. #
# #
# Step 5. Compute 2^(J/64)*p by #
# p := T*p #
# where T and t are the stored values for 2^(J/64). #
# Notes: 2^(J/64) is stored as T and t where T+t approximates #
# 2^(J/64) to roughly 85 bits; T is in extended precision #
# and t is in single precision. Note also that T is #
# rounded to 62 bits so that the last two bits of T are #
# zero. The reason for such a special form is that T-1, #
# T-2, and T-8 will all be exact --- a property that will #
# be exploited in Step 6 below. The total relative error #
# in p is no bigger than 2^(-67.7) compared to the final #
# result. #
# #
# Step 6. Reconstruction of exp(X)-1 #
# exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ). #
# 6.1 If M <= 63, go to Step 6.3. #
# 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6 #
# 6.3 If M >= -3, go to 6.5. #
# 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6 #
# 6.5 ans := (T + OnebySc) + (p + t). #
# 6.6 Restore user FPCR. #
# 6.7 Return ans := Sc * ans. Exit. #
# Notes: The various arrangements of the expressions give #
# accurate evaluations. #
# #
# Step 7. exp(X)-1 for |X| < 1/4. #
# 7.1 If |X| >= 2^(-65), go to Step 9. #
# 7.2 Go to Step 8. #
# #
# Step 8. Calculate exp(X)-1, |X| < 2^(-65). #
# 8.1 If |X| < 2^(-16312), goto 8.3 #
# 8.2 Restore FPCR; return ans := X - 2^(-16382). #
# Exit. #
# 8.3 X := X * 2^(140). #
# 8.4 Restore FPCR; ans := ans - 2^(-16382). #
# Return ans := ans*2^(140). Exit #
# Notes: The idea is to return "X - tiny" under the user #
# precision and rounding modes. To avoid unnecessary #
# inefficiency, we stay away from denormalized numbers #
# the best we can. For |X| >= 2^(-16312), the #
# straightforward 8.2 generates the inexact exception as #
# the case warrants. #
# #
# Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial #
# p = X + X*X*(B1 + X*(B2 + ... + X*B12)) #
# Notes: a) In order to reduce memory access, the coefficients #
# are made as "short" as possible: B1 (which is 1/2), B9 #
# to B12 are single precision; B3 to B8 are double #
# precision; and B2 is double extended. #
# b) Even with the restriction above, #
# |p - (exp(X)-1)| < |X| 2^(-70.6) #
# for all |X| <= 0.251. #
# Note that 0.251 is slightly bigger than 1/4. #
# c) To fully preserve accuracy, the polynomial is #
# computed as #
# X + ( S*B1 + Q ) where S = X*X and #
# Q = X*S*(B2 + X*(B3 + ... + X*B12)) #
# d) To fully utilize the pipeline, Q is separated into #
# two independent pieces of roughly equal complexity #
# Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] + #
# [ S*S*(B3 + S*(B5 + ... + S*B11)) ] #
# #
# Step 10. Calculate exp(X)-1 for |X| >= 70 log 2. #
# 10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all #
# practical purposes. Therefore, go to Step 1 of setox. #
# 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical #
# purposes. #
# ans := -1 #
# Restore user FPCR #
# Return ans := ans + 2^(-126). Exit. #
# Notes: 10.2 will always create an inexact and return -1 + tiny #
# in the user rounding precision and mode. #
# #
#########################################################################
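#--ILLUSTRATIVE NOTE (not part of the original header): the argument
#--reduction of Steps 2-6 of setox, written as a rough C sketch; libm
#--stands in for the 2^(J/64) table and the exp(R)-1 polynomial, and all
#--scaling/overflow handling is omitted:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = 5.25, ln2 = log(2.0);
#		double n = rint(x * 64.0 / ln2);	/* N                 */
#		int    j = (int)n & 63;			/* J = N mod 64      */
#		int    m = ((int)n - j) / 64;		/* M, N = 64*M + J   */
#		double r = x - n * ln2 / 64.0;		/* reduced arg R     */
#		double ans = ldexp(exp2(j / 64.0) * exp(r), m);
#		printf("%.17g %.17g\n", exp(x), ans);	/* agree to rounding */
#		return 0;
#	}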
L2: long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
EEXPA3: long 0x3FA55555,0x55554CC1
EEXPA2: long 0x3FC55555,0x55554A54
EM1A4: long 0x3F811111,0x11174385
EM1A3: long 0x3FA55555,0x55554F5A
EM1A2: long 0x3FC55555,0x55555555,0x00000000,0x00000000
EM1B8: long 0x3EC71DE3,0xA5774682
EM1B7: long 0x3EFA01A0,0x19D7CB68
EM1B6: long 0x3F2A01A0,0x1A019DF3
EM1B5: long 0x3F56C16C,0x16C170E2
EM1B4: long 0x3F811111,0x11111111
EM1B3: long 0x3FA55555,0x55555555
EM1B2: long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
long 0x00000000
TWO140: long 0x48B00000,0x00000000
TWON140:
long 0x37300000,0x00000000
EEXPTBL:
long 0x3FFF0000,0x80000000,0x00000000,0x00000000
long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
set ADJFLAG,L_SCR2
set SCALE,FP_SCR0
set ADJSCALE,FP_SCR1
set SC,FP_SCR0
set ONEBYSC,FP_SCR1
global setox
setox:
#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
#--Step 1.
mov.l (%a0),%d1 # load part of input X
and.l &0x7FFF0000,%d1 # biased expo. of X
cmp.l %d1,&0x3FBE0000 # 2^(-65)
bge.b EXPC1 # normal case
bra EXPSM
EXPC1:
#--The case |X| >= 2^(-65)
mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
blt.b EXPMAIN # normal case
bra EEXPBIG
EXPMAIN:
#--Step 2.
#--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
mov.l &0,ADJFLAG(%a6)
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M)
mov.w L2(%pc),L_SCR1(%a6) # prefetch L2, no need in CB
EXPCONT1:
#--Step 3.
#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
#--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
fmov.x %fp0,%fp2
fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
fadd.x %fp1,%fp0 # X + N*L1
fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
#--Step 4.
#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # fp1 IS S = R*R
fmov.s &0x3AB60B70,%fp2 # fp2 IS A5
fmul.x %fp1,%fp2 # fp2 IS S*A5
fmov.x %fp1,%fp3
fmul.s &0x3C088895,%fp3 # fp3 IS S*A4
fadd.d EEXPA3(%pc),%fp2 # fp2 IS A3+S*A5
fadd.d EEXPA2(%pc),%fp3 # fp3 IS A2+S*A4
fmul.x %fp1,%fp2 # fp2 IS S*(A3+S*A5)
mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended
mov.l &0x80000000,SCALE+4(%a6)
clr.l SCALE+8(%a6)
fmul.x %fp1,%fp3 # fp3 IS S*(A2+S*A4)
fadd.s &0x3F000000,%fp2 # fp2 IS A1+S*(A3+S*A5)
fmul.x %fp0,%fp3 # fp3 IS R*S*(A2+S*A4)
fmul.x %fp1,%fp2 # fp2 IS S*(A1+S*(A3+S*A5))
fadd.x %fp3,%fp0 # fp0 IS R+R*S*(A2+S*A4),
fmov.x (%a1)+,%fp1 # fp1 is lead. pt. of 2^(J/64)
fadd.x %fp2,%fp0 # fp0 is EXP(R) - 1
#--Step 5
#--final reconstruction process
#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
fmul.x %fp1,%fp0 # 2^(J/64)*(Exp(R)-1)
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
fadd.s (%a1),%fp0 # accurate 2^(J/64)
fadd.x %fp1,%fp0 # 2^(J/64) + 2^(J/64)*...
mov.l ADJFLAG(%a6),%d1
#--Step 6
tst.l %d1
beq.b NORMAL
ADJUST:
fmul.x ADJSCALE(%a6),%fp0
NORMAL:
fmov.l %d0,%fpcr # restore user FPCR
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x SCALE(%a6),%fp0 # multiply 2^(M)
bra t_catch
EXPSM:
#--Step 7
fmovm.x (%a0),&0x80 # load X
fmov.l %d0,%fpcr
fadd.s &0x3F800000,%fp0 # 1+X in user mode
bra t_pinx2
EEXPBIG:
#--Step 8
cmp.l %d1,&0x400CB27C # 16480 log2
bgt.b EXP2BIG
#--Steps 8.2 -- 8.6
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
mov.l &1,ADJFLAG(%a6)
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is K
mov.l %d1,L_SCR1(%a6) # save K temporarily
asr.l &1,%d1 # D0 is M1
sub.l %d1,L_SCR1(%a6) # a1 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1)
mov.l &0x80000000,ADJSCALE+4(%a6)
clr.l ADJSCALE+8(%a6)
mov.l L_SCR1(%a6),%d1 # D0 is M
add.w &0x3FFF,%d1 # biased expo. of 2^(M)
bra.w EXPCONT1 # go back to Step 3
EXP2BIG:
#--Step 9
tst.b (%a0) # is X positive or negative?
bmi t_unfl2
bra t_ovfl2
global setoxd
setoxd:
#--entry point for EXP(X), X is denormalized
mov.l (%a0),-(%sp)
andi.l &0x80000000,(%sp)
ori.l &0x00800000,(%sp) # sign(X)*2^(-126)
fmov.s &0x3F800000,%fp0
fmov.l %d0,%fpcr
fadd.s (%sp)+,%fp0
bra t_pinx2
global setoxm1
setoxm1:
#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
#--Step 1.
#--Step 1.1
mov.l (%a0),%d1 # load part of input X
and.l &0x7FFF0000,%d1 # biased expo. of X
cmp.l %d1,&0x3FFD0000 # 1/4
bge.b EM1CON1 # |X| >= 1/4
bra EM1SM
EM1CON1:
#--Step 1.3
#--The case |X| >= 1/4
mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
ble.b EM1MAIN # 1/4 <= |X| <= 70log2
bra EM1BIG
EM1MAIN:
#--Step 2.
#--This is the case: 1/4 <= |X| <= 70 log2.
fmov.x (%a0),%fp0 # load input from (a0)
fmov.x %fp0,%fp1
fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
fmov.l %fp0,%d1 # N = int( X * 64/log2 )
lea EEXPTBL(%pc),%a1
fmov.l %d1,%fp0 # convert to floating-format
mov.l %d1,L_SCR1(%a6) # save N temporarily
and.l &0x3F,%d1 # D0 is J = N mod 64
lsl.l &4,%d1
add.l %d1,%a1 # address of 2^(J/64)
mov.l L_SCR1(%a6),%d1
asr.l &6,%d1 # D0 is M
mov.l %d1,L_SCR1(%a6) # save a copy of M
#--Step 3.
#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
#--a0 points to 2^(J/64), D0 and a1 both contain M
fmov.x %fp0,%fp2
fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
fadd.x %fp1,%fp0 # X + N*L1
fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M
#--Step 4.
#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # fp1 IS S = R*R
fmov.s &0x3950097B,%fp2 # fp2 IS a6
fmul.x %fp1,%fp2 # fp2 IS S*A6
fmov.x %fp1,%fp3
fmul.s &0x3AB60B6A,%fp3 # fp3 IS S*A5
fadd.d EM1A4(%pc),%fp2 # fp2 IS A4+S*A6
fadd.d EM1A3(%pc),%fp3 # fp3 IS A3+S*A5
mov.w %d1,SC(%a6) # SC is 2^(M) in extended
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fmul.x %fp1,%fp2 # fp2 IS S*(A4+S*A6)
mov.l L_SCR1(%a6),%d1 # D0 is M
neg.w %d1 # D0 is -M
fmul.x %fp1,%fp3 # fp3 IS S*(A3+S*A5)
add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
fadd.d EM1A2(%pc),%fp2 # fp2 IS A2+S*(A4+S*A6)
fadd.s &0x3F000000,%fp3 # fp3 IS A1+S*(A3+S*A5)
fmul.x %fp1,%fp2 # fp2 IS S*(A2+S*(A4+S*A6))
or.w &0x8000,%d1 # signed/expo. of -2^(-M)
mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M)
mov.l &0x80000000,ONEBYSC+4(%a6)
clr.l ONEBYSC+8(%a6)
fmul.x %fp3,%fp1 # fp1 IS S*(A1+S*(A3+S*A5))
fmul.x %fp0,%fp2 # fp2 IS R*S*(A2+S*(A4+S*A6))
fadd.x %fp1,%fp0 # fp0 IS R+S*(A1+S*(A3+S*A5))
fadd.x %fp2,%fp0 # fp0 IS EXP(R)-1
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
#--Step 5
#--Compute 2^(J/64)*p
fmul.x (%a1),%fp0 # 2^(J/64)*(Exp(R)-1)
#--Step 6
#--Step 6.1
mov.l L_SCR1(%a6),%d1 # retrieve M
cmp.l %d1,&63
ble.b MLE63
#--Step 6.2 M >= 64
fmov.s 12(%a1),%fp1 # fp1 is t
fadd.x ONEBYSC(%a6),%fp1 # fp1 is t+OnebySc
fadd.x %fp1,%fp0 # p+(t+OnebySc), fp1 released
fadd.x (%a1),%fp0 # T+(p+(t+OnebySc))
bra EM1SCALE
MLE63:
#--Step 6.3 M <= 63
cmp.l %d1,&-3
bge.b MGEN3
MLTN3:
#--Step 6.4 M <= -4
fadd.s 12(%a1),%fp0 # p+t
fadd.x (%a1),%fp0 # T+(p+t)
fadd.x ONEBYSC(%a6),%fp0 # OnebySc + (T+(p+t))
bra EM1SCALE
MGEN3:
#--Step 6.5 -3 <= M <= 63
fmov.x (%a1)+,%fp1 # fp1 is T
fadd.s (%a1),%fp0 # fp0 is p+t
fadd.x ONEBYSC(%a6),%fp1 # fp1 is T+OnebySc
fadd.x %fp1,%fp0 # (T+OnebySc)+(p+t)
EM1SCALE:
#--Step 6.6
fmov.l %d0,%fpcr
fmul.x SC(%a6),%fp0
bra t_inx2
EM1SM:
#--Step 7 |X| < 1/4.
cmp.l %d1,&0x3FBE0000 # 2^(-65)
bge.b EM1POLY
EM1TINY:
#--Step 8 |X| < 2^(-65)
cmp.l %d1,&0x00330000 # 2^(-16312)
blt.b EM12TINY
#--Step 8.2
mov.l &0x80010000,SC(%a6) # SC is -2^(-16382)
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fmov.x (%a0),%fp0
fmov.l %d0,%fpcr
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x SC(%a6),%fp0
bra t_catch
EM12TINY:
#--Step 8.3
fmov.x (%a0),%fp0
fmul.d TWO140(%pc),%fp0
mov.l &0x80010000,SC(%a6)
mov.l &0x80000000,SC+4(%a6)
clr.l SC+8(%a6)
fadd.x SC(%a6),%fp0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.d TWON140(%pc),%fp0
bra t_catch
EM1POLY:
#--Step 9 exp(X)-1 by a simple polynomial
fmov.x (%a0),%fp0 # fp0 is X
fmul.x %fp0,%fp0 # fp0 is S := X*X
fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
fmov.s &0x2F30CAA8,%fp1 # fp1 is B12
fmul.x %fp0,%fp1 # fp1 is S*B12
fmov.s &0x310F8290,%fp2 # fp2 is B11
fadd.s &0x32D73220,%fp1 # fp1 is B10+S*B12
fmul.x %fp0,%fp2 # fp2 is S*B11
fmul.x %fp0,%fp1 # fp1 is S*(B10 + ...
fadd.s &0x3493F281,%fp2 # fp2 is B9+S*...
fadd.d EM1B8(%pc),%fp1 # fp1 is B8+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B9+...
fmul.x %fp0,%fp1 # fp1 is S*(B8+...
fadd.d EM1B7(%pc),%fp2 # fp2 is B7+S*...
fadd.d EM1B6(%pc),%fp1 # fp1 is B6+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B7+...
fmul.x %fp0,%fp1 # fp1 is S*(B6+...
fadd.d EM1B5(%pc),%fp2 # fp2 is B5+S*...
fadd.d EM1B4(%pc),%fp1 # fp1 is B4+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B5+...
fmul.x %fp0,%fp1 # fp1 is S*(B4+...
fadd.d EM1B3(%pc),%fp2 # fp2 is B3+S*...
fadd.x EM1B2(%pc),%fp1 # fp1 is B2+S*...
fmul.x %fp0,%fp2 # fp2 is S*(B3+...
fmul.x %fp0,%fp1 # fp1 is S*(B2+...
fmul.x %fp0,%fp2 # fp2 is S*S*(B3+...)
fmul.x (%a0),%fp1 # fp1 is X*S*(B2...
fmul.s &0x3F000000,%fp0 # fp0 is S*B1
fadd.x %fp2,%fp1 # fp1 is Q
fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
fadd.x %fp1,%fp0 # fp0 is S*B1+Q
fmov.l %d0,%fpcr
fadd.x (%a0),%fp0
bra t_inx2
EM1BIG:
#--Step 10 |X| > 70 log2
mov.l (%a0),%d1
cmp.l %d1,&0
bgt.w EXPC1
#--Step 10.2
fmov.s &0xBF800000,%fp0 # fp0 is -1
fmov.l %d0,%fpcr
fadd.s &0x00800000,%fp0 # -1 + 2^(-126)
bra t_minx2
global setoxm1d
setoxm1d:
#--entry point for EXPM1(X), here X is denormalized
#--Step 0.
bra t_extdnrm
#########################################################################
# sgetexp(): returns the exponent portion of the input argument. #
# The exponent bias is removed and the exponent value is #
# returned as an extended precision number in fp0. #
# sgetexpd(): handles denormalized numbers. #
# #
# sgetman(): extracts the mantissa of the input argument. The #
# mantissa is converted to an extended precision number w/ #
# an exponent of $3fff and is returned in fp0. The range of #
# the result is [1.0 - 2.0). #
# sgetmand(): handles denormalized numbers. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# #
# OUTPUT ************************************************************** #
# fp0 = exponent(X) or mantissa(X) #
# #
#########################################################################
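#--ILLUSTRATIVE NOTE (not part of the original header): in C terms the two
#--operations roughly correspond to splitting a value with frexp(); a
#--minimal sketch for a normalized input:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = -12.5;
#		int e;
#		double man = frexp(x, &e);	/* man in [0.5,1), x = man*2^e */
#		/* getexp(x) ~ e-1, getman(x) ~ man*2 (sign kept, |.| in [1,2)) */
#		printf("exp=%d man=%.17g\n", e - 1, man * 2.0);
#		return 0;
#	}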
global sgetexp
sgetexp:
mov.w SRC_EX(%a0),%d0 # get the exponent
bclr &0xf,%d0 # clear the sign bit
subi.w &0x3fff,%d0 # subtract off the bias
fmov.w %d0,%fp0 # return exp in fp0
blt.b sgetexpn # it's negative
rts
sgetexpn:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
global sgetexpd
sgetexpd:
bsr.l norm # normalize
neg.w %d0 # new exp = -(shft amt)
subi.w &0x3fff,%d0 # subtract off the bias
fmov.w %d0,%fp0 # return exp in fp0
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
global sgetman
sgetman:
mov.w SRC_EX(%a0),%d0 # get the exp
	ori.w		&0x7fff,%d0	# set all exponent bits
	bclr		&0xe,%d0	# clear bit 14 -> new exp is $3fff (sign kept)
# here, we build the result in a tmp location so as not to disturb the input
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmov.x FP_SCR0(%a6),%fp0 # put new value back in fp0
bmi.b sgetmann # it's negative
rts
sgetmann:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# For denormalized numbers, shift the mantissa until the j-bit = 1,
# then load the exponent with +/- $3fff.
#
global sgetmand
sgetmand:
bsr.l norm # normalize exponent
bra.b sgetman
#########################################################################
# scosh(): computes the hyperbolic cosine of a normalized input #
# scoshd(): computes the hyperbolic cosine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = cosh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# COSH #
# 1. If |X| > 16380 log2, go to 3. #
# #
# 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae #
# y = |X|, z = exp(Y), and #
# cosh(X) = (1/2)*( z + 1/z ). #
# Exit. #
# #
# 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5. #
# #
# 4. (16380 log2 < |X| <= 16480 log2) #
#		cosh(X) = exp(|X|)/2.					#
#		However, invoking exp(|X|) may cause premature		#
#		overflow. Thus, we calculate cosh(X) as follows:	#
# Y := |X| #
# Fact := 2**(16380) #
# Y' := Y - 16381 log2 #
# cosh(X) := Fact * exp(Y'). #
# Exit. #
# #
#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
# Huge*Huge to generate overflow and an infinity with #
# the appropriate sign. Huge is the largest finite number #
# in extended format. Exit. #
# #
#########################################################################
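#--ILLUSTRATIVE NOTE (not part of the original header): the main-case
#--formula of step 2, checked against libm in a stand-alone C sketch:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = -2.5;
#		double z = exp(fabs(x));
#		printf("%.17g %.17g\n", cosh(x), 0.5 * (z + 1.0/z));
#		return 0;
#	}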
TWO16380:
long 0x7FFB0000,0x80000000,0x00000000,0x00000000
global scosh
scosh:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x400CB167
bgt.b COSHBIG
#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
fabs.x %fp0 # |X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save |X| to stack
lea (%sp),%a0 # pass ptr to |X|
bsr setox # FP0 IS EXP(|X|)
add.l &0xc,%sp # erase |X| from stack
fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
mov.l (%sp)+,%d0
fmov.s &0x3E800000,%fp1 # (1/4)
fdiv.x %fp0,%fp1 # 1/(2 EXP(|X|))
fmov.l %d0,%fpcr
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x %fp1,%fp0
bra t_catch
COSHBIG:
cmp.l %d1,&0x400CB2B3
bgt.b COSHHUGE
fabs.x %fp0
fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save fp0 to stack
lea (%sp),%a0 # pass ptr to fp0
bsr setox
add.l &0xc,%sp # clear fp0 from stack
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x TWO16380(%pc),%fp0
bra t_catch
COSHHUGE:
bra t_ovfl2
global scoshd
#--COSH(X) = 1 FOR DENORMALIZED X
scoshd:
fmov.s &0x3F800000,%fp0
fmov.l %d0,%fpcr
fadd.s &0x00800000,%fp0
bra t_pinx2
#########################################################################
# ssinh(): computes the hyperbolic sine of a normalized input #
# ssinhd(): computes the hyperbolic sine of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = sinh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# SINH #
# 1. If |X| > 16380 log2, go to 3. #
# #
# 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula #
# y = |X|, sgn = sign(X), and z = expm1(Y), #
# sinh(X) = sgn*(1/2)*( z + z/(1+z) ). #
# Exit. #
# #
# 3. If |X| > 16480 log2, go to 5. #
# #
# 4. (16380 log2 < |X| <= 16480 log2) #
# sinh(X) = sign(X) * exp(|X|)/2. #
# However, invoking exp(|X|) may cause premature overflow. #
# Thus, we calculate sinh(X) as follows: #
# Y := |X| #
# sgn := sign(X) #
# sgnFact := sgn * 2**(16380) #
# Y' := Y - 16381 log2 #
# sinh(X) := sgnFact * exp(Y'). #
# Exit. #
# #
# 5. (|X| > 16480 log2) sinh(X) must overflow. Return #
# sign(X)*Huge*Huge to generate overflow and an infinity with #
# the appropriate sign. Huge is the largest finite number in #
# extended format. Exit. #
# #
#########################################################################
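#--ILLUSTRATIVE NOTE (not part of the original header): the main-case
#--formula of step 2 (built on expm1 to keep accuracy for small |X|),
#--checked against libm in a stand-alone C sketch:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = -0.75;
#		double z = expm1(fabs(x));		/* exp(|X|) - 1 */
#		double s = 0.5 * (z + z / (1.0 + z));	/* sinh(|X|)    */
#		printf("%.17g %.17g\n", sinh(x), copysign(s, x));
#		return 0;
#	}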
global ssinh
ssinh:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l %d1,%a1 # save (compacted) operand
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x400CB167
bgt.b SINHBIG
#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
fabs.x %fp0 # Y = |X|
movm.l &0x8040,-(%sp) # {a1/d0}
fmovm.x &0x01,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
clr.l %d0
bsr setoxm1 # FP0 IS Z = EXPM1(Y)
add.l &0xc,%sp # clear Y from stack
fmov.l &0,%fpcr
movm.l (%sp)+,&0x0201 # {a1/d0}
fmov.x %fp0,%fp1
fadd.s &0x3F800000,%fp1 # 1+Z
fmov.x %fp0,-(%sp)
fdiv.x %fp1,%fp0 # Z/(1+Z)
mov.l %a1,%d1
and.l &0x80000000,%d1
or.l &0x3F000000,%d1
fadd.x (%sp)+,%fp0
mov.l %d1,-(%sp)
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.s (%sp)+,%fp0 # last fp inst - possible exceptions set
bra t_catch
SINHBIG:
cmp.l %d1,&0x400CB2B3
bgt t_ovfl
fabs.x %fp0
fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
mov.l &0,-(%sp)
mov.l &0x80000000,-(%sp)
mov.l %a1,%d1
and.l &0x80000000,%d1
or.l &0x7FFB0000,%d1
mov.l %d1,-(%sp) # EXTENDED FMT
fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save fp0 on stack
lea (%sp),%a0 # pass ptr to fp0
bsr setox
add.l &0xc,%sp # clear fp0 from stack
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x (%sp)+,%fp0 # possible exception
bra t_catch
global ssinhd
#--SINH(X) = X FOR DENORMALIZED X
ssinhd:
bra t_extdnrm
#########################################################################
# stanh(): computes the hyperbolic tangent of a normalized input #
# stanhd(): computes the hyperbolic tangent of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = tanh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# TANH #
# 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3. #
# #
# 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by #
# sgn := sign(X), y := 2|X|, z := expm1(Y), and #
# tanh(X) = sgn*( z/(2+z) ). #
# Exit. #
# #
# 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1, #
# go to 7. #
# #
# 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6. #
# #
# 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by #
# sgn := sign(X), y := 2|X|, z := exp(Y), #
# tanh(X) = sgn - [ sgn*2/(1+z) ]. #
# Exit. #
# #
# 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we #
# calculate Tanh(X) by #
# sgn := sign(X), Tiny := 2**(-126), #
# tanh(X) := sgn - sgn*Tiny. #
# Exit. #
# #
# 7. (|X| < 2**(-40)). Tanh(X) = X. Exit. #
# #
#########################################################################
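#--ILLUSTRATIVE NOTE (not part of the original header): the main-case
#--formula of step 2, checked against libm in a stand-alone C sketch:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = 0.4;
#		double z = expm1(2.0 * fabs(x));	/* exp(2|X|) - 1 */
#		printf("%.17g %.17g\n", tanh(x), copysign(z / (2.0 + z), x));
#		return 0;
#	}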
set X,FP_SCR0
set XFRAC,X+4
set SGN,L_SCR3
set V,FP_SCR0
global stanh
stanh:
fmov.x (%a0),%fp0 # LOAD INPUT
fmov.x %fp0,X(%a6)
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l %d1,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
blt.w TANHBORS # yes
cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
bgt.w TANHBORS # yes
#--THIS IS THE USUAL CASE
#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
mov.l X(%a6),%d1
mov.l %d1,SGN(%a6)
and.l &0x7FFF0000,%d1
add.l &0x00010000,%d1 # EXPONENT OF 2|X|
mov.l %d1,X(%a6)
and.l &0x80000000,SGN(%a6)
fmov.x X(%a6),%fp0 # FP0 IS Y = 2|X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x1,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
bsr setoxm1 # FP0 IS Z = EXPM1(Y)
add.l &0xc,%sp # clear Y from stack
mov.l (%sp)+,%d0
fmov.x %fp0,%fp1
fadd.s &0x40000000,%fp1 # Z+2
mov.l SGN(%a6),%d1
fmov.x %fp1,V(%a6)
eor.l %d1,V(%a6)
fmov.l %d0,%fpcr # restore users round prec,mode
fdiv.x V(%a6),%fp0
bra t_inx2
TANHBORS:
cmp.l %d1,&0x3FFF8000
blt.w TANHSM
cmp.l %d1,&0x40048AA1
bgt.w TANHHUGE
#-- (5/2) LOG2 < |X| < 50 LOG2,
#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
#--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
mov.l X(%a6),%d1
mov.l %d1,SGN(%a6)
and.l &0x7FFF0000,%d1
add.l &0x00010000,%d1 # EXPO OF 2|X|
mov.l %d1,X(%a6) # Y = 2|X|
and.l &0x80000000,SGN(%a6)
mov.l SGN(%a6),%d1
fmov.x X(%a6),%fp0 # Y = 2|X|
mov.l %d0,-(%sp)
clr.l %d0
fmovm.x &0x01,-(%sp) # save Y on stack
lea (%sp),%a0 # pass ptr to Y
bsr setox # FP0 IS EXP(Y)
add.l &0xc,%sp # clear Y from stack
mov.l (%sp)+,%d0
mov.l SGN(%a6),%d1
fadd.s &0x3F800000,%fp0 # EXP(Y)+1
eor.l &0xC0000000,%d1 # -SIGN(X)*2
fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
fdiv.x %fp0,%fp1 # -SIGN(X)2 / [EXP(Y)+1 ]
mov.l SGN(%a6),%d1
or.l &0x3F800000,%d1 # SGN
fmov.s %d1,%fp0 # SGN IN SGL FMT
fmov.l %d0,%fpcr # restore users round prec,mode
mov.b &FADD_OP,%d1 # last inst is ADD
fadd.x %fp1,%fp0
bra t_inx2
TANHSM:
fmov.l %d0,%fpcr # restore users round prec,mode
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x X(%a6),%fp0 # last inst - possible exception set
bra t_catch
#---RETURN SGN(X) - SGN(X)EPS
TANHHUGE:
mov.l X(%a6),%d1
and.l &0x80000000,%d1
or.l &0x3F800000,%d1
fmov.s %d1,%fp0
and.l &0x80000000,%d1
eor.l &0x80800000,%d1 # -SIGN(X)*EPS
fmov.l %d0,%fpcr # restore users round prec,mode
fadd.s %d1,%fp0
bra t_inx2
global stanhd
#--TANH(X) = X FOR DENORMALIZED X
stanhd:
bra t_extdnrm
#########################################################################
# slogn(): computes the natural logarithm of a normalized input #
# slognd(): computes the natural logarithm of a denormalized input #
# slognp1(): computes the log(1+X) of a normalized input #
# slognp1d(): computes the log(1+X) of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = log(X) or log(1+X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bit, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# LOGN: #
# Step 1. If |X-1| < 1/16, approximate log(X) by an odd #
# polynomial in u, where u = 2(X-1)/(X+1). Otherwise, #
# move on to Step 2. #
# #
# Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first #
# seven significant bits of Y plus 2**(-7), i.e. #
# F = 1.xxxxxx1 in base 2 where the six "x" match those #
# of Y. Note that |Y-F| <= 2**(-7). #
# #
# Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a #
# polynomial in u, log(1+u) = poly. #
# #
# Step 4. Reconstruct #
# log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u) #
# by k*log(2) + (log(F) + poly). The values of log(F) are #
# calculated beforehand and stored in the program. #
# #
# lognp1: #
# Step 1: If |X| < 1/16, approximate log(1+X) by an odd #
# polynomial in u where u = 2X/(2+X). Otherwise, move on #
# to Step 2. #
# #
# Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done #
# in Step 2 of the algorithm for LOGN and compute #
# log(1+X) as k*log(2) + log(F) + poly where poly #
# approximates log(1+u), u = (Y-F)/F. #
# #
# Implementation Notes: #
# Note 1. There are 64 different possible values for F, thus 64 #
# log(F)'s need to be tabulated. Moreover, the values of #
# 1/F are also tabulated so that the division in (Y-F)/F #
# can be performed by a multiplication. #
# #
#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
# the value Y-F has to be calculated carefully when #
# 1/2 <= X < 3/2. #
# #
# Note 3. To fully exploit the pipeline, polynomials are usually #
# separated into two parts evaluated independently before #
# being added up. #
# #
#########################################################################
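#--ILLUSTRATIVE NOTE (not part of the original header): the LOGN reduction
#--of Steps 2-4, written as a stand-alone C sketch; libm log()/log1p()
#--stand in for the tabulated log(F) and the log(1+u) polynomial, and F is
#--computed here by hand for the chosen x rather than by table lookup:
#--
#	#include <math.h>
#	#include <stdio.h>
#	int main(void)
#	{
#		double x = 23.0;			/* x = 2^4 * 1.4375     */
#		int k;
#		double y = 2.0 * frexp(x, &k);		/* y in [1,2)           */
#		k = k - 1;				/* x = 2^k * y          */
#		double f = 1.4453125;			/* 1.xxxxxx1 close to y */
#		double u = (y - f) / f;
#		double ans = k*log(2.0) + log(f) + log1p(u);
#		printf("%.17g %.17g\n", log(x), ans);	/* agree to rounding    */
#		return 0;
#	}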
LOGOF2:
long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
one:
long 0x3F800000
zero:
long 0x00000000
infty:
long 0x7F800000
negone:
long 0xBF800000
LOGA6:
long 0x3FC2499A,0xB5E4040B
LOGA5:
long 0xBFC555B5,0x848CB7DB
LOGA4:
long 0x3FC99999,0x987D8730
LOGA3:
long 0xBFCFFFFF,0xFF6F7E97
LOGA2:
long 0x3FD55555,0x555555A4
LOGA1:
long 0xBFE00000,0x00000008
LOGB5:
long 0x3F175496,0xADD7DAD6
LOGB4:
long 0x3F3C71C2,0xFE80C7E0
LOGB3:
long 0x3F624924,0x928BCCFF
LOGB2:
long 0x3F899999,0x999995EC
LOGB1:
long 0x3FB55555,0x55555555
TWO:
long 0x40000000,0x00000000
LTHOLD:
long 0x3f990000,0x80000000,0x00000000,0x00000000
LOGTBL:
long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
long 0x3FFE0000,0x94458094,0x45809446,0x00000000
long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
long 0x3FFE0000,0x80808080,0x80808081,0x00000000
long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
set ADJK,L_SCR1
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set F,FP_SCR1
set FFRAC,F+4
set KLOG2,FP_SCR0
set SAVEU,FP_SCR0
global slogn
#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
slogn:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l &0x00000000,ADJK(%a6)
LOGBGN:
#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
mov.l (%a0),%d1
mov.w 4(%a0),%d1
mov.l (%a0),X(%a6)
mov.l 4(%a0),X+4(%a6)
mov.l 8(%a0),X+8(%a6)
cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
blt.w LOGNEG # LOG OF NEGATIVE ARGUMENT IS INVALID
# X IS POSITIVE, CHECK IF X IS NEAR 1
cmp.l %d1,&0x3ffef07d # IS X < 15/16?
blt.b LOGMAIN # YES
cmp.l %d1,&0x3fff8841 # IS X > 17/16?
ble.w LOGNEAR1 # NO
LOGMAIN:
#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
#-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
#--LOG(1+U) CAN BE VERY EFFICIENT.
#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
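#
#--For reference, a rough C sketch of this decomposition (illustration only,
#--not part of the emulation; the helper name table_log is made up, and the
#--library functions are the standard <math.h> ones):
#
#   #include <math.h>
#
#   double table_log(double x)
#   {
#       int k;
#       double y = frexp(x, &k);            /* x = y * 2^k, 0.5 <= y < 1  */
#       y *= 2.0; k -= 1;                   /* renormalize so 1 <= y < 2  */
#       double f = floor(y * 128.0) / 128.0 /* first 7 fraction bits of y */
#                + 1.0 / 256.0;             /* ...with a 1 attached below */
#       double u = (y - f) / f;             /* small: |u| < 2^(-8)        */
#       return k * M_LN2 + log(f) + log1p(u);
#   }
#
#--The emulation replaces log(f) and 1/f with entries of LOGTBL above and
#--log1p(u) with the short polynomial evaluated at LP1CONT1.
#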
#--GET K, Y, F, AND ADDRESS OF 1/F.
asr.l &8,%d1
asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
sub.l &0x3FFF,%d1 # THIS IS K
add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
lea LOGTBL(%pc),%a0 # BASE ADDRESS OF 1/F AND LOG(F)
fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
mov.l XFRAC(%a6),FFRAC(%a6)
and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1 # SHIFTED 20, D1 IS THE DISPLACEMENT
add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
fmov.x X(%a6),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # Y-F
fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
#--REGISTERS SAVED: FPCR, FP1, FP2
LP1CONT1:
#--A RE-ENTRY POINT FOR LOGNP1
fmul.x (%a0),%fp0 # FP0 IS U = (Y-F)/F
fmul.x LOGOF2(%pc),%fp1 # GET K*LOG2 WHILE FP0 IS NOT READY
fmov.x %fp0,%fp2
fmul.x %fp2,%fp2 # FP2 IS V=U*U
fmov.x %fp1,KLOG2(%a6) # PUT K*LOG2 IN MEMORY, FREE FP1
#--LOG(1+U) IS APPROXIMATED BY
#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
#--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
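#
#--The split above is an algebraic rearrangement (with V = U*U) done so the
#--two bracketed Horner chains are independent and can overlap in the FPU
#--pipeline. A rough C sketch (illustration only; the coefficient array a[]
#--and helper names are made up) showing the two forms agree:
#
#   double p_nested(double u, const double a[6])
#   {
#       double v = u * u;
#       return u + v*(a[0] + u*(a[1] + u*(a[2] + u*(a[3] + u*(a[4] + u*a[5])))));
#   }
#
#   double p_split(double u, const double a[6])
#   {
#       double v = u * u;
#       double even = u + v*(a[0] + v*(a[2] + v*a[4]));  /* A1, A3, A5 terms */
#       double odd  = u*v*(a[1] + v*(a[3] + v*a[5]));    /* A2, A4, A6 terms */
#       return even + odd;                  /* same value, up to rounding */
#   }
#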
fmov.x %fp2,%fp3
fmov.x %fp2,%fp1
fmul.d LOGA6(%pc),%fp1 # V*A6
fmul.d LOGA5(%pc),%fp2 # V*A5
fadd.d LOGA4(%pc),%fp1 # A4+V*A6
fadd.d LOGA3(%pc),%fp2 # A3+V*A5
fmul.x %fp3,%fp1 # V*(A4+V*A6)
fmul.x %fp3,%fp2 # V*(A3+V*A5)
fadd.d LOGA2(%pc),%fp1 # A2+V*(A4+V*A6)
fadd.d LOGA1(%pc),%fp2 # A1+V*(A3+V*A5)
fmul.x %fp3,%fp1 # V*(A2+V*(A4+V*A6))
add.l &16,%a0 # ADDRESS OF LOG(F)
fmul.x %fp3,%fp2 # V*(A1+V*(A3+V*A5))
fmul.x %fp0,%fp1 # U*V*(A2+V*(A4+V*A6))
fadd.x %fp2,%fp0 # U+V*(A1+V*(A3+V*A5))
fadd.x (%a0),%fp1 # LOG(F)+U*V*(A2+V*(A4+V*A6))
fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
fadd.x %fp1,%fp0 # FP0 IS LOG(F) + LOG(1+U)
fmov.l %d0,%fpcr
fadd.x KLOG2(%a6),%fp0 # FINAL ADD
bra t_inx2
LOGNEAR1:
# if the input is exactly equal to one, then exit through ld_pzero.
# if these 2 lines weren't here, the correct answer would be returned
# but the INEX2 bit would be set.
fcmp.b %fp0,&0x1 # is it equal to one?
fbeq.l ld_pzero # yes
#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
fmov.x %fp0,%fp1
fsub.s one(%pc),%fp1 # FP1 IS X-1
fadd.s one(%pc),%fp0 # FP0 IS X+1
fadd.x %fp1,%fp1 # FP1 IS 2(X-1)
#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
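#
#--Why this identity holds (worked out, for reference): with U = 2(X-1)/(X+1),
#--  1 + U/2 = 2X/(X+1)   and   1 - U/2 = 2/(X+1),
#--so LOG(1+U/2) - LOG(1-U/2) = LOG( (2X/(X+1)) / (2/(X+1)) ) = LOG(X).
#--Because LOG((1+t)/(1-t)) is an odd function of t, only odd powers of U
#--appear in the polynomial evaluated below.
#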
LP1CONT2:
#--THIS IS A RE-ENTRY POINT FOR LOGNP1
fdiv.x %fp0,%fp1 # FP1 IS U
fmovm.x &0xc,-(%sp) # SAVE FP2-3
#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
#--LET V=U*U, W=V*V, CALCULATE
#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
#--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
fmov.x %fp1,%fp0
fmul.x %fp0,%fp0 # FP0 IS V
fmov.x %fp1,SAVEU(%a6) # STORE U IN MEMORY, FREE FP1
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS W
fmov.d LOGB5(%pc),%fp3
fmov.d LOGB4(%pc),%fp2
fmul.x %fp1,%fp3 # W*B5
fmul.x %fp1,%fp2 # W*B4
fadd.d LOGB3(%pc),%fp3 # B3+W*B5
fadd.d LOGB2(%pc),%fp2 # B2+W*B4
fmul.x %fp3,%fp1 # W*(B3+W*B5), FP3 RELEASED
fmul.x %fp0,%fp2 # V*(B2+W*B4)
fadd.d LOGB1(%pc),%fp1 # B1+W*(B3+W*B5)
fmul.x SAVEU(%a6),%fp0 # FP0 IS U*V
fadd.x %fp2,%fp1 # B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
fmul.x %fp1,%fp0 # U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
fmov.l %d0,%fpcr
fadd.x SAVEU(%a6),%fp0
bra t_inx2
#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
LOGNEG:
bra t_operr
global slognd
slognd:
#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
mov.l &-100,ADJK(%a6) # INPUT = 2^(ADJK) * FP0
#----normalize the input value by left shifting k bits (k to be determined
#----below), adjusting exponent and storing -k to ADJK
#----the value TWOTO100 is no longer needed.
#----Note that this code assumes the denormalized input is NON-ZERO.
movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
mov.l (%a0),%d3 # D3 is exponent of smallest norm. #
mov.l 4(%a0),%d4
mov.l 8(%a0),%d5 # (D4,D5) is (Hi_X,Lo_X)
clr.l %d2 # D2 used for holding K
tst.l %d4
bne.b Hi_not0
Hi_0:
mov.l %d5,%d4
clr.l %d5
mov.l &32,%d2
clr.l %d6
bfffo %d4{&0:&32},%d6
lsl.l %d6,%d4
add.l %d6,%d2 # (D3,D4,D5) is normalized
mov.l %d3,X(%a6)
mov.l %d4,XFRAC(%a6)
mov.l %d5,XFRAC+4(%a6)
neg.l %d2
mov.l %d2,ADJK(%a6)
fmov.x X(%a6),%fp0
movm.l (%sp)+,&0xfc # restore registers {d2-d7}
lea X(%a6),%a0
bra.w LOGBGN # begin regular log(X)
Hi_not0:
clr.l %d6
bfffo %d4{&0:&32},%d6 # find first 1
mov.l %d6,%d2 # get k
lsl.l %d6,%d4
mov.l %d5,%d7 # a copy of D5
lsl.l %d6,%d5
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d4 # (D3,D4,D5) normalized
mov.l %d3,X(%a6)
mov.l %d4,XFRAC(%a6)
mov.l %d5,XFRAC+4(%a6)
neg.l %d2
mov.l %d2,ADJK(%a6)
fmov.x X(%a6),%fp0
movm.l (%sp)+,&0xfc # restore registers {d2-d7}
lea X(%a6),%a0
bra.w LOGBGN # begin regular log(X)
global slognp1
#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
slognp1:
fmov.x (%a0),%fp0 # LOAD INPUT
fabs.x %fp0 # test magnitude
fcmp.x %fp0,LTHOLD(%pc) # compare with min threshold
fbgt.w LP1REAL # if greater, continue
fmov.l %d0,%fpcr
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x (%a0),%fp0 # return signed argument
bra t_catch
LP1REAL:
fmov.x (%a0),%fp0 # LOAD INPUT
mov.l &0x00000000,ADJK(%a6)
fmov.x %fp0,%fp1 # FP1 IS INPUT Z
fadd.s one(%pc),%fp0 # X := ROUND(1+Z)
fmov.x %fp0,X(%a6)
mov.w XFRAC(%a6),XDCARE(%a6)
mov.l X(%a6),%d1
cmp.l %d1,&0
ble.w LP1NEG0 # LOG OF ZERO OR -VE
cmp.l %d1,&0x3ffe8000 # IS X = ROUND(1+Z) IN [1/2,3/2]?
blt.w LOGMAIN
cmp.l %d1,&0x3fffc000
bgt.w LOGMAIN
#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, WHICH IS THE ROUNDED VALUE OF 1+Z,
#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
LP1NEAR1:
#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
cmp.l %d1,&0x3ffef07d
blt.w LP1CARE
cmp.l %d1,&0x3fff8841
bgt.w LP1CARE
LP1ONE16:
#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
fadd.x %fp1,%fp1 # FP1 IS 2Z
fadd.s one(%pc),%fp0 # FP0 IS 1+X
#--U = FP1/FP0
bra.w LP1CONT2
LP1CARE:
#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
#--THERE ARE ONLY TWO CASES.
#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
#--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
mov.l XFRAC(%a6),FFRAC(%a6)
and.l &0xFE000000,FFRAC(%a6)
or.l &0x01000000,FFRAC(%a6) # F OBTAINED
cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
bge.b KISZERO
KISNEG1:
fmov.s TWO(%pc),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # 2-F
mov.l FFRAC(%a6),%d1
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1 # D1 CONTAINS DISPLACEMENT FOR 1/F
fadd.x %fp1,%fp1 # GET 2Z
fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
fadd.x %fp1,%fp0 # FP0 IS Y-F = (2-F)+2Z
lea LOGTBL(%pc),%a0 # A0 IS ADDRESS OF 1/F
add.l %d1,%a0
fmov.s negone(%pc),%fp1 # FP1 IS K = -1
bra.w LP1CONT1
KISZERO:
fmov.s one(%pc),%fp0
mov.l &0x3fff0000,F(%a6)
clr.l F+8(%a6)
fsub.x F(%a6),%fp0 # 1-F
mov.l FFRAC(%a6),%d1
and.l &0x7E000000,%d1
asr.l &8,%d1
asr.l &8,%d1
asr.l &4,%d1
fadd.x %fp1,%fp0 # FP0 IS Y-F
fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
lea LOGTBL(%pc),%a0
add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
fmov.s zero(%pc),%fp1 # FP1 IS K = 0
bra.w LP1CONT1
LP1NEG0:
#--FPCR SAVED. D1 IS X IN COMPACT FORM.
cmp.l %d1,&0
blt.b LP1NEG
LP1ZERO:
fmov.s negone(%pc),%fp0
fmov.l %d0,%fpcr
bra t_dz
LP1NEG:
fmov.s zero(%pc),%fp0
fmov.l %d0,%fpcr
bra t_operr
global slognp1d
#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
# Simply return the denorm
slognp1d:
bra t_extdnrm
#########################################################################
# satanh(): computes the inverse hyperbolic tangent of a norm input #
# satanhd(): computes the inverse hyperbolic tangent of a denorm input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = arctanh(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 3 ulps in 64 significant bits, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# ATANH #
# 1. If |X| >= 1, go to 3. #
# #
# 2. (|X| < 1) Calculate atanh(X) by #
# sgn := sign(X) #
# y := |X| #
# z := 2y/(1-y) #
# atanh(X) := sgn * (1/2) * logp1(z) #
# Exit. #
# #
# 3. If |X| > 1, go to 5. #
# #
# 4. (|X| = 1) Generate infinity with an appropriate sign and #
# divide-by-zero by #
# sgn := sign(X) #
# atan(X) := sgn / (+0). #
# Exit. #
# #
# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
# Exit. #
# #
#########################################################################
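#
# A rough C sketch of the |X| < 1 path below (illustration only; the helper
# name my_atanh is made up, the library calls are standard <math.h>):
#
#   #include <math.h>
#
#   double my_atanh(double x)
#   {
#       double y = fabs(x);
#       double z = 2.0 * y / (1.0 - y);         /* z = 2y/(1-y)              */
#       return copysign(0.5 * log1p(z), x);     /* sgn(x) * (1/2) * log1p(z) */
#   }
#
# This works because atanh(y) = (1/2)*log((1+y)/(1-y)) and
# 1 + 2y/(1-y) = (1+y)/(1-y).
#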
global satanh
satanh:
mov.l (%a0),%d1
mov.w 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FFF8000
bge.b ATANHBIG
#--THIS IS THE USUAL CASE, |X| < 1
#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
fabs.x (%a0),%fp0 # Y = |X|
fmov.x %fp0,%fp1
fneg.x %fp1 # -Y
fadd.x %fp0,%fp0 # 2Y
fadd.s &0x3F800000,%fp1 # 1-Y
fdiv.x %fp1,%fp0 # 2Y/(1-Y)
mov.l (%a0),%d1
and.l &0x80000000,%d1
or.l &0x3F000000,%d1 # SIGN(X)*HALF
mov.l %d1,-(%sp)
mov.l %d0,-(%sp) # save rnd prec,mode
clr.l %d0 # pass ext prec,RN
fmovm.x &0x01,-(%sp) # save Z on stack
lea (%sp),%a0 # pass ptr to Z
bsr slognp1 # LOG1P(Z)
add.l &0xc,%sp # clear Z from stack
mov.l (%sp)+,%d0 # fetch old prec,mode
fmov.l %d0,%fpcr # load it
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.s (%sp)+,%fp0
bra t_catch
ATANHBIG:
fabs.x (%a0),%fp0 # |X|
fcmp.s %fp0,&0x3F800000
fbgt t_operr
bra t_dz
global satanhd
#--ATANH(X) = X FOR DENORMALIZED X
satanhd:
bra t_extdnrm
#########################################################################
# slog10(): computes the base-10 logarithm of a normalized input #
# slog10d(): computes the base-10 logarithm of a denormalized input #
# slog2(): computes the base-2 logarithm of a normalized input #
# slog2d(): computes the base-2 logarithm of a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = log_10(X) or log_2(X) #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 1.7 ulps in 64 significant bits, #
# i.e. within 0.5003 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# slog10d: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FPCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
# Notes: Even if X is denormalized, log(X) is always normalized. #
# #
# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L10. #
# #
# slog10: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FPCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call sLogN to obtain Y = log(X), the natural log of X. #
# #
# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L10. #
# #
# sLog2d: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FPCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
# Notes: Even if X is denormalized, log(X) is always normalized. #
# #
# Step 2. Compute log_2(X) = log(X) * (1/log(2)). #
# 2.1 Restore the user FPCR #
# 2.2 Return ans := Y * INV_L2. #
# #
# sLog2: #
# #
# Step 0. If X < 0, create a NaN and raise the invalid operation #
# flag. Otherwise, save FPCR in D1; set FPCR to default. #
# Notes: Default means round-to-nearest mode, no floating-point #
# traps, and precision control = double extended. #
# #
# Step 1. If X is not an integer power of two, i.e., X != 2^k, #
# go to Step 3. #
# #
# Step 2. Return k. #
# 2.1 Get integer k, X = 2^k. #
# 2.2 Restore the user FPCR. #
# 2.3 Return ans := convert-to-double-extended(k). #
# #
# Step 3. Call sLogN to obtain Y = log(X), the natural log of X. #
# #
# Step 4. Compute log_2(X) = log(X) * (1/log(2)). #
# 4.1 Restore the user FPCR #
# 4.2 Return ans := Y * INV_L2. #
# #
#########################################################################
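#
# A rough C sketch of the two entry points below (illustration only; the
# helper names are made up, the library calls are standard <math.h>):
#
#   #include <math.h>
#
#   double my_log10(double x)
#   {
#       return log(x) * (1.0 / M_LN10);         /* ans := Y * INV_L10        */
#   }
#
#   double my_log2(double x)
#   {
#       int e;
#       double m = frexp(x, &e);                /* x = m * 2^e, 0.5 <= m < 1 */
#       if (m == 0.5)                           /* x is exactly 2^k          */
#           return (double)(e - 1);             /* return k, no INEX2        */
#       return log(x) * (1.0 / M_LN2);          /* ans := Y * INV_L2         */
#   }
#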
INV_L10:
long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
INV_L2:
long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
global slog10
#--entry point for Log10(X), X is normalized
slog10:
fmov.b &0x1,%fp0
fcmp.x %fp0,(%a0) # if operand == 1,
fbeq.l ld_pzero # return an EXACT zero
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slogn # log(X), X normal.
fmov.l (%sp)+,%fpcr
fmul.x INV_L10(%pc),%fp0
bra t_inx2
global slog10d
#--entry point for Log10(X), X is denormalized
slog10d:
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slognd # log(X), X denorm.
fmov.l (%sp)+,%fpcr
fmul.x INV_L10(%pc),%fp0
bra t_minx2
global slog2
#--entry point for Log2(X), X is normalized
slog2:
mov.l (%a0),%d1
blt.w invalid
mov.l 8(%a0),%d1
bne.b continue # X is not 2^k
mov.l 4(%a0),%d1
and.l &0x7FFFFFFF,%d1
bne.b continue
#--X = 2^k.
mov.w (%a0),%d1
and.l &0x00007FFF,%d1
sub.l &0x3FFF,%d1
beq.l ld_pzero
fmov.l %d0,%fpcr
fmov.l %d1,%fp0
bra t_inx2
continue:
mov.l %d0,-(%sp)
clr.l %d0
bsr slogn # log(X), X normal.
fmov.l (%sp)+,%fpcr
fmul.x INV_L2(%pc),%fp0
bra t_inx2
invalid:
bra t_operr
global slog2d
#--entry point for Log2(X), X is denormalized
slog2d:
mov.l (%a0),%d1
blt.w invalid
mov.l %d0,-(%sp)
clr.l %d0
bsr slognd # log(X), X denorm.
fmov.l (%sp)+,%fpcr
fmul.x INV_L2(%pc),%fp0
bra t_minx2
#########################################################################
# stwotox(): computes 2**X for a normalized input #
# stwotoxd(): computes 2**X for a denormalized input #
# stentox(): computes 10**X for a normalized input #
# stentoxd(): computes 10**X for a denormalized input #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input #
# d0 = round precision,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = 2**X or 10**X #
# #
# ACCURACY and MONOTONICITY ******************************************* #
# The returned result is within 2 ulps in 64 significant bits, #
# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
# rounded to double precision. The result is provably monotonic #
# in double precision. #
# #
# ALGORITHM *********************************************************** #
# #
# twotox #
# 1. If |X| > 16480, go to ExpBig. #
# #
# 2. If |X| < 2**(-70), go to ExpSm. #
# #
# 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore #
# decompose N as #
# N = 64(M + M') + j, j = 0,1,2,...,63. #
# #
# 4. Overwrite r := r * log2. Then #
# 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
# Go to expr to compute that expression. #
# #
# tentox #
# 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig. #
# #
# 2. If |X| < 2**(-70), go to ExpSm. #
# #
# 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set #
# N := round-to-int(y). Decompose N as #
# N = 64(M + M') + j, j = 0,1,2,...,63. #
# #
# 4. Define r as #
# r := ((X - N*L1)-N*L2) * L10 #
# where L1, L2 are the leading and trailing parts of #
# log_10(2)/64 and L10 is the natural log of 10. Then #
# 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
# Go to expr to compute that expression. #
# #
# expr #
# 1. Fetch 2**(j/64) from table as Fact1 and Fact2. #
# #
# 2. Overwrite Fact1 and Fact2 by #
# Fact1 := 2**(M) * Fact1 #
# Fact2 := 2**(M) * Fact2 #
# Thus Fact1 + Fact2 = 2**(M) * 2**(j/64). #
# #
# 3. Calculate P where 1 + P approximates exp(r): #
# P = r + r*r*(A1+r*(A2+...+r*A5)). #
# #
# 4. Let AdjFact := 2**(M'). Return #
# AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ). #
# Exit. #
# #
# ExpBig #
# 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
# generate underflow by Tiny * Tiny. #
# #
# ExpSm #
# 1. Return 1 + X. #
# #
#########################################################################
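#
# A rough C sketch of the twotox decomposition above (illustration only; the
# helper name is made up, the library calls are standard <math.h>, and the
# range checks of steps 1-2 are omitted):
#
#   #include <math.h>
#
#   double my_two_to_x(double x)
#   {
#       int n  = (int)nearbyint(64.0 * x);      /* N  = round-to-int(64*X) */
#       int j  = n & 63;                        /* N  = 64*L + j           */
#       int l  = (n - j) / 64;
#       int m  = l / 2, m2 = l - m;             /* L  = M + M'             */
#       double r = (x - n / 64.0) * M_LN2;      /* r := (X - N/64) * log 2 */
#       /* 2^X = 2^(M') * 2^(M) * 2^(j/64) * exp(r); the emulation fetches
#          2^(j/64) from TEXPTBL and approximates exp(r)-1 by a polynomial */
#       return ldexp(ldexp(exp2(j / 64.0) * exp(r), m), m2);
#   }
#
# tentox follows the same path after forming N = round(64*X*log_2(10)) and
# r = ((X - N*L1) - N*L2) * log(10).
#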
L2TEN64:
long 0x406A934F,0x0979A371 # 64LOG10/LOG2
L10TWO1:
long 0x3F734413,0x509F8000 # LOG2/64LOG10
L10TWO2:
long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
LOG10: long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
LOG2: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
EXPA5: long 0x3F56C16D,0x6F7BD0B2
EXPA4: long 0x3F811112,0x302C712C
EXPA3: long 0x3FA55555,0x55554CC1
EXPA2: long 0x3FC55555,0x55554A54
EXPA1: long 0x3FE00000,0x00000000,0x00000000,0x00000000
TEXPTBL:
long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
set INT,L_SCR1
set X,FP_SCR0
set XDCARE,X+2
set XFRAC,X+4
set ADJFACT,FP_SCR0
set FACT1,FP_SCR0
set FACT1HI,FACT1+4
set FACT1LOW,FACT1+8
set FACT2,FP_SCR1
set FACT2HI,FACT2+4
set FACT2LOW,FACT2+8
global stwotox
#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
stwotox:
fmovm.x (%a0),&0x80 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
bge.b TWOOK1
bra.w EXPBORS
TWOOK1:
cmp.l %d1,&0x400D80C0 # |X| > 16480?
ble.b TWOMAIN
bra.w EXPBORS
TWOMAIN:
#--USUAL CASE, 2^(-70) <= |X| <= 16480
fmov.x %fp0,%fp1
fmul.s &0x42800000,%fp1 # 64 * X
fmov.l %fp1,INT(%a6) # N = ROUND-TO-INT(64 X)
mov.l %d2,-(%sp)
lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
mov.l INT(%a6),%d1
mov.l %d1,%d2
and.l &0x3F,%d1 # D1 IS J
asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
asr.l &6,%d2 # d2 IS L, N = 64L + J
mov.l %d2,%d1
asr.l &1,%d1 # D1 IS M
sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
add.l &0x3FFF,%d2
#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
#--ADJFACT = 2^(M').
#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmul.s &0x3C800000,%fp1 # (1/64)*N
mov.l (%a1)+,FACT1(%a6)
mov.l (%a1)+,FACT1HI(%a6)
mov.l (%a1)+,FACT1LOW(%a6)
mov.w (%a1)+,FACT2(%a6)
fsub.x %fp1,%fp0 # X - (1/64)*INT(64 X)
mov.w (%a1)+,FACT2HI(%a6)
clr.w FACT2HI+2(%a6)
clr.l FACT2LOW(%a6)
add.w %d1,FACT1(%a6)
fmul.x LOG2(%pc),%fp0 # FP0 IS R
add.w %d1,FACT2(%a6)
bra.w expr
EXPBORS:
#--FPCR, D0 SAVED
cmp.l %d1,&0x3FFF8000
bgt.b TEXPBIG
#--|X| IS SMALL, RETURN 1 + X
fmov.l %d0,%fpcr # restore users round prec,mode
fadd.s &0x3F800000,%fp0 # RETURN 1 + X
bra t_pinx2
TEXPBIG:
#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
#--REGISTERS SAVED SO FAR ARE FPCR AND D0
mov.l X(%a6),%d1
cmp.l %d1,&0
blt.b EXPNEG
bra t_ovfl2 # t_ovfl expects positive value
EXPNEG:
bra t_unfl2 # t_unfl expects positive value
global stwotoxd
stwotoxd:
#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
fmov.l %d0,%fpcr # set user's rounding mode/precision
fmov.s &0x3F800000,%fp0 # RETURN 1 + X
mov.l (%a0),%d1
or.l &0x00800001,%d1
fadd.s %d1,%fp0
bra t_pinx2
global stentox
#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
stentox:
fmovm.x (%a0),&0x80 # LOAD INPUT
mov.l (%a0),%d1
mov.w 4(%a0),%d1
fmov.x %fp0,X(%a6)
and.l &0x7FFFFFFF,%d1
cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
bge.b TENOK1
bra.w EXPBORS
TENOK1:
cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
ble.b TENMAIN
bra.w EXPBORS
TENMAIN:
#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
fmov.x %fp0,%fp1
fmul.d L2TEN64(%pc),%fp1 # X*64*LOG10/LOG2
fmov.l %fp1,INT(%a6) # N=INT(X*64*LOG10/LOG2)
mov.l %d2,-(%sp)
lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
mov.l INT(%a6),%d1
mov.l %d1,%d2
and.l &0x3F,%d1 # D1 IS J
asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
asr.l &6,%d2 # d2 IS L, N = 64L + J
mov.l %d2,%d1
asr.l &1,%d1 # D1 IS M
sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
add.l &0x3FFF,%d2
#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
#--ADJFACT = 2^(M').
#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
fmovm.x &0x0c,-(%sp) # save fp2/fp3
fmov.x %fp1,%fp2
fmul.d L10TWO1(%pc),%fp1 # N*(LOG2/64LOG10)_LEAD
mov.l (%a1)+,FACT1(%a6)
fmul.x L10TWO2(%pc),%fp2 # N*(LOG2/64LOG10)_TRAIL
mov.l (%a1)+,FACT1HI(%a6)
mov.l (%a1)+,FACT1LOW(%a6)
fsub.x %fp1,%fp0 # X - N L_LEAD
mov.w (%a1)+,FACT2(%a6)
fsub.x %fp2,%fp0 # X - N L_TRAIL
mov.w (%a1)+,FACT2HI(%a6)
clr.w FACT2HI+2(%a6)
clr.l FACT2LOW(%a6)
fmul.x LOG10(%pc),%fp0 # FP0 IS R
add.w %d1,FACT1(%a6)
add.w %d1,FACT2(%a6)
expr:
#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
#--FP0 IS R. THE FOLLOWING CODE COMPUTES
#-- 2**(M'+M) * 2**(J/64) * EXP(R)
fmov.x %fp0,%fp1
fmul.x %fp1,%fp1 # FP1 IS S = R*R
fmov.d EXPA5(%pc),%fp2 # FP2 IS A5
fmov.d EXPA4(%pc),%fp3 # FP3 IS A4
fmul.x %fp1,%fp2 # FP2 IS S*A5
fmul.x %fp1,%fp3 # FP3 IS S*A4
fadd.d EXPA3(%pc),%fp2 # FP2 IS A3+S*A5
fadd.d EXPA2(%pc),%fp3 # FP3 IS A2+S*A4
fmul.x %fp1,%fp2 # FP2 IS S*(A3+S*A5)
fmul.x %fp1,%fp3 # FP3 IS S*(A2+S*A4)
fadd.d EXPA1(%pc),%fp2 # FP2 IS A1+S*(A3+S*A5)
fmul.x %fp0,%fp3 # FP3 IS R*S*(A2+S*A4)
fmul.x %fp1,%fp2 # FP2 IS S*(A1+S*(A3+S*A5))
fadd.x %fp3,%fp0 # FP0 IS R+R*S*(A2+S*A4)
fadd.x %fp2,%fp0 # FP0 IS EXP(R) - 1
fmovm.x (%sp)+,&0x30 # restore fp2/fp3
#--FINAL RECONSTRUCTION PROCESS
#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
fmul.x FACT1(%a6),%fp0
fadd.x FACT2(%a6),%fp0
fadd.x FACT1(%a6),%fp0
fmov.l %d0,%fpcr # restore users round prec,mode
mov.w %d2,ADJFACT(%a6) # INSERT EXPONENT
mov.l (%sp)+,%d2
mov.l &0x80000000,ADJFACT+4(%a6)
clr.l ADJFACT+8(%a6)
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x ADJFACT(%a6),%fp0 # FINAL ADJUSTMENT
bra t_catch
global stentoxd
stentoxd:
#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
fmov.l %d0,%fpcr # set user's rounding mode/precision
fmov.s &0x3F800000,%fp0 # RETURN 1 + X
mov.l (%a0),%d1
or.l &0x00800001,%d1
fadd.s %d1,%fp0
bra t_pinx2
#########################################################################
# smovcr(): returns the ROM constant at the offset specified in d1 #
# rounded to the mode and precision specified in d0. #
# #
# INPUT *************************************************************** #
# d0 = rnd prec,mode #
# d1 = ROM offset #
# #
# OUTPUT ************************************************************** #
# fp0 = the ROM constant rounded to the user's rounding mode,prec #
# #
#########################################################################
global smovcr
smovcr:
mov.l %d1,-(%sp) # save rom offset for a sec
lsr.b &0x4,%d0 # shift ctrl bits to lo
mov.l %d0,%d1 # make a copy
andi.w &0x3,%d1 # extract rnd mode
andi.w &0xc,%d0 # extract rnd prec
swap %d0 # put rnd prec in hi
mov.w %d1,%d0 # put rnd mode in lo
mov.l (%sp)+,%d1 # get rom offset
#
# check range of offset
#
tst.b %d1 # if zero, offset is to pi
beq.b pi_tbl # it is pi
cmpi.b %d1,&0x0a # check range $01 - $0a
ble.b z_val # if in this range, return zero
cmpi.b %d1,&0x0e # check range $0b - $0e
ble.b sm_tbl # valid constants in this range
cmpi.b %d1,&0x2f # check range $10 - $2f
ble.b z_val # if in this range, return zero
cmpi.b %d1,&0x3f # check range $30 - $3f
ble.b bg_tbl # valid constants in this range
z_val:
bra.l ld_pzero # return a zero
#
# the answer is PI rounded to the proper precision.
#
# fetch a pointer to the answer table relating to the proper rounding
# precision.
#
pi_tbl:
tst.b %d0 # is rmode RN?
bne.b pi_not_rn # no
pi_rn:
lea.l PIRN(%pc),%a0 # yes; load PI RN table addr
bra.w set_finx
pi_not_rn:
cmpi.b %d0,&rp_mode # is rmode RP?
beq.b pi_rp # yes
pi_rzrm:
lea.l PIRZRM(%pc),%a0 # no; load PI RZ,RM table addr
bra.b set_finx
pi_rp:
lea.l PIRP(%pc),%a0 # load PI RP table addr
bra.b set_finx
#
# the answer is one of:
# $0B log10(2) (inexact)
# $0C e (inexact)
# $0D log2(e) (inexact)
# $0E log10(e) (exact)
#
# fetch a pointer to the answer table relating to the proper rounding
# precision.
#
sm_tbl:
subi.b &0xb,%d1 # make offset in 0-4 range
tst.b %d0 # is rmode RN?
bne.b sm_not_rn # no
sm_rn:
lea.l SMALRN(%pc),%a0 # yes; load RN table addr
sm_tbl_cont:
cmpi.b %d1,&0x2 # is result log10(e)?
ble.b set_finx # no; answer is inexact
bra.b no_finx # yes; answer is exact
sm_not_rn:
cmpi.b %d0,&rp_mode # is rmode RP?
beq.b sm_rp # yes
sm_rzrm:
lea.l SMALRZRM(%pc),%a0 # no; load RZ,RM table addr
bra.b sm_tbl_cont
sm_rp:
lea.l SMALRP(%pc),%a0 # load RP table addr
bra.b sm_tbl_cont
#
# the answer is one of:
# $30 ln(2) (inexact)
# $31 ln(10) (inexact)
# $32 10^0 (exact)
# $33 10^1 (exact)
# $34 10^2 (exact)
# $35 10^4 (exact)
# $36 10^8 (exact)
# $37 10^16 (exact)
# $38 10^32 (inexact)
# $39 10^64 (inexact)
# $3A 10^128 (inexact)
# $3B 10^256 (inexact)
# $3C 10^512 (inexact)
# $3D 10^1024 (inexact)
# $3E 10^2048 (inexact)
# $3F 10^4096 (inexact)
#
# fetch a pointer to the answer table relating to the proper rounding
# precision.
#
bg_tbl:
subi.b &0x30,%d1 # make offset in 0-f range
tst.b %d0 # is rmode RN?
bne.b bg_not_rn # no
bg_rn:
lea.l BIGRN(%pc),%a0 # yes; load RN table addr
bg_tbl_cont:
cmpi.b %d1,&0x1 # is offset <= $31?
ble.b set_finx # yes; answer is inexact
cmpi.b %d1,&0x7 # is $32 <= offset <= $37?
ble.b no_finx # yes; answer is exact
bra.b set_finx # no; answer is inexact
bg_not_rn:
cmpi.b %d0,&rp_mode # is rmode RP?
beq.b bg_rp # yes
bg_rzrm:
lea.l BIGRZRM(%pc),%a0 # no; load RZ,RM table addr
bra.b bg_tbl_cont
bg_rp:
lea.l BIGRP(%pc),%a0 # load RP table addr
bra.b bg_tbl_cont
# answer is inexact, so set INEX2 and AINEX in the user's FPSR.
set_finx:
ori.l &inx2a_mask,USER_FPSR(%a6) # set INEX2/AINEX
no_finx:
mulu.w &0xc,%d1 # offset points into tables
swap %d0 # put rnd prec in lo word
tst.b %d0 # is precision extended?
bne.b not_ext # no; sgl or dbl must be rounded
# Precision is extended
fmovm.x (%a0,%d1.w),&0x80 # return result in fp0
rts
# Precision is single or double
not_ext:
swap %d0 # rnd prec in upper word
# call round() to round the answer to the proper precision.
# exponents out of range for single or double DO NOT cause underflow
# or overflow.
mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
mov.l %d0,%d1
clr.l %d0 # clear g,r,s
lea FP_SCR1(%a6),%a0 # pass ptr to answer
clr.w LOCAL_SGN(%a0) # sign always positive
bsr.l _round # round the mantissa
fmovm.x (%a0),&0x80 # return rounded result in fp0
rts
align 0x4
PIRN: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
PIRZRM: long 0x40000000,0xc90fdaa2,0x2168c234 # pi
PIRP: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
SMALRN: long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
long 0x40000000,0xadf85458,0xa2bb4a9a # e
long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
long 0x00000000,0x00000000,0x00000000 # 0.0
SMALRZRM:
long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
long 0x40000000,0xadf85458,0xa2bb4a9a # e
long 0x3fff0000,0xb8aa3b29,0x5c17f0bb # log2(e)
long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
long 0x00000000,0x00000000,0x00000000 # 0.0
SMALRP: long 0x3ffd0000,0x9a209a84,0xfbcff799 # log10(2)
long 0x40000000,0xadf85458,0xa2bb4a9b # e
long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
long 0x00000000,0x00000000,0x00000000 # 0.0
BIGRN: long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
BIGRZRM:
long 0x3ffe0000,0xb17217f7,0xd1cf79ab # ln(2)
long 0x40000000,0x935d8ddd,0xaaa8ac16 # ln(10)
long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
BIGRP:
long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
#########################################################################
# sscale(): computes the destination operand scaled by the source #
# operand. If the absolute value of the source operand is #
# >= 2^14, an overflow or underflow is returned. #
# #
# INPUT *************************************************************** #
# a0 = pointer to double-extended source operand X #
# a1 = pointer to double-extended destination operand Y #
# #
# OUTPUT ************************************************************** #
# fp0 = scale(X,Y) #
# #
#########################################################################
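#
# A rough C sketch of the operation (illustration only; the helper name is
# made up, the library calls are standard <math.h>):
#
#   #include <math.h>
#
#   double my_scale(double x, double y)
#   {
#       return ldexp(y, (int)trunc(x));     /* Y * 2^(integer part of X) */
#   }
#
# The emulation builds the power-of-two factor by hand so that it can also
# handle a denormalized Y and shift amounts that would not fit in a normal
# multiply factor.
#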
set SIGN, L_SCR1
global sscale
sscale:
mov.l %d0,-(%sp) # store off ctrl bits for now
mov.w DST_EX(%a1),%d1 # get dst exponent
smi.b SIGN(%a6) # use SIGN to hold dst sign
andi.l &0x00007fff,%d1 # strip sign from dst exp
mov.w SRC_EX(%a0),%d0 # check src bounds
andi.w &0x7fff,%d0 # clr src sign bit
cmpi.w %d0,&0x3fff # is |src| < 1, i.e. int(src) = 0?
blt.w src_small # yes
cmpi.w %d0,&0x400c # no; is src too big?
bgt.w src_out # yes
#
# Source is within 2^14 range.
#
src_ok:
fintrz.x SRC(%a0),%fp0 # calc int of src
fmov.l %fp0,%d0 # int src to d0
# don't want any accrued bits from the fintrz showing up later since
# we may need to read the fpsr for the last fp op in t_catch2().
fmov.l &0x0,%fpsr
tst.b DST_HI(%a1) # is dst denormalized?
bmi.b sok_norm
# the dst is a DENORM. normalize the DENORM and add the adjustment to
# the src value. then, jump to the norm part of the routine.
sok_dnrm:
mov.l %d0,-(%sp) # save src for now
mov.w DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
mov.l DST_HI(%a1),FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass ptr to DENORM
bsr.l norm # normalize the DENORM
neg.l %d0
add.l (%sp)+,%d0 # add adjustment to src
fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
cmpi.w %d0,&-0x3fff # is the shift amt really low?
bge.b sok_norm2 # thank goodness no
# the multiply factor that we're trying to create should be a denorm
# for the multiply to work. Therefore, we're going to actually do a
# multiply with a denorm which will cause an unimplemented data type
# exception to be put into the machine which will be caught and corrected
# later. we don't do this with the DENORMs above because this method
# is slower. but, don't fret, I don't see it being used much either.
fmov.l (%sp)+,%fpcr # restore user fpcr
mov.l &0x80000000,%d1 # load normalized mantissa
subi.l &-0x3fff,%d0 # how many should we shift?
neg.l %d0 # make it positive
cmpi.b %d0,&0x20 # is it > 32?
bge.b sok_dnrm_32 # yes
lsr.l %d0,%d1 # no; bit stays in upper lw
clr.l -(%sp) # insert zero low mantissa
mov.l %d1,-(%sp) # insert new high mantissa
clr.l -(%sp) # make zero exponent
bra.b sok_norm_cont
sok_dnrm_32:
subi.b &0x20,%d0 # get shift count
lsr.l %d0,%d1 # make low mantissa longword
mov.l %d1,-(%sp) # insert new low mantissa
clr.l -(%sp) # insert zero high mantissa
clr.l -(%sp) # make zero exponent
bra.b sok_norm_cont
# the src will force the dst to a DENORM value or worse. so, let's
# create an fp multiply that will create the result.
sok_norm:
fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
sok_norm2:
fmov.l (%sp)+,%fpcr # restore user fpcr
addi.w &0x3fff,%d0 # turn src amt into exp value
swap %d0 # put exponent in high word
clr.l -(%sp) # insert new exponent
mov.l &0x80000000,-(%sp) # insert new high mantissa
mov.l %d0,-(%sp) # insert new lo mantissa
sok_norm_cont:
fmov.l %fpcr,%d0 # d0 needs fpcr for t_catch2
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x (%sp)+,%fp0 # do the multiply
bra t_catch2 # catch any exceptions
#
# Source is outside of 2^14 range. Test the sign and branch
# to the appropriate exception handler.
#
src_out:
mov.l (%sp)+,%d0 # restore ctrl bits
exg %a0,%a1 # swap src,dst ptrs
tst.b SRC_EX(%a1) # is src negative?
bmi t_unfl # yes; underflow
bra t_ovfl_sc # no; overflow
#
# The source input is below 1, so we check for denormalized numbers
# and set unfl.
#
src_small:
tst.b DST_HI(%a1) # is dst denormalized?
bpl.b ssmall_done # yes
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr # no; load control bits
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x DST(%a1),%fp0 # simply return dest
bra t_catch2
ssmall_done:
mov.l (%sp)+,%d0 # load control bits into d1
mov.l %a1,%a0 # pass ptr to dst
bra t_resdnrm
#########################################################################
# smod(): computes the fp MOD of the input values X,Y. #
# srem(): computes the fp (IEEE) REM of the input values X,Y. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision input X #
# a1 = pointer to extended precision input Y #
# d0 = round precision,mode #
# #
# The input operands X and Y can be either normalized or #
# denormalized. #
# #
# OUTPUT ************************************************************** #
# fp0 = FREM(X,Y) or FMOD(X,Y) #
# #
# ALGORITHM *********************************************************** #
# #
# Step 1. Save and strip signs of X and Y: signX := sign(X), #
# signY := sign(Y), X := |X|, Y := |Y|, #
# signQ := signX EOR signY. Record whether MOD or REM #
# is requested. #
# #
# Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
# If (L < 0) then #
# R := X, go to Step 4. #
# else #
# R := 2^(-L)X, j := L. #
# endif #
# #
# Step 3. Perform MOD(X,Y) #
# 3.1 If R = Y, go to Step 9. #
# 3.2 If R > Y, then { R := R - Y, Q := Q + 1} #
# 3.3 If j = 0, go to Step 4. #
# 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to #
# Step 3.1. #
# #
# Step 4. At this point, R = X - QY = MOD(X,Y). Set #
# Last_Subtract := false (used in Step 7 below). If #
# MOD is requested, go to Step 6. #
# #
# Step 5. R = MOD(X,Y), but REM(X,Y) is requested. #
# 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to #
# Step 6. #
# 5.2 If R > Y/2, then { set Last_Subtract := true, #
# Q := Q + 1, Y := signY*Y }. Go to Step 6. #
# 5.3 This is the tricky case of R = Y/2. If Q is odd, #
# then { Q := Q + 1, signX := -signX }. #
# #
# Step 6. R := signX*R. #
# #
# Step 7. If Last_Subtract = true, R := R - Y. #
# #
# Step 8. Return signQ, last 7 bits of Q, and R as required. #
# #
# Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus, #
# X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1), #
# R := 0. Return signQ, last 7 bits of Q, and R. #
# #
#########################################################################
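#
# A rough C sketch of the MOD/REM semantics implemented below (illustration
# only; the helper names are made up, the library calls are standard
# <math.h>). The bit-by-bit loop below is exact and also delivers sign(Q)
# and the low seven bits of Q in the FPSR quotient byte, which these library
# calls do not provide:
#
#   #include <math.h>
#
#   /* MOD: R = X - Q*Y, Q = trunc(X/Y), so |R| < |Y| and sign(R) = sign(X) */
#   double my_fmod(double x, double y) { return fmod(x, y); }
#
#   /* REM: R = X - Q*Y, Q = nearest-int(X/Y), so |R| <= |Y|/2 */
#   double my_frem(double x, double y) { return remainder(x, y); }
#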
set Mod_Flag,L_SCR3
set Sc_Flag,L_SCR3+1
set SignY,L_SCR2
set SignX,L_SCR2+2
set SignQ,L_SCR3+2
set Y,FP_SCR0
set Y_Hi,Y+4
set Y_Lo,Y+8
set R,FP_SCR1
set R_Hi,R+4
set R_Lo,R+8
Scale:
long 0x00010000,0x80000000,0x00000000,0x00000000
global smod
smod:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp) # save ctrl bits
clr.b Mod_Flag(%a6)
bra.b Mod_Rem
global srem
srem:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp) # save ctrl bits
mov.b &0x1,Mod_Flag(%a6)
Mod_Rem:
#..Save sign of X and Y
movm.l &0x3f00,-(%sp) # save data registers
mov.w SRC_EX(%a0),%d3
mov.w %d3,SignY(%a6)
and.l &0x00007FFF,%d3 # Y := |Y|
#
mov.l SRC_HI(%a0),%d4
mov.l SRC_LO(%a0),%d5 # (D3,D4,D5) is |Y|
tst.l %d3
bne.b Y_Normal
mov.l &0x00003FFE,%d3 # $3FFD + 1
tst.l %d4
bne.b HiY_not0
HiY_0:
mov.l %d5,%d4
clr.l %d5
sub.l &32,%d3
clr.l %d6
bfffo %d4{&0:&32},%d6
lsl.l %d6,%d4
sub.l %d6,%d3 # (D3,D4,D5) is normalized
# ...with bias $7FFD
bra.b Chk_X
HiY_not0:
clr.l %d6
bfffo %d4{&0:&32},%d6
sub.l %d6,%d3
lsl.l %d6,%d4
mov.l %d5,%d7 # a copy of D5
lsl.l %d6,%d5
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d4 # (D3,D4,D5) normalized
# ...with bias $7FFD
bra.b Chk_X
Y_Normal:
add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
# ...with bias $7FFD
Chk_X:
mov.w DST_EX(%a1),%d0
mov.w %d0,SignX(%a6)
mov.w SignY(%a6),%d1
eor.l %d0,%d1
and.l &0x00008000,%d1
mov.w %d1,SignQ(%a6) # sign(Q) obtained
and.l &0x00007FFF,%d0
mov.l DST_HI(%a1),%d1
mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
tst.l %d0
bne.b X_Normal
mov.l &0x00003FFE,%d0
tst.l %d1
bne.b HiX_not0
HiX_0:
mov.l %d2,%d1
clr.l %d2
sub.l &32,%d0
clr.l %d6
bfffo %d1{&0:&32},%d6
lsl.l %d6,%d1
sub.l %d6,%d0 # (D0,D1,D2) is normalized
# ...with bias $7FFD
bra.b Init
HiX_not0:
clr.l %d6
bfffo %d1{&0:&32},%d6
sub.l %d6,%d0
lsl.l %d6,%d1
mov.l %d2,%d7 # a copy of D2
lsl.l %d6,%d2
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d1 # (D0,D1,D2) normalized
# ...with bias $7FFD
bra.b Init
X_Normal:
add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
# ...with bias $7FFD
Init:
#
mov.l %d3,L_SCR1(%a6) # save biased exp(Y)
mov.l %d0,-(%sp) # save biased exp(X)
sub.l %d3,%d0 # L := expo(X)-expo(Y)
clr.l %d6 # D6 := carry <- 0
clr.l %d3 # D3 is Q
mov.l &0,%a1 # A1 is k; j+k=L, Q=0
#..(Carry,D1,D2) is R
tst.l %d0
bge.b Mod_Loop_pre
#..expo(X) < expo(Y). Thus X = mod(X,Y)
#
mov.l (%sp)+,%d0 # restore d0
bra.w Get_Mod
Mod_Loop_pre:
addq.l &0x4,%sp # erase exp(X)
#..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
Mod_Loop:
tst.l %d6 # test carry bit
bgt.b R_GT_Y
#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
cmp.l %d1,%d4 # compare hi(R) and hi(Y)
bne.b R_NE_Y
cmp.l %d2,%d5 # compare lo(R) and lo(Y)
bne.b R_NE_Y
#..At this point, R = Y
bra.w Rem_is_0
R_NE_Y:
#..use the borrow of the previous compare
bcs.b R_LT_Y # borrow is set iff R < Y
R_GT_Y:
#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
sub.l %d5,%d2 # lo(R) - lo(Y)
subx.l %d4,%d1 # hi(R) - hi(Y)
clr.l %d6 # clear carry
addq.l &1,%d3 # Q := Q + 1
R_LT_Y:
#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
tst.l %d0 # see if j = 0.
beq.b PostLoop
add.l %d3,%d3 # Q := 2Q
add.l %d2,%d2 # lo(R) = 2lo(R)
roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
scs %d6 # set Carry if 2(R) overflows
addq.l &1,%a1 # k := k+1
subq.l &1,%d0 # j := j - 1
#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
bra.b Mod_Loop
PostLoop:
#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
#..normalize R.
mov.l L_SCR1(%a6),%d0 # new biased expo of R
tst.l %d1
bne.b HiR_not0
HiR_0:
mov.l %d2,%d1
clr.l %d2
sub.l &32,%d0
clr.l %d6
bfffo %d1{&0:&32},%d6
lsl.l %d6,%d1
sub.l %d6,%d0 # (D0,D1,D2) is normalized
# ...with bias $7FFD
bra.b Get_Mod
HiR_not0:
clr.l %d6
bfffo %d1{&0:&32},%d6
bmi.b Get_Mod # already normalized
sub.l %d6,%d0
lsl.l %d6,%d1
mov.l %d2,%d7 # a copy of D2
lsl.l %d6,%d2
neg.l %d6
add.l &32,%d6
lsr.l %d6,%d7
or.l %d7,%d1 # (D0,D1,D2) normalized
#
Get_Mod:
cmp.l %d0,&0x000041FE
bge.b No_Scale
Do_Scale:
mov.w %d0,R(%a6)
mov.l %d1,R_Hi(%a6)
mov.l %d2,R_Lo(%a6)
mov.l L_SCR1(%a6),%d6
mov.w %d6,Y(%a6)
mov.l %d4,Y_Hi(%a6)
mov.l %d5,Y_Lo(%a6)
fmov.x R(%a6),%fp0 # no exception
mov.b &1,Sc_Flag(%a6)
bra.b ModOrRem
No_Scale:
mov.l %d1,R_Hi(%a6)
mov.l %d2,R_Lo(%a6)
sub.l &0x3FFE,%d0
mov.w %d0,R(%a6)
mov.l L_SCR1(%a6),%d6
sub.l &0x3FFE,%d6
mov.l %d6,L_SCR1(%a6)
fmov.x R(%a6),%fp0
mov.w %d6,Y(%a6)
mov.l %d4,Y_Hi(%a6)
mov.l %d5,Y_Lo(%a6)
clr.b Sc_Flag(%a6)
#
ModOrRem:
tst.b Mod_Flag(%a6)
beq.b Fix_Sign
mov.l L_SCR1(%a6),%d6 # new biased expo(Y)
subq.l &1,%d6 # biased expo(Y/2)
cmp.l %d0,%d6
blt.b Fix_Sign
bgt.b Last_Sub
cmp.l %d1,%d4
bne.b Not_EQ
cmp.l %d2,%d5
bne.b Not_EQ
bra.w Tie_Case
Not_EQ:
bcs.b Fix_Sign
Last_Sub:
#
fsub.x Y(%a6),%fp0 # no exceptions
addq.l &1,%d3 # Q := Q + 1
#
Fix_Sign:
#..Get sign of X
mov.w SignX(%a6),%d6
bge.b Get_Q
fneg.x %fp0
#..Get Q
#
Get_Q:
clr.l %d6
mov.w SignQ(%a6),%d6 # D6 is sign(Q)
mov.l &8,%d7
lsr.l %d7,%d6
and.l &0x0000007F,%d3 # 7 bits of Q
or.l %d6,%d3 # sign and bits of Q
# swap %d3
# fmov.l %fpsr,%d6
# and.l &0xFF00FFFF,%d6
# or.l %d3,%d6
# fmov.l %d6,%fpsr # put Q in fpsr
mov.b %d3,FPSR_QBYTE(%a6) # put Q in fpsr
#
Restore:
movm.l (%sp)+,&0xfc # {%d2-%d7}
mov.l (%sp)+,%d0
fmov.l %d0,%fpcr
tst.b Sc_Flag(%a6)
beq.b Finish
mov.b &FMUL_OP,%d1 # last inst is MUL
fmul.x Scale(%pc),%fp0 # may cause underflow
bra t_catch2
# the '040 package did this apparently to see if the dst operand for the
# preceding fmul was a denorm. but, it better not have been since the
# algorithm just got done playing with fp0 and expected no exceptions
# as a result. trust me...
# bra t_avoid_unsupp # check for denorm as a
# ;result of the scaling
Finish:
mov.b &FMOV_OP,%d1 # last inst is MOVE
fmov.x %fp0,%fp0 # capture exceptions & round
bra t_catch2
Rem_is_0:
#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
addq.l &1,%d3
cmp.l %d0,&8 # D0 is j
bge.b Q_Big
lsl.l %d0,%d3
bra.b Set_R_0
Q_Big:
clr.l %d3
Set_R_0:
fmov.s &0x00000000,%fp0
clr.b Sc_Flag(%a6)
bra.w Fix_Sign
Tie_Case:
#..Check parity of Q
mov.l %d3,%d6
and.l &0x00000001,%d6
tst.l %d6
beq.w Fix_Sign # Q is even
#..Q is odd, Q := Q + 1, signX := -signX
addq.l &1,%d3
mov.w SignX(%a6),%d6
eor.l &0x00008000,%d6
mov.w %d6,SignX(%a6)
bra.w Fix_Sign
qnan: long 0x7fff0000, 0xffffffff, 0xffffffff
#########################################################################
# XDEF **************************************************************** #
# t_dz(): Handle DZ exception during transcendental emulation. #
# Sets N bit according to sign of source operand. #
# t_dz2(): Handle DZ exception during transcendental emulation. #
# Sets N bit always. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to source operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# - Store properly signed INF into fp0. #
# - Set FPSR exception status dz bit, ccode inf bit, and #
# accrued dz bit. #
# #
#########################################################################
global t_dz
t_dz:
tst.b SRC_EX(%a0) # is src negative?
bmi.b t_dz2 # yes
dz_pinf:
fmov.s &0x7f800000,%fp0 # return +INF in fp0
ori.l &dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
rts
global t_dz2
t_dz2:
fmov.s &0xff800000,%fp0 # return -INF in fp0
ori.l &dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
rts
#################################################################
# OPERR exception: #
# - set FPSR exception status operr bit, condition code #
# nan bit; Store default NAN into fp0 #
#################################################################
global t_operr
t_operr:
ori.l &opnan_mask,USER_FPSR(%a6) # set NaN/OPERR/AIOP
fmovm.x qnan(%pc),&0x80 # return default NAN in fp0
rts
#################################################################
# Extended DENORM: #
# - For all functions that have a denormalized input and #
# that f(x)=x, this is the entry point. #
# - we only return the EXOP here if either underflow or #
# inexact is enabled. #
#################################################################
# Entry point for scale w/ extended denorm. The function does
# NOT set INEX2/AUNFL/AINEX.
global t_resdnrm
t_resdnrm:
ori.l &unfl_mask,USER_FPSR(%a6) # set UNFL
bra.b xdnrm_con
global t_extdnrm
t_extdnrm:
ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
xdnrm_con:
mov.l %a0,%a1 # make copy of src ptr
mov.l %d0,%d1 # make copy of rnd prec,mode
andi.b &0xc0,%d1 # extended precision?
bne.b xdnrm_sd # no
# result precision is extended.
tst.b LOCAL_EX(%a0) # is denorm negative?
bpl.b xdnrm_exit # no
bset &neg_bit,FPSR_CC(%a6) # yes; set 'N' ccode bit
bra.b xdnrm_exit
# result precision is single or double
xdnrm_sd:
mov.l %a1,-(%sp)
tst.b LOCAL_EX(%a0) # is denorm pos or neg?
smi.b %d1 # set d1 accordingly
bsr.l unf_sub
mov.l (%sp)+,%a1
xdnrm_exit:
fmovm.x (%a0),&0x80 # return default result in fp0
mov.b FPCR_ENABLE(%a6),%d0
andi.b &0x0a,%d0 # is UNFL or INEX enabled?
bne.b xdnrm_ena # yes
rts
################
# unfl enabled #
################
# we have a DENORM that needs to be converted into an EXOP.
# so, normalize the mantissa, add 0x6000 to the new exponent,
# and return the result in fp1.
xdnrm_ena:
mov.w LOCAL_EX(%a1),FP_SCR0_EX(%a6)
mov.l LOCAL_HI(%a1),FP_SCR0_HI(%a6)
mov.l LOCAL_LO(%a1),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize mantissa
addi.l &0x6000,%d0 # add extra bias
andi.w &0x8000,FP_SCR0_EX(%a6) # keep old sign
or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#################################################################
# UNFL exception: #
# - This routine is for cases where even an EXOP isn't #
# large enough to hold the range of this result. #
# In such a case, the EXOP equals zero. #
# - Return the default result to the proper precision #
# with the sign of this result being the same as that #
# of the src operand. #
# - t_unfl2() is provided to force the result sign to #
# positive which is the desired result for fetox(). #
#################################################################
global t_unfl
t_unfl:
ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
tst.b (%a0) # is result pos or neg?
smi.b %d1 # set d1 accordingly
bsr.l unf_sub # calc default unfl result
fmovm.x (%a0),&0x80 # return default result in fp0
fmov.s &0x00000000,%fp1 # return EXOP in fp1
rts
# t_unfl2 ALWAYS tells unf_sub to create a positive result
global t_unfl2
t_unfl2:
ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
sf.b %d1 # set d1 to represent positive
bsr.l unf_sub # calc default unfl result
fmovm.x (%a0),&0x80 # return default result in fp0
fmov.s &0x00000000,%fp1 # return EXOP in fp1
rts
#################################################################
# OVFL exception: #
# - This routine is for cases where even an EXOP isn't #
# large enough to hold the range of this result. #
# - Return the default result to the proper precision #
# with the sign of this result being the same as that #
# of the src operand. #
# - t_ovfl2() is provided to force the result sign to #
# positive which is the desired result for fcosh(). #
# - t_ovfl_sc() is provided for scale() which only sets #
# the inexact bits if the number is inexact for the #
# precision indicated. #
#################################################################
global t_ovfl_sc
t_ovfl_sc:
ori.l &ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
mov.b %d0,%d1 # fetch rnd mode/prec
andi.b &0xc0,%d1 # extract rnd prec
beq.b ovfl_work # prec is extended
tst.b LOCAL_HI(%a0) # is dst a DENORM?
bmi.b ovfl_sc_norm # no
# dst op is a DENORM. we have to normalize the mantissa to see if the
# result would be inexact for the given precision. make a copy of the
# dst so we don't screw up the version passed to us.
mov.w LOCAL_EX(%a0),FP_SCR0_EX(%a6)
mov.l LOCAL_HI(%a0),FP_SCR0_HI(%a6)
mov.l LOCAL_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass ptr to FP_SCR0
movm.l &0xc080,-(%sp) # save d0-d1/a0
bsr.l norm # normalize mantissa
movm.l (%sp)+,&0x0103 # restore d0-d1/a0
ovfl_sc_norm:
cmpi.b %d1,&0x40 # is prec dbl?
bne.b ovfl_sc_dbl # no; sgl
ovfl_sc_sgl:
tst.l LOCAL_LO(%a0) # is lo lw of sgl set?
bne.b ovfl_sc_inx # yes
tst.b 3+LOCAL_HI(%a0) # is lo byte of hi lw set?
bne.b ovfl_sc_inx # yes
bra.b ovfl_work # don't set INEX2
ovfl_sc_dbl:
mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
andi.l &0x7ff,%d1 # dbl mantissa set?
beq.b ovfl_work # no; don't set INEX2
ovfl_sc_inx:
ori.l &inex2_mask,USER_FPSR(%a6) # set INEX2
bra.b ovfl_work # continue
global t_ovfl
t_ovfl:
ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
ovfl_work:
tst.b LOCAL_EX(%a0) # what is the sign?
smi.b %d1 # set d1 accordingly
bsr.l ovf_res # calc default ovfl result
mov.b %d0,FPSR_CC(%a6) # insert new ccodes
fmovm.x (%a0),&0x80 # return default result in fp0
fmov.s &0x00000000,%fp1 # return EXOP in fp1
rts
# t_ovfl2 ALWAYS tells ovf_res to create a positive result
global t_ovfl2
t_ovfl2:
ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
sf.b %d1 # clear sign flag for positive
bsr.l ovf_res # calc default ovfl result
mov.b %d0,FPSR_CC(%a6) # insert new ccodes
fmovm.x (%a0),&0x80 # return default result in fp0
fmov.s &0x00000000,%fp1 # return EXOP in fp1
rts
#################################################################
# t_catch(): #
# - the last operation of a transcendental emulation #
# routine may have caused an underflow or overflow. #
# we find out if this occurred by doing an fsave and #
# checking the exception bit. if one did occur, then we #
# jump to fgen_except() which creates the default #
# result and EXOP for us. #
#################################################################
global t_catch
t_catch:
fsave -(%sp)
tst.b 0x2(%sp)
bmi.b catch
add.l &0xc,%sp
#################################################################
# INEX2 exception: #
# - The inex2 and ainex bits are set. #
#################################################################
global t_inx2
t_inx2:
fblt.w t_minx2
fbeq.w inx2_zero
global t_pinx2
t_pinx2:
ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
rts
global t_minx2
t_minx2:
ori.l &inx2a_mask+neg_mask,USER_FPSR(%a6) # set N/INEX2/AINEX
rts
inx2_zero:
mov.b &z_bmask,FPSR_CC(%a6)
ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
rts
# an underflow or overflow exception occurred.
# we must set INEX/AINEX since the fmul/fdiv/fmov emulation may not!
catch:
ori.w &inx2a_mask,FPSR_EXCEPT(%a6)
catch2:
bsr.l fgen_except
add.l &0xc,%sp
rts
global t_catch2
t_catch2:
fsave -(%sp)
tst.b 0x2(%sp)
bmi.b catch2
add.l &0xc,%sp
fmov.l %fpsr,%d0
or.l %d0,USER_FPSR(%a6)
rts
#########################################################################
#########################################################################
# unf_sub(): underflow default result calculation for transcendentals #
# #
# INPUT: #
# d0 : rnd mode,precision #
# d1.b : sign bit of result ('11111111 = (-) ; '00000000 = (+)) #
# OUTPUT: #
# a0 : points to result (in instruction memory) #
#########################################################################
unf_sub:
ori.l &unfinx_mask,USER_FPSR(%a6)
andi.w &0x10,%d1 # keep sign bit in 4th spot
lsr.b &0x4,%d0 # shift rnd prec,mode to lo bits
andi.b &0xf,%d0 # strip hi rnd mode bit
or.b %d1,%d0 # concat {sgn,mode,prec}
mov.l %d0,%d1 # make a copy
lsl.b &0x1,%d1 # mult index 2 by 2
mov.b (tbl_unf_cc.b,%pc,%d0.w*1),FPSR_CC(%a6) # insert ccode bits
lea (tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
rts
tbl_unf_cc:
byte 0x4, 0x4, 0x4, 0x0
byte 0x4, 0x4, 0x4, 0x0
byte 0x4, 0x4, 0x4, 0x0
byte 0x0, 0x0, 0x0, 0x0
byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
tbl_unf_result:
long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
long 0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
long 0x0,0x0,0x0,0x0
long 0x0,0x0,0x0,0x0
long 0x0,0x0,0x0,0x0
long 0x0,0x0,0x0,0x0
long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
long 0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
############################################################
#########################################################################
# src_zero(): Return signed zero according to sign of src operand. #
#########################################################################
global src_zero
src_zero:
tst.b SRC_EX(%a0) # get sign of src operand
bmi.b ld_mzero # if neg, load neg zero
#
# ld_pzero(): return a positive zero.
#
global ld_pzero
ld_pzero:
fmov.s &0x00000000,%fp0 # load +0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
# ld_mzero(): return a negative zero.
global ld_mzero
ld_mzero:
fmov.s &0x80000000,%fp0 # load -0
mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
rts
#########################################################################
# dst_zero(): Return signed zero according to sign of dst operand. #
#########################################################################
global dst_zero
dst_zero:
tst.b DST_EX(%a1) # get sign of dst operand
bmi.b ld_mzero # if neg, load neg zero
bra.b ld_pzero # load positive zero
#########################################################################
# src_inf(): Return signed inf according to sign of src operand. #
#########################################################################
global src_inf
src_inf:
tst.b SRC_EX(%a0) # get sign of src operand
bmi.b ld_minf # if negative branch
#
# ld_pinf(): return a positive infinity.
#
global ld_pinf
ld_pinf:
fmov.s &0x7f800000,%fp0 # load +INF
mov.b &inf_bmask,FPSR_CC(%a6) # set 'INF' ccode bit
rts
#
# ld_minf():return a negative infinity.
#
global ld_minf
ld_minf:
fmov.s &0xff800000,%fp0 # load -INF
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# dst_inf(): Return signed inf according to sign of dst operand. #
#########################################################################
global dst_inf
dst_inf:
tst.b DST_EX(%a1) # get sign of dst operand
bmi.b ld_minf # if negative branch
bra.b ld_pinf
global szr_inf
#################################################################
# szr_inf(): Return +ZERO for a negative src operand or #
# +INF for a positive src operand. #
# Routine used for fetox, ftwotox, and ftentox. #
#################################################################
szr_inf:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_pzero
bra.b ld_pinf
#########################################################################
# sopr_inf(): Return +INF for a positive src operand or #
# jump to operand error routine for a negative src operand. #
# Routine used for flogn, flognp1, flog10, and flog2. #
#########################################################################
global sopr_inf
sopr_inf:
tst.b SRC_EX(%a0) # check sign of source
bmi.w t_operr
bra.b ld_pinf
#################################################################
# setoxm1i(): Return minus one for a negative src operand or #
# positive infinity for a positive src operand. #
# Routine used for fetoxm1. #
#################################################################
global setoxm1i
setoxm1i:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mone
bra.b ld_pinf
#########################################################################
# src_one(): Return signed one according to sign of src operand. #
#########################################################################
global src_one
src_one:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mone
#
# ld_pone(): return positive one.
#
global ld_pone
ld_pone:
fmov.s &0x3f800000,%fp0 # load +1
clr.b FPSR_CC(%a6)
rts
#
# ld_mone(): return negative one.
#
global ld_mone
ld_mone:
fmov.s &0xbf800000,%fp0 # load -1
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235
mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235
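# (ppiby2/mpiby2 are +pi/2 and -pi/2 in extended precision:
#  exponent 0x3fff, mantissa 0xc90fdaa22168c235)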
#################################################################
# spi_2(): Return signed PI/2 according to sign of src operand. #
#################################################################
global spi_2
spi_2:
tst.b SRC_EX(%a0) # check sign of source
bmi.b ld_mpi2
#
# ld_ppi2(): return positive PI/2.
#
global ld_ppi2
ld_ppi2:
fmov.l %d0,%fpcr
fmov.x ppiby2(%pc),%fp0 # load +pi/2
bra.w t_pinx2 # set INEX2
#
# ld_mpi2(): return negative PI/2.
#
global ld_mpi2
ld_mpi2:
fmov.l %d0,%fpcr
fmov.x mpiby2(%pc),%fp0 # load -pi/2
bra.w t_minx2 # set INEX2
####################################################
# The following routines give support for fsincos. #
####################################################
#
# ssincosz(): When the src operand is ZERO, store a one in the
# cosine register and return a ZERO in fp0 w/ the same sign
# as the src operand.
#
global ssincosz
ssincosz:
fmov.s &0x3f800000,%fp1
tst.b SRC_EX(%a0) # test sign
bpl.b sincoszp
fmov.s &0x80000000,%fp0 # return sin result in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6)
bra.b sto_cos # store cosine result
sincoszp:
fmov.s &0x00000000,%fp0 # return sin result in fp0
mov.b &z_bmask,FPSR_CC(%a6)
bra.b sto_cos # store cosine result
#
# ssincosi(): When the src operand is INF, store a QNAN in the cosine
# register and jump to the operand error routine for negative
# src operands.
#
global ssincosi
ssincosi:
fmov.x qnan(%pc),%fp1 # load NAN
bsr.l sto_cos # store cosine result
bra.w t_operr
#
# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
# register and branch to the src QNAN routine.
#
global ssincosqnan
ssincosqnan:
fmov.x LOCAL_EX(%a0),%fp1
bsr.l sto_cos
bra.w src_qnan
#
# ssincossnan(): When the src operand is an SNAN, store the SNAN w/ the SNAN bit set
# in the cosine register and branch to the src SNAN routine.
#
global ssincossnan
ssincossnan:
fmov.x LOCAL_EX(%a0),%fp1
bsr.l sto_cos
bra.w src_snan
########################################################################
#########################################################################
# sto_cos(): store fp1 to the fpreg designated by the CMDREG dst field. #
# fp1 holds the result of the cosine portion of ssincos(). #
# the value in fp1 will not take any exceptions when moved. #
# INPUT: #
# fp1 : fp value to store #
# MODIFIED: #
# d0 #
#########################################################################
global sto_cos
sto_cos:
mov.b 1+EXC_CMDREG(%a6),%d0
andi.w &0x7,%d0
mov.w (tbl_sto_cos.b,%pc,%d0.w*2),%d0
jmp (tbl_sto_cos.b,%pc,%d0.w*1)
tbl_sto_cos:
short sto_cos_0 - tbl_sto_cos
short sto_cos_1 - tbl_sto_cos
short sto_cos_2 - tbl_sto_cos
short sto_cos_3 - tbl_sto_cos
short sto_cos_4 - tbl_sto_cos
short sto_cos_5 - tbl_sto_cos
short sto_cos_6 - tbl_sto_cos
short sto_cos_7 - tbl_sto_cos
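# fp0/fp1 are written to their save areas in the exception frame
# (EXC_FP0/EXC_FP1), presumably because they are restored from there on exit;
# fp2-fp7 receive the cosine result directly.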
sto_cos_0:
fmovm.x &0x40,EXC_FP0(%a6)
rts
sto_cos_1:
fmovm.x &0x40,EXC_FP1(%a6)
rts
sto_cos_2:
fmov.x %fp1,%fp2
rts
sto_cos_3:
fmov.x %fp1,%fp3
rts
sto_cos_4:
fmov.x %fp1,%fp4
rts
sto_cos_5:
fmov.x %fp1,%fp5
rts
sto_cos_6:
fmov.x %fp1,%fp6
rts
sto_cos_7:
fmov.x %fp1,%fp7
rts
##################################################################
global smod_sdnrm
global smod_snorm
smod_sdnrm:
smod_snorm:
mov.b DTAG(%a6),%d1
beq.l smod
cmpi.b %d1,&ZERO
beq.w smod_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l smod
cmpi.b %d1,&SNAN
beq.l dst_snan
bra.l dst_qnan
global smod_szero
smod_szero:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&ZERO
beq.l t_operr
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l t_operr
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
global smod_sinf
smod_sinf:
mov.b DTAG(%a6),%d1
beq.l smod_fpn
cmpi.b %d1,&ZERO
beq.l smod_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l smod_fpn
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
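#
# smod_zro/srem_zro: the dst operand is a ZERO, so the result is a ZERO with
# the dst sign; only the sign of the quotient byte (src sign EOR dst sign)
# needs to be recorded in FPSR_QBYTE.
#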
smod_zro:
srem_zro:
mov.b SRC_EX(%a0),%d1 # get src sign
mov.b DST_EX(%a1),%d0 # get dst sign
eor.b %d0,%d1 # get qbyte sign
andi.b &0x80,%d1
mov.b %d1,FPSR_QBYTE(%a6)
tst.b %d0
bpl.w ld_pzero
bra.w ld_mzero
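#
# smod_fpn/srem_fpn: the src operand is an INF and the dst is finite, so the
# result is simply the dst operand returned under the rounding control passed
# in d0, with a quotient byte of zero.
#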
smod_fpn:
srem_fpn:
clr.b FPSR_QBYTE(%a6)
mov.l %d0,-(%sp)
mov.b SRC_EX(%a0),%d1 # get src sign
mov.b DST_EX(%a1),%d0 # get dst sign
eor.b %d0,%d1 # get qbyte sign
andi.b &0x80,%d1
mov.b %d1,FPSR_QBYTE(%a6)
cmpi.b DTAG(%a6),&DENORM
bne.b smod_nrm
lea DST(%a1),%a0
mov.l (%sp)+,%d0
bra t_resdnrm
smod_nrm:
fmov.l (%sp)+,%fpcr
fmov.x DST(%a1),%fp0
tst.b DST_EX(%a1)
bmi.b smod_nrm_neg
rts
smod_nrm_neg:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode
rts
#########################################################################
global srem_snorm
global srem_sdnrm
srem_sdnrm:
srem_snorm:
mov.b DTAG(%a6),%d1
beq.l srem
cmpi.b %d1,&ZERO
beq.w srem_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l srem
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
global srem_szero
srem_szero:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&ZERO
beq.l t_operr
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l t_operr
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
global srem_sinf
srem_sinf:
mov.b DTAG(%a6),%d1
beq.w srem_fpn
cmpi.b %d1,&ZERO
beq.w srem_zro
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l srem_fpn
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
#########################################################################
global sscale_snorm
global sscale_sdnrm
sscale_snorm:
sscale_sdnrm:
mov.b DTAG(%a6),%d1
beq.l sscale
cmpi.b %d1,&ZERO
beq.l dst_zero
cmpi.b %d1,&INF
beq.l dst_inf
cmpi.b %d1,&DENORM
beq.l sscale
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
global sscale_szero
sscale_szero:
mov.b DTAG(%a6),%d1
beq.l sscale
cmpi.b %d1,&ZERO
beq.l dst_zero
cmpi.b %d1,&INF
beq.l dst_inf
cmpi.b %d1,&DENORM
beq.l sscale
cmpi.b %d1,&QNAN
beq.l dst_qnan
bra.l dst_snan
global sscale_sinf
sscale_sinf:
mov.b DTAG(%a6),%d1
beq.l t_operr
cmpi.b %d1,&QNAN
beq.l dst_qnan
cmpi.b %d1,&SNAN
beq.l dst_snan
bra.l t_operr
########################################################################
#
# sop_sqnan(): The src op for frem/fmod/fscale was a QNAN.
#
global sop_sqnan
sop_sqnan:
mov.b DTAG(%a6),%d1
cmpi.b %d1,&QNAN
beq.b dst_qnan
cmpi.b %d1,&SNAN
beq.b dst_snan
bra.b src_qnan
#
# sop_ssnan(): The src op for frem/fmod/fscale was an SNAN.
#
global sop_ssnan
sop_ssnan:
mov.b DTAG(%a6),%d1
cmpi.b %d1,&QNAN
beq.b dst_qnan_src_snan
cmpi.b %d1,&SNAN
beq.b dst_snan
bra.b src_snan
dst_qnan_src_snan:
ori.l &snaniop_mask,USER_FPSR(%a6) # set NAN/SNAN/AIOP
bra.b dst_qnan
#
# dst_snan(): Return the dst SNAN w/ the SNAN bit set.
#
global dst_snan
dst_snan:
fmov.x DST(%a1),%fp0 # the fmove sets the SNAN bit
fmov.l %fpsr,%d0 # catch resulting status
or.l %d0,USER_FPSR(%a6) # store status
rts
#
# dst_qnan(): Return the dst QNAN.
#
global dst_qnan
dst_qnan:
fmov.x DST(%a1),%fp0 # return the non-signalling nan
tst.b DST_EX(%a1) # set ccodes according to QNAN sign
bmi.b dst_qnan_m
dst_qnan_p:
mov.b &nan_bmask,FPSR_CC(%a6)
rts
dst_qnan_m:
mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6)
rts
#
# src_snan(): Return the src SNAN w/ the SNAN bit set.
#
global src_snan
src_snan:
fmov.x SRC(%a0),%fp0 # the fmove sets the SNAN bit
fmov.l %fpsr,%d0 # catch resulting status
or.l %d0,USER_FPSR(%a6) # store status
rts
#
# src_qnan(): Return the src QNAN.
#
global src_qnan
src_qnan:
fmov.x SRC(%a0),%fp0 # return the non-signalling nan
tst.b SRC_EX(%a0) # set ccodes according to QNAN sign
bmi.b dst_qnan_m
src_qnan_p:
mov.b &nan_bmask,FPSR_CC(%a6)
rts
src_qnan_m:
mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6)
rts
#
# fkern2.s:
# These entry points are used by the exception handler
# routines where an instruction is selected by an index into
# a large jump table corresponding to a given instruction which
# has been decoded. Flow continues here where we now decode
# further according to the source operand type.
#
global fsinh
fsinh:
mov.b STAG(%a6),%d1
beq.l ssinh
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l src_inf
cmpi.b %d1,&DENORM
beq.l ssinhd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global flognp1
flognp1:
mov.b STAG(%a6),%d1
beq.l slognp1
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l sopr_inf
cmpi.b %d1,&DENORM
beq.l slognp1d
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fetoxm1
fetoxm1:
mov.b STAG(%a6),%d1
beq.l setoxm1
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l setoxm1i
cmpi.b %d1,&DENORM
beq.l setoxm1d
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global ftanh
ftanh:
mov.b STAG(%a6),%d1
beq.l stanh
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l src_one
cmpi.b %d1,&DENORM
beq.l stanhd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fatan
fatan:
mov.b STAG(%a6),%d1
beq.l satan
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l spi_2
cmpi.b %d1,&DENORM
beq.l satand
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fasin
fasin:
mov.b STAG(%a6),%d1
beq.l sasin
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l sasind
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fatanh
fatanh:
mov.b STAG(%a6),%d1
beq.l satanh
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l satanhd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fsine
fsine:
mov.b STAG(%a6),%d1
beq.l ssin
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l ssind
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global ftan
ftan:
mov.b STAG(%a6),%d1
beq.l stan
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l stand
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fetox
fetox:
mov.b STAG(%a6),%d1
beq.l setox
cmpi.b %d1,&ZERO
beq.l ld_pone
cmpi.b %d1,&INF
beq.l szr_inf
cmpi.b %d1,&DENORM
beq.l setoxd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global ftwotox
ftwotox:
mov.b STAG(%a6),%d1
beq.l stwotox
cmpi.b %d1,&ZERO
beq.l ld_pone
cmpi.b %d1,&INF
beq.l szr_inf
cmpi.b %d1,&DENORM
beq.l stwotoxd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global ftentox
ftentox:
mov.b STAG(%a6),%d1
beq.l stentox
cmpi.b %d1,&ZERO
beq.l ld_pone
cmpi.b %d1,&INF
beq.l szr_inf
cmpi.b %d1,&DENORM
beq.l stentoxd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global flogn
flogn:
mov.b STAG(%a6),%d1
beq.l slogn
cmpi.b %d1,&ZERO
beq.l t_dz2
cmpi.b %d1,&INF
beq.l sopr_inf
cmpi.b %d1,&DENORM
beq.l slognd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global flog10
flog10:
mov.b STAG(%a6),%d1
beq.l slog10
cmpi.b %d1,&ZERO
beq.l t_dz2
cmpi.b %d1,&INF
beq.l sopr_inf
cmpi.b %d1,&DENORM
beq.l slog10d
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global flog2
flog2:
mov.b STAG(%a6),%d1
beq.l slog2
cmpi.b %d1,&ZERO
beq.l t_dz2
cmpi.b %d1,&INF
beq.l sopr_inf
cmpi.b %d1,&DENORM
beq.l slog2d
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fcosh
fcosh:
mov.b STAG(%a6),%d1
beq.l scosh
cmpi.b %d1,&ZERO
beq.l ld_pone
cmpi.b %d1,&INF
beq.l ld_pinf
cmpi.b %d1,&DENORM
beq.l scoshd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global facos
facos:
mov.b STAG(%a6),%d1
beq.l sacos
cmpi.b %d1,&ZERO
beq.l ld_ppi2
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l sacosd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fcos
fcos:
mov.b STAG(%a6),%d1
beq.l scos
cmpi.b %d1,&ZERO
beq.l ld_pone
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l scosd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fgetexp
fgetexp:
mov.b STAG(%a6),%d1
beq.l sgetexp
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l sgetexpd
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fgetman
fgetman:
mov.b STAG(%a6),%d1
beq.l sgetman
cmpi.b %d1,&ZERO
beq.l src_zero
cmpi.b %d1,&INF
beq.l t_operr
cmpi.b %d1,&DENORM
beq.l sgetmand
cmpi.b %d1,&QNAN
beq.l src_qnan
bra.l src_snan
global fsincos
fsincos:
mov.b STAG(%a6),%d1
beq.l ssincos
cmpi.b %d1,&ZERO
beq.l ssincosz
cmpi.b %d1,&INF
beq.l ssincosi
cmpi.b %d1,&DENORM
beq.l ssincosd
cmpi.b %d1,&QNAN
beq.l ssincosqnan
bra.l ssincossnan
global fmod
fmod:
mov.b STAG(%a6),%d1
beq.l smod_snorm
cmpi.b %d1,&ZERO
beq.l smod_szero
cmpi.b %d1,&INF
beq.l smod_sinf
cmpi.b %d1,&DENORM
beq.l smod_sdnrm
cmpi.b %d1,&QNAN
beq.l sop_sqnan
bra.l sop_ssnan
global frem
frem:
mov.b STAG(%a6),%d1
beq.l srem_snorm
cmpi.b %d1,&ZERO
beq.l srem_szero
cmpi.b %d1,&INF
beq.l srem_sinf
cmpi.b %d1,&DENORM
beq.l srem_sdnrm
cmpi.b %d1,&QNAN
beq.l sop_sqnan
bra.l sop_ssnan
global fscale
fscale:
mov.b STAG(%a6),%d1
beq.l sscale_snorm
cmpi.b %d1,&ZERO
beq.l sscale_szero
cmpi.b %d1,&INF
beq.l sscale_sinf
cmpi.b %d1,&DENORM
beq.l sscale_sdnrm
cmpi.b %d1,&QNAN
beq.l sop_sqnan
bra.l sop_ssnan
#########################################################################
# XDEF **************************************************************** #
# fgen_except(): catch an exception during transcendental #
# emulation #
# #
# XREF **************************************************************** #
# fmul() - emulate a multiply instruction #
# fadd() - emulate an add instruction #
# fin() - emulate an fmove instruction #
# #
# INPUT *************************************************************** #
# fp0 = destination operand #
# d0 = type of instruction that took exception #
# fsave frame = source operand #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP #
# #
# ALGORITHM *********************************************************** #
# An exception occurred on the last instruction of the #
# transcendental emulation. hopefully, this won't be happening much #
# because it will be VERY slow. #
# The only exceptions capable of passing through here are #
# Overflow, Underflow, and Unsupported Data Type. #
# #
#########################################################################
global fgen_except
fgen_except:
cmpi.b 0x3(%sp),&0x7 # is exception UNSUPP?
beq.b fge_unsupp # yes
mov.b &NORM,STAG(%a6)
fge_cont:
mov.b &NORM,DTAG(%a6)
# ok, I have a problem with putting the dst op at FP_DST. the emulation
# routines aren't supposed to alter the operands but we've just squashed
# FP_DST here...
# 8/17/93 - this turns out to be more of a "cleanliness" standpoint
# than a potential bug. to begin with, only the dyadic functions
# frem, fmod, and fscale would get the dst trashed here. But, for
# the 060SP, the FP_DST is never used again anyways.
fmovm.x &0x80,FP_DST(%a6) # dst op is in fp0
lea 0x4(%sp),%a0 # pass: ptr to src op
lea FP_DST(%a6),%a1 # pass: ptr to dst op
cmpi.b %d1,&FMOV_OP
beq.b fge_fin # it was an "fmov"
cmpi.b %d1,&FADD_OP
beq.b fge_fadd # it was an "fadd"
fge_fmul:
bsr.l fmul
rts
fge_fadd:
bsr.l fadd
rts
fge_fin:
bsr.l fin
rts
fge_unsupp:
mov.b &DENORM,STAG(%a6)
bra.b fge_cont
#
# This table holds the offsets of the emulation routines for each individual
# math operation relative to the address of this table. Included are
# routines like fadd/fmul/fabs as well as the transcendentals.
# The location within the table is determined by the extension bits of the
# operation longword.
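# Encodings with no emulation routine hold "tbl_unsupp - tbl_unsupp" (i.e.
# zero), marking them as invalid entries.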
#
swbeg &109
tbl_unsupp:
long fin - tbl_unsupp # 00: fmove
long fint - tbl_unsupp # 01: fint
long fsinh - tbl_unsupp # 02: fsinh
long fintrz - tbl_unsupp # 03: fintrz
long fsqrt - tbl_unsupp # 04: fsqrt
long tbl_unsupp - tbl_unsupp
long flognp1 - tbl_unsupp # 06: flognp1
long tbl_unsupp - tbl_unsupp
long fetoxm1 - tbl_unsupp # 08: fetoxm1
long ftanh - tbl_unsupp # 09: ftanh
long fatan - tbl_unsupp # 0a: fatan
long tbl_unsupp - tbl_unsupp
long fasin - tbl_unsupp # 0c: fasin
long fatanh - tbl_unsupp # 0d: fatanh
long fsine - tbl_unsupp # 0e: fsin
long ftan - tbl_unsupp # 0f: ftan
long fetox - tbl_unsupp # 10: fetox
long ftwotox - tbl_unsupp # 11: ftwotox
long ftentox - tbl_unsupp # 12: ftentox
long tbl_unsupp - tbl_unsupp
long flogn - tbl_unsupp # 14: flogn
long flog10 - tbl_unsupp # 15: flog10
long flog2 - tbl_unsupp # 16: flog2
long tbl_unsupp - tbl_unsupp
long fabs - tbl_unsupp # 18: fabs
long fcosh - tbl_unsupp # 19: fcosh
long fneg - tbl_unsupp # 1a: fneg
long tbl_unsupp - tbl_unsupp
long facos - tbl_unsupp # 1c: facos
long fcos - tbl_unsupp # 1d: fcos
long fgetexp - tbl_unsupp # 1e: fgetexp
long fgetman - tbl_unsupp # 1f: fgetman
long fdiv - tbl_unsupp # 20: fdiv
long fmod - tbl_unsupp # 21: fmod
long fadd - tbl_unsupp # 22: fadd
long fmul - tbl_unsupp # 23: fmul
long fsgldiv - tbl_unsupp # 24: fsgldiv
long frem - tbl_unsupp # 25: frem
long fscale - tbl_unsupp # 26: fscale
long fsglmul - tbl_unsupp # 27: fsglmul
long fsub - tbl_unsupp # 28: fsub
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fsincos - tbl_unsupp # 30: fsincos
long fsincos - tbl_unsupp # 31: fsincos
long fsincos - tbl_unsupp # 32: fsincos
long fsincos - tbl_unsupp # 33: fsincos
long fsincos - tbl_unsupp # 34: fsincos
long fsincos - tbl_unsupp # 35: fsincos
long fsincos - tbl_unsupp # 36: fsincos
long fsincos - tbl_unsupp # 37: fsincos
long fcmp - tbl_unsupp # 38: fcmp
long tbl_unsupp - tbl_unsupp
long ftst - tbl_unsupp # 3a: ftst
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fsin - tbl_unsupp # 40: fsmove
long fssqrt - tbl_unsupp # 41: fssqrt
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fdin - tbl_unsupp # 44: fdmove
long fdsqrt - tbl_unsupp # 45: fdsqrt
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fsabs - tbl_unsupp # 58: fsabs
long tbl_unsupp - tbl_unsupp
long fsneg - tbl_unsupp # 5a: fsneg
long tbl_unsupp - tbl_unsupp
long fdabs - tbl_unsupp # 5c: fdabs
long tbl_unsupp - tbl_unsupp
long fdneg - tbl_unsupp # 5e: fdneg
long tbl_unsupp - tbl_unsupp
long fsdiv - tbl_unsupp # 60: fsdiv
long tbl_unsupp - tbl_unsupp
long fsadd - tbl_unsupp # 62: fsadd
long fsmul - tbl_unsupp # 63: fsmul
long fddiv - tbl_unsupp # 64: fddiv
long tbl_unsupp - tbl_unsupp
long fdadd - tbl_unsupp # 66: fdadd
long fdmul - tbl_unsupp # 67: fdmul
long fssub - tbl_unsupp # 68: fssub
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long tbl_unsupp - tbl_unsupp
long fdsub - tbl_unsupp # 6c: fdsub
#########################################################################
# XDEF **************************************************************** #
# fmul(): emulates the fmul instruction #
# fsmul(): emulates the fsmul instruction #
# fdmul(): emulates the fdmul instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a multiply #
# instruction won't cause an exception. Use the regular fmul to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
align 0x10
tbl_fmul_ovfl:
long 0x3fff - 0x7ffe # ext_max
long 0x3fff - 0x407e # sgl_max
long 0x3fff - 0x43fe # dbl_max
tbl_fmul_unfl:
long 0x3fff + 0x0001 # ext_unfl
long 0x3fff - 0x3f80 # sgl_unfl
long 0x3fff - 0x3c00 # dbl_unfl
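#
# scale_to_zero_{src,dst}() each appear to return the amount (0x3fff minus the
# operand's biased exponent) that was added to bring that exponent to 0x3fff:
#
#   SCALE_FACTOR         = (0x3fff - src_exp) + (0x3fff - dst_exp)
#   true result exponent = scaled result exponent - SCALE_FACTOR
#
# The tables above give the borderline scale-factor values at which the true
# result exponent would just overflow or underflow the selected rounding
# precision; equality is the case where rounding decides, hence the
# "may ovfl/unfl" paths below.
#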
global fsmul
fsmul:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fmul
global fdmul
fdmul:
andi.b &0x30,%d0
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fmul
fmul:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fmul_not_norm # optimize on non-norm input
fmul_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale src exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
add.l %d0,(%sp) # SCALE_FACTOR = scale1 + scale2
mov.w 2+L_SCR3(%a6),%d1 # fetch precision
lsr.b &0x6,%d1 # shift to lo bits
mov.l (%sp)+,%d0 # load S.F.
cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
beq.w fmul_may_ovfl # result may rnd to overflow
blt.w fmul_ovfl # result will overflow
cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
beq.w fmul_may_unfl # result may rnd to no unfl
bgt.w fmul_unfl # result will underflow
#
# NORMAL:
# - the result of the multiply operation will neither overflow nor underflow.
# - do the multiply to the proper precision and rounding mode.
# - scale the result exponent using the scale factor. if both operands were
# normalized then we really don't need to go through this scaling. but for now,
# this will do.
#
fmul_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fmul_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# OVERFLOW:
# - the result of the multiply operation is an overflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
# - if overflow or inexact is enabled, we need a multiply result rounded to
# extended precision. if the original operation was extended, then we have this
# result. if the original operation was single or double, we have to do another
# multiply using extended precision and the correct rounding mode. the result
# of this operation then has its exponent scaled by -0x6000 to create the
# exceptional operand.
#
fmul_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
# save setting this until now because this is where fmul_may_ovfl may jump in
fmul_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fmul_ovfl_ena # yes
# calculate the default result
fmul_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass rnd prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled; Create EXOP:
# - if precision is extended, then we have the EXOP. simply bias the exponent
# with an extra -0x6000. if the precision is single or double, we need to
# calculate a result rounded to extended precision.
#
fmul_ovfl_ena:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # test the rnd prec
bne.b fmul_ovfl_ena_sd # it's sgl or dbl
fmul_ovfl_ena_cont:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1 # clear sign bit
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fmul_ovfl_dis
fmul_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode only
fmov.l %d1,%fpcr # set FPCR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
bra.b fmul_ovfl_ena_cont
#
# may OVERFLOW:
# - the result of the multiply operation MAY overflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
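# - since both operands were scaled into [1.0,2.0), the scaled product lies in
#   [1.0,4.0); at this borderline scale factor, |result| >= 2.0 means the true
#   exponent is one higher than the boundary assumed, so an overflow really has
#   occurred (hence the fcmp against 2.0 below).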
#
fmul_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fmul_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fmul_normal_exit
#
# UNDERFLOW:
# - the result of the multiply operation is an underflow.
# - do the multiply to the proper precision and rounding mode in order to
# set the inexact bits.
# - calculate the default result and return it in fp0.
# - if overflow or inexact is enabled, we need a multiply result rounded to
# extended precision. if the original operation was extended, then we have this
# result. if the original operation was single or double, we have to do another
# multiply using extended precision and the correct rounding mode. the result
# of this operation then has its exponent scaled by -0x6000 to create the
# exceptional operand.
#
fmul_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
# for fun, let's use only extended precision, round to zero. then, let
# the unf_res() routine figure out all the rest.
# we will still get the correct answer.
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fmul_unfl_ena # yes
fmul_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fmul_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fmul_unfl_ena_sd # no, sgl or dbl
# if the rnd mode is anything but RZ, then we have to re-do the above
# multiplication because we used RZ for all.
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmul_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp1 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fmul_unfl_dis
fmul_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fmul_unfl_ena_cont
# MAY UNDERFLOW:
# -use the correct rounding mode and precision. this code favors operations
# that do not underflow.
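# -at this borderline scale factor the scaled product (which lies in [1.0,4.0))
#  escapes underflow only if it reached 2.0, hence the fcmp against 2.0 below.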
fmul_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp0 # execute multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| > 2.b?
fbgt.w fmul_normal_exit # no; no underflow occurred
fblt.w fmul_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 2. but,
# we don't know if the result was an underflow that rounded up to a 2 or
# a normalized number that rounded down to a 2. so, redo the entire operation
# using RZ as the rounding mode to see what the pre-rounded result is.
# this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmul.x FP_SCR0(%a6),%fp1 # execute multiply
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x2 # is |result| < 2.b?
fbge.w fmul_normal_exit # no; no underflow occurred
bra.w fmul_unfl # yes, underflow occurred
################################################################################
#
# Multiply: inputs are not both normalized; what are they?
#
fmul_not_norm:
mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fmul_op.b,%pc,%d1.w)
swbeg &48
tbl_fmul_op:
short fmul_norm - tbl_fmul_op # NORM x NORM
short fmul_zero - tbl_fmul_op # NORM x ZERO
short fmul_inf_src - tbl_fmul_op # NORM x INF
short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
short fmul_norm - tbl_fmul_op # NORM x DENORM
short fmul_res_snan - tbl_fmul_op # NORM x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_zero - tbl_fmul_op # ZERO x NORM
short fmul_zero - tbl_fmul_op # ZERO x ZERO
short fmul_res_operr - tbl_fmul_op # ZERO x INF
short fmul_res_qnan - tbl_fmul_op # ZERO x QNAN
short fmul_zero - tbl_fmul_op # ZERO x DENORM
short fmul_res_snan - tbl_fmul_op # ZERO x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_inf_dst - tbl_fmul_op # INF x NORM
short fmul_res_operr - tbl_fmul_op # INF x ZERO
short fmul_inf_dst - tbl_fmul_op # INF x INF
short fmul_res_qnan - tbl_fmul_op # INF x QNAN
short fmul_inf_dst - tbl_fmul_op # INF x DENORM
short fmul_res_snan - tbl_fmul_op # INF x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_res_qnan - tbl_fmul_op # QNAN x NORM
short fmul_res_qnan - tbl_fmul_op # QNAN x ZERO
short fmul_res_qnan - tbl_fmul_op # QNAN x INF
short fmul_res_qnan - tbl_fmul_op # QNAN x QNAN
short fmul_res_qnan - tbl_fmul_op # QNAN x DENORM
short fmul_res_snan - tbl_fmul_op # QNAN x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_norm - tbl_fmul_op # NORM x NORM
short fmul_zero - tbl_fmul_op # NORM x ZERO
short fmul_inf_src - tbl_fmul_op # NORM x INF
short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
short fmul_norm - tbl_fmul_op # NORM x DENORM
short fmul_res_snan - tbl_fmul_op # NORM x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
short fmul_res_snan - tbl_fmul_op # SNAN x NORM
short fmul_res_snan - tbl_fmul_op # SNAN x ZERO
short fmul_res_snan - tbl_fmul_op # SNAN x INF
short fmul_res_snan - tbl_fmul_op # SNAN x QNAN
short fmul_res_snan - tbl_fmul_op # SNAN x DENORM
short fmul_res_snan - tbl_fmul_op # SNAN x SNAN
short tbl_fmul_op - tbl_fmul_op #
short tbl_fmul_op - tbl_fmul_op #
fmul_res_operr:
bra.l res_operr
fmul_res_snan:
bra.l res_snan
fmul_res_qnan:
bra.l res_qnan
#
# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
#
global fmul_zero # global for fsglmul
fmul_zero:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_zero_p # result ZERO is pos.
fmul_zero_n:
fmov.s &0x80000000,%fp0 # load -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
rts
fmul_zero_p:
fmov.s &0x00000000,%fp0 # load +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
#
# Note: The j-bit for an infinity is a don't-care. However, to be
# strictly compatible w/ the 68881/882, we make sure to return an
# INF w/ the j-bit set if the input INF j-bit was set. Destination
# INFs take priority.
#
global fmul_inf_dst # global for fsglmul
fmul_inf_dst:
fmovm.x DST(%a1),&0x80 # return INF result in fp0
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_inf_dst_p # result INF is pos.
fmul_inf_dst_n:
fabs.x %fp0 # clear result sign
fneg.x %fp0 # set result sign
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
rts
fmul_inf_dst_p:
fabs.x %fp0 # clear result sign
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
global fmul_inf_src # global for fsglmul
fmul_inf_src:
fmovm.x SRC(%a0),&0x80 # return INF result in fp0
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fmul_inf_dst_p # result INF is pos.
bra.b fmul_inf_dst_n
#########################################################################
# XDEF **************************************************************** #
# fin(): emulates the fmove instruction #
# fsin(): emulates the fsmove instruction #
# fdin(): emulates the fdmove instruction #
# #
# XREF **************************************************************** #
# norm() - normalize mantissa for EXOP on denorm #
# scale_to_zero_src() - scale src exponent to zero #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round prec/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Norms can be emulated w/ a regular fmove instruction. For #
# sgl/dbl, must scale exponent and perform an "fmove". Check to see #
# if the result would have overflowed/underflowed. If so, use unf_res() #
# or ovf_res() to return the default result. Also return EXOP if #
# exception is enabled. If no exception, return the default result. #
# Unnorms don't pass through here. #
# #
#########################################################################
global fsin
fsin:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fin
global fdin
fdin:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fin
fin:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1 # fetch src optype tag
bne.w fin_not_norm # optimize on non-norm input
#
# FP MOVE IN: NORMs and DENORMs ONLY!
#
fin_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fin_not_ext # no, so go handle dbl or sgl
#
# precision selected is extended. so...we cannot get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
tst.b SRC_EX(%a0) # is the operand negative?
bpl.b fin_norm_done # no
bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
fin_norm_done:
fmovm.x SRC(%a0),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance(no inexactness!)
#
fin_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fin_not_ext # no, so go handle dbl or sgl
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
tst.b SRC_EX(%a0) # is the operand negative?
bpl.b fin_denorm_done # no
bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
fin_denorm_done:
fmovm.x SRC(%a0),&0x80 # return result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fin_denorm_unfl_ena # yes
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
fin_denorm_unfl_ena:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat new exp,old sign
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is to be rounded to single or double precision
#
fin_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fin_dbl
#
# operand is to be rounded to single precision
#
fin_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fin_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fin_sd_may_ovfl # maybe; go check
blt.w fin_sd_ovfl # yes; go handle overflow
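#
# (0x3f81/0x407e are the extended-format biased exponent bounds for a
# normalized single; a scale factor >= 0x3fff-0x3f80 means the source exponent
# is below that range (underflow), and a scale factor <= 0x3fff-0x407e means
# it is at or beyond the top of it (possible or certain overflow). fin_dbl
# below uses the double precision bounds 0x3c01/0x43fe the same way.)
#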
#
# operand will NOT overflow or underflow when moved into the fp reg file
#
fin_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform move
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fin_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exponent
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fin_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.w fin_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fin_sd_may_ovfl # maybe; go check
blt.w fin_sd_ovfl # yes; go handle overflow
bra.w fin_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fin_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
tst.b FP_SCR0_EX(%a6) # is operand negative?
bpl.b fin_sd_unfl_tst
bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
# if underflow or inexact is enabled, then go calculate the EXOP first.
fin_sd_unfl_tst:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fin_sd_unfl_ena # yes
fin_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow or inexact is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fin_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # subtract scale factor
andi.w &0x8000,%d2 # extract old sign
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR1_EX(%a6) # insert new exponent
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fin_sd_unfl_dis
#
# operand WILL overflow.
#
fin_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform move
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fin_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fin_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fin_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended(and don't alter the FPSR).
#
fin_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
sub.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fin_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fin_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # perform the move
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fin_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fin_sd_normal_exit
##########################################################################
#
# operand is not a NORM: check its optype and branch accordingly
#
fin_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fin_denorm
cmpi.b %d1,&SNAN # weed out SNANs
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNANs
beq.l res_qnan_1op
#
# do the fmove in; at this point, only possible ops are ZERO and INF.
# use fmov to determine ccodes.
# prec:mode should be zero at this point but it won't affect answer anyways.
#
fmov.x SRC(%a0),%fp0 # do fmove in
fmov.l %fpsr,%d0 # no exceptions possible
rol.l &0x8,%d0 # put ccodes in lo byte
mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
rts
#########################################################################
# XDEF **************************************************************** #
# fdiv(): emulates the fdiv instruction #
# fsdiv(): emulates the fsdiv instruction #
# fddiv(): emulates the fddiv instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a divide #
# instruction won't cause an exception. Use the regular fdiv to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
align 0x10
tbl_fdiv_unfl:
long 0x3fff - 0x0000 # ext_unfl
long 0x3fff - 0x3f81 # sgl_unfl
long 0x3fff - 0x3c01 # dbl_unfl
tbl_fdiv_ovfl:
long 0x3fff - 0x7ffe # ext overflow exponent
long 0x3fff - 0x407e # sgl overflow exponent
long 0x3fff - 0x43fe # dbl overflow exponent
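#
# For the divide, the scale factor computed below is (scale_dst - scale_src),
# i.e. (src_exp - dst_exp), so fdiv_normal_exit recovers the quotient's true
# exponent with the same subtraction used by fmul. The tables above are again
# the borderline scale-factor values for each rounding precision.
#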
global fsdiv
fsdiv:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fdiv
global fddiv
fddiv:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fdiv
fdiv:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fdiv_not_norm # optimize on non-norm input
#
# DIVIDE: NORMs and DENORMs ONLY!
#
fdiv_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale src exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
neg.l (%sp) # SCALE FACTOR = scale2 - scale1
add.l %d0,(%sp)
mov.w 2+L_SCR3(%a6),%d1 # fetch precision
lsr.b &0x6,%d1 # shift to lo bits
mov.l (%sp)+,%d0 # load S.F.
cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
ble.w fdiv_may_ovfl # result will/may overflow
cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
beq.w fdiv_may_unfl # maybe
bgt.w fdiv_unfl # yes; go handle underflow
fdiv_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # save FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # perform divide
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fdiv_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
mov.l %d2,-(%sp) # store d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
tbl_fdiv_ovfl2:
long 0x7fff
long 0x407f
long 0x43ff
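# (one greater than the largest normal biased exponent for ext/sgl/dbl; if the
#  unscaled quotient exponent computed below reaches one of these, the divide
#  has overflowed)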
fdiv_no_ovfl:
mov.l (%sp)+,%d0 # restore scale factor
bra.b fdiv_normal_exit
fdiv_may_ovfl:
mov.l %d0,-(%sp) # save scale factor
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d0
fmov.l &0x0,%fpcr
or.l %d0,USER_FPSR(%a6) # save INEX,N
fmovm.x &0x01,-(%sp) # save result to stack
mov.w (%sp),%d0 # fetch new exponent
add.l &0xc,%sp # clear result from stack
andi.l &0x7fff,%d0 # strip sign
sub.l (%sp),%d0 # add scale factor
cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
blt.b fdiv_no_ovfl
mov.l (%sp)+,%d0
fdiv_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fdiv_ovfl_ena # yes
fdiv_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fdiv_ovfl_ena:
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fdiv_ovfl_ena_sd # no, do sgl or dbl
fdiv_ovfl_ena_cont:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1 # clear sign bit
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fdiv_ovfl_dis
fdiv_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode
fmov.l %d1,%fpcr # set FPCR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
bra.b fdiv_ovfl_ena_cont
fdiv_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fdiv_unfl_ena # yes
fdiv_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fdiv_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fdiv_unfl_ena_sd # no, sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fdiv_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp1 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fdiv_unfl_dis
fdiv_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fdiv_unfl_ena_cont
#
# the divide operation MAY underflow:
#
fdiv_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x1 # is |result| > 1.b?
fbgt.w fdiv_normal_exit # no; no underflow occurred
fblt.w fdiv_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 1. but,
# we don't know if the result was an underflow that rounded up to a 1
# or a normalized number that rounded down to a 1. so, redo the entire
# operation using RZ as the rounding mode to see what the pre-rounded
# result is. this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fdiv.x FP_SCR0(%a6),%fp1 # execute divide
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x1 # is |result| < 1.b?
fbge.w fdiv_normal_exit # no; no underflow occurred
bra.w fdiv_unfl # yes; underflow occurred
############################################################################
#
# Divide: inputs are not both normalized; what are they?
#
fdiv_not_norm:
mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fdiv_op:
short fdiv_norm - tbl_fdiv_op # NORM / NORM
short fdiv_inf_load - tbl_fdiv_op # NORM / ZERO
short fdiv_zero_load - tbl_fdiv_op # NORM / INF
short fdiv_res_qnan - tbl_fdiv_op # NORM / QNAN
short fdiv_norm - tbl_fdiv_op # NORM / DENORM
short fdiv_res_snan - tbl_fdiv_op # NORM / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_zero_load - tbl_fdiv_op # ZERO / NORM
short fdiv_res_operr - tbl_fdiv_op # ZERO / ZERO
short fdiv_zero_load - tbl_fdiv_op # ZERO / INF
short fdiv_res_qnan - tbl_fdiv_op # ZERO / QNAN
short fdiv_zero_load - tbl_fdiv_op # ZERO / DENORM
short fdiv_res_snan - tbl_fdiv_op # ZERO / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_inf_dst - tbl_fdiv_op # INF / NORM
short fdiv_inf_dst - tbl_fdiv_op # INF / ZERO
short fdiv_res_operr - tbl_fdiv_op # INF / INF
short fdiv_res_qnan - tbl_fdiv_op # INF / QNAN
short fdiv_inf_dst - tbl_fdiv_op # INF / DENORM
short fdiv_res_snan - tbl_fdiv_op # INF / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_res_qnan - tbl_fdiv_op # QNAN / NORM
short fdiv_res_qnan - tbl_fdiv_op # QNAN / ZERO
short fdiv_res_qnan - tbl_fdiv_op # QNAN / INF
short fdiv_res_qnan - tbl_fdiv_op # QNAN / QNAN
short fdiv_res_qnan - tbl_fdiv_op # QNAN / DENORM
short fdiv_res_snan - tbl_fdiv_op # QNAN / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_norm - tbl_fdiv_op # DENORM / NORM
short fdiv_inf_load - tbl_fdiv_op # DENORM / ZERO
short fdiv_zero_load - tbl_fdiv_op # DENORM / INF
short fdiv_res_qnan - tbl_fdiv_op # DENORM / QNAN
short fdiv_norm - tbl_fdiv_op # DENORM / DENORM
short fdiv_res_snan - tbl_fdiv_op # DENORM / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
short fdiv_res_snan - tbl_fdiv_op # SNAN / NORM
short fdiv_res_snan - tbl_fdiv_op # SNAN / ZERO
short fdiv_res_snan - tbl_fdiv_op # SNAN / INF
short fdiv_res_snan - tbl_fdiv_op # SNAN / QNAN
short fdiv_res_snan - tbl_fdiv_op # SNAN / DENORM
short fdiv_res_snan - tbl_fdiv_op # SNAN / SNAN
short tbl_fdiv_op - tbl_fdiv_op #
short tbl_fdiv_op - tbl_fdiv_op #
fdiv_res_qnan:
bra.l res_qnan
fdiv_res_snan:
bra.l res_snan
fdiv_res_operr:
bra.l res_operr
global fdiv_zero_load # global for fsgldiv
fdiv_zero_load:
mov.b SRC_EX(%a0),%d0 # result sign is exclusive
mov.b DST_EX(%a1),%d1 # or of input signs.
eor.b %d0,%d1
bpl.b fdiv_zero_load_p # result is positive
fmov.s &0x80000000,%fp0 # load a -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
rts
fdiv_zero_load_p:
fmov.s &0x00000000,%fp0 # load a +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# The destination was In Range and the source was a ZERO. The result,
# therefore, is an INF w/ the proper sign.
# So, determine the sign and return a new INF (w/ the j-bit cleared).
#
global fdiv_inf_load # global for fsgldiv
fdiv_inf_load:
ori.w &dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
mov.b SRC_EX(%a0),%d0 # load both signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bpl.b fdiv_inf_load_p # result is positive
fmov.s &0xff800000,%fp0 # make result -INF
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
rts
fdiv_inf_load_p:
fmov.s &0x7f800000,%fp0 # make result +INF
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#
# The destination was an INF w/ an In Range or ZERO source; the result is
# an INF w/ the proper sign.
# The 68881/882 returns the destination INF w/ the new sign (if the j-bit of the
# dst INF is set, then the j-bit of the result INF is also set).
#
global fdiv_inf_dst # global for fsgldiv
fdiv_inf_dst:
mov.b DST_EX(%a1),%d0 # load both signs
mov.b SRC_EX(%a0),%d1
eor.b %d0,%d1
bpl.b fdiv_inf_dst_p # result is positive
fmovm.x DST(%a1),&0x80 # return result in fp0
fabs.x %fp0 # clear sign bit
fneg.x %fp0 # set sign bit
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fdiv_inf_dst_p:
fmovm.x DST(%a1),&0x80 # return result in fp0
fabs.x %fp0 # return positive INF
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fneg(): emulates the fneg instruction #
# fsneg(): emulates the fsneg instruction #
# fdneg(): emulates the fdneg instruction #
# #
# XREF **************************************************************** #
# norm() - normalize a denorm to provide EXOP #
# scale_to_zero_src() - scale sgl/dbl source exponent #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, zeroes, and infinities as special cases. Separate #
# norms/denorms into ext/sgl/dbl precisions. Extended precision can be #
# emulated by simply setting sign bit. Sgl/dbl operands must be scaled #
# and an actual fneg performed to see if overflow/underflow would have #
# occurred. If so, return default underflow/overflow result. Else, #
# scale the result exponent and return result. FPSR gets set based on #
# the result value. #
# #
#########################################################################
global fsneg
fsneg:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fneg
global fdneg
fdneg:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fneg
fneg:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1
bne.w fneg_not_norm # optimize on non-norm input
#
# NEGATE SIGN : norms and denorms ONLY!
#
fneg_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.w fneg_not_ext # no; go handle sgl or dbl
#
# precision selected is extended. so...we can not get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
eori.w &0x8000,%d0 # negate sign
bpl.b fneg_norm_load # sign is positive
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
fneg_norm_load:
mov.w %d0,FP_SCR0_EX(%a6)
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance (no inexactness!)
#
fneg_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fneg_not_ext # no; go handle sgl or dbl
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
eori.w &0x8000,%d0 # negate sign
bpl.b fneg_denorm_done # no
mov.b &neg_bmask,FPSR_CC(%a6) # yes, set 'N' ccode bit
fneg_denorm_done:
mov.w %d0,FP_SCR0_EX(%a6)
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fneg_ext_unfl_ena # yes
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
fneg_ext_unfl_ena:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat old sign, new exponent
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is either single or double
#
fneg_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fneg_dbl
#
# operand is to be rounded to single precision
#
fneg_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fneg_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fneg_sd_may_ovfl # maybe; go check
blt.w fneg_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fneg_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fneg_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.w %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fneg_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.b fneg_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fneg_sd_may_ovfl # maybe; go check
blt.w fneg_sd_ovfl # yes; go handle overflow
bra.w fneg_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fneg_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
bpl.b fneg_sd_unfl_tst
bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
# if underflow or inexact is enabled, go calculate EXOP first.
fneg_sd_unfl_tst:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fneg_sd_unfl_ena # yes
fneg_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fneg_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat new sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fneg_sd_unfl_dis
#
# operand WILL overflow.
#
fneg_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fneg_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fneg_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fneg_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended (and don't alter the FPSR).
#
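# note: the 0x6000 subtracted from the exponent below re-biases the EXOP per
# the 040/060 convention for a trap-enabled overflow.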
fneg_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fneg_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fneg_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fneg.x FP_SCR0(%a6),%fp0 # perform negation
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fneg_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fneg_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fneg_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fneg_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
#
# do the fneg; at this point, only possible ops are ZERO and INF.
# use fneg to determine ccodes.
# prec:mode should be zero at this point but it won't affect answer anyway.
#
fneg.x SRC_EX(%a0),%fp0 # do fneg
fmov.l %fpsr,%d0
rol.l &0x8,%d0 # put ccodes in lo byte
mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
rts
#########################################################################
# XDEF **************************************************************** #
# ftst(): emulates the ftst instruction #
# #
# XREF **************************************************************** #
# res{s,q}nan_1op() - set NAN result for monadic instruction #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# #
# OUTPUT ************************************************************** #
# none #
# #
# ALGORITHM *********************************************************** #
# Check the source operand tag (STAG) and set the FPSR according #
# to the operand type and sign. #
# #
#########################################################################
global ftst
ftst:
mov.b STAG(%a6),%d1
bne.b ftst_not_norm # optimize on non-norm input
#
# Norm:
#
ftst_norm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_norm_m # yes
rts
ftst_norm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# input is not normalized; what is it?
#
ftst_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b ftst_zero
cmpi.b %d1,&INF # weed out INF
beq.b ftst_inf
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
#
# Denorm:
#
ftst_denorm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_denorm_m # yes
rts
ftst_denorm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#
# Infinity:
#
ftst_inf:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_inf_m # yes
ftst_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
ftst_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
rts
#
# Zero:
#
ftst_zero:
tst.b SRC_EX(%a0) # is operand negative?
bmi.b ftst_zero_m # yes
ftst_zero_p:
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
ftst_zero_m:
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fint(): emulates the fint instruction #
# #
# XREF **************************************************************** #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# #
# ALGORITHM *********************************************************** #
# Separate according to operand type. Unnorms don't pass through #
# here. For norms, load the rounding mode/prec, execute a "fint", then #
# store the resulting FPSR bits. #
# For denorms, force the j-bit to a one and do the same as for #
# norms. Denorms are so low that the answer will either be a zero or a #
# one. #
# For zeroes/infs/NANs, return the same while setting the FPSR #
# as appropriate. #
# #
#########################################################################
global fint
fint:
mov.b STAG(%a6),%d1
bne.b fint_not_norm # optimize on non-norm input
#
# Norm:
#
fint_norm:
andi.b &0x30,%d0 # set prec = ext
fmov.l %d0,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fint.x SRC(%a0),%fp0 # execute fint
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d0 # save FPSR
or.l %d0,USER_FPSR(%a6) # set exception bits
rts
#
# input is not normalized; what is it?
#
fint_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fint_zero
cmpi.b %d1,&INF # weed out INF
beq.b fint_inf
cmpi.b %d1,&DENORM # weed out DENORM
beq.b fint_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op # weed out QNAN
#
# Denorm:
#
# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
# also, the INEX2 and AINEX exception bits will be set.
# so, we could either set these manually or force the DENORM
# to a very small NORM and ship it to the NORM routine.
# I do the latter.
#
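# (the operand built below keeps the DENORM's sign, a zero exponent, and a set
#  j-bit - a tiny nonzero value - so fint_norm above produces the 0 or +/-1
#  result and raises INEX2/AINEX on its own.)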
fint_denorm:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
lea FP_SCR0(%a6),%a0
bra.b fint_norm
#
# Zero:
#
fint_zero:
tst.b SRC_EX(%a0) # is ZERO negative?
bmi.b fint_zero_m # yes
fint_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO in fp0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fint_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#
# Infinity:
#
fint_inf:
fmovm.x SRC(%a0),&0x80 # return result in fp0
tst.b SRC_EX(%a0) # is INF negative?
bmi.b fint_inf_m # yes
fint_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
fint_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fintrz(): emulates the fintrz instruction #
# #
# XREF **************************************************************** #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# #
# ALGORITHM *********************************************************** #
# Separate according to operand type. Unnorms don't pass through #
# here. For norms, load the rounding mode/prec, execute a "fintrz", #
# then store the resulting FPSR bits. #
# For denorms, force the j-bit to a one and do the same as for #
# norms. Denorms are so low that the answer will always be a zero. #
# For zeroes/infs/NANs, return the same while setting the FPSR #
# as appropriate. #
# #
#########################################################################
global fintrz
fintrz:
mov.b STAG(%a6),%d1
bne.b fintrz_not_norm # optimize on non-norm input
#
# Norm:
#
fintrz_norm:
fmov.l &0x0,%fpsr # clear FPSR
fintrz.x SRC(%a0),%fp0 # execute fintrz
fmov.l %fpsr,%d0 # save FPSR
or.l %d0,USER_FPSR(%a6) # set exception bits
rts
#
# input is not normalized; what is it?
#
fintrz_not_norm:
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fintrz_zero
cmpi.b %d1,&INF # weed out INF
beq.b fintrz_inf
cmpi.b %d1,&DENORM # weed out DENORM
beq.b fintrz_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op # weed out QNAN
#
# Denorm:
#
# for DENORMs, the result will be (+/-)ZERO.
# also, the INEX2 and AINEX exception bits will be set.
# so, we could either set these manually or force the DENORM
# to a very small NORM and ship it to the NORM routine.
# I do the latter.
#
fintrz_denorm:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
lea FP_SCR0(%a6),%a0
bra.b fintrz_norm
#
# Zero:
#
fintrz_zero:
tst.b SRC_EX(%a0) # is ZERO negative?
bmi.b fintrz_zero_m # yes
fintrz_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO in fp0
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fintrz_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO in fp0
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
#
# Infinity:
#
fintrz_inf:
fmovm.x SRC(%a0),&0x80 # return result in fp0
tst.b SRC_EX(%a0) # is INF negative?
bmi.b fintrz_inf_m # yes
fintrz_inf_p:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
fintrz_inf_m:
mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
rts
#########################################################################
# XDEF **************************************************************** #
# fabs(): emulates the fabs instruction #
# fsabs(): emulates the fsabs instruction #
# fdabs(): emulates the fdabs instruction #
# #
# XREF **************************************************************** #
# norm() - normalize denorm mantissa to provide EXOP #
# scale_to_zero_src() - make exponent = 0; get scale factor #
# unf_res() - calculate underflow result #
# ovf_res() - calculate overflow result #
# res_{s,q}nan_1op() - set NAN result for monadic operation #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = rnd precision/mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Simply clear sign for extended precision norm. Ext prec denorm #
# gets an EXOP created for it since it's an underflow. #
# Double and single precision can overflow and underflow. First, #
# scale the operand such that the exponent is zero. Perform an "fabs" #
# using the correct rnd mode/prec. Check to see if the original #
# exponent would take an exception. If so, use unf_res() or ovf_res() #
# to calculate the default result. Also, create the EXOP for the #
# exceptional case. If no exception should occur, insert the correct #
# result exponent and return. #
# Unnorms don't pass through here. #
# #
#########################################################################
global fsabs
fsabs:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fabs
global fdabs
fdabs:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fabs
fabs:
mov.l %d0,L_SCR3(%a6) # store rnd info
mov.b STAG(%a6),%d1
bne.w fabs_not_norm # optimize on non-norm input
#
# ABSOLUTE VALUE: norms and denorms ONLY!
#
fabs_norm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fabs_not_ext # no; go handle sgl or dbl
#
# precision selected is extended. so...we can not get an underflow
# or overflow because of rounding to the correct precision. so...
# skip the scaling and unscaling...
#
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d1
bclr &15,%d1 # force absolute value
mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# for an extended precision DENORM, the UNFL exception bit is set
# the accrued bit is NOT set in this instance (no inexactness!)
#
fabs_denorm:
andi.b &0xc0,%d0 # is precision extended?
bne.b fabs_not_ext # no
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.w SRC_EX(%a0),%d0
bclr &15,%d0 # clear sign
mov.w %d0,FP_SCR0_EX(%a6) # insert exponent
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
bne.b fabs_ext_unfl_ena
rts
#
# the input is an extended DENORM and underflow is enabled in the FPCR.
# normalize the mantissa and add the bias of 0x6000 to the resulting negative
# exponent and insert back into the operand.
#
fabs_ext_unfl_ena:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
bsr.l norm # normalize result
neg.w %d0 # new exponent = -(shft val)
addi.w &0x6000,%d0 # add new bias to exponent
mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
andi.w &0x8000,%d1 # keep old sign
andi.w &0x7fff,%d0 # clear sign position
or.w %d1,%d0 # concat old sign, new exponent
mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#
# operand is either single or double
#
fabs_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.b fabs_dbl
#
# operand is to be rounded to single precision
#
fabs_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
bge.w fabs_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
beq.w fabs_sd_may_ovfl # maybe; go check
blt.w fabs_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fabs_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fabs_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
bge.b fabs_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
beq.w fabs_sd_may_ovfl # maybe; go check
blt.w fabs_sd_ovfl # yes; go handle overflow
bra.w fabs_sd_normal # no; go handle normalized op
#
# operand WILL underflow when moved in to the fp register file
#
fabs_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
# if underflow or inexact is enabled, go calculate EXOP first.
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fabs_sd_unfl_ena # yes
fabs_sd_unfl_dis:
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fabs_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat new sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fabs_sd_unfl_dis
#
# operand WILL overflow.
#
fabs_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fabs_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fabs_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended (and don't alter the FPSR).
#
fabs_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fabs_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fabs_sd_may_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fabs.x FP_SCR0(%a6),%fp0 # perform absolute
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fabs_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fabs_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fabs_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fabs_denorm
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
cmpi.b %d1,&QNAN # weed out QNAN
beq.l res_qnan_1op
fabs.x SRC(%a0),%fp0 # force absolute value
cmpi.b %d1,&INF # weed out INF
beq.b fabs_inf
fabs_zero:
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fabs_inf:
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
#########################################################################
# XDEF **************************************************************** #
# fcmp(): fp compare op routine #
# #
# XREF **************************************************************** #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 = round prec/mode #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Handle NANs and denorms as special cases. For everything else, #
# just use the actual fcmp instruction to produce the correct condition #
# codes. #
# #
#########################################################################
global fcmp
fcmp:
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1
bne.b fcmp_not_norm # optimize on non-norm input
#
# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
#
fcmp_norm:
fmovm.x DST(%a1),&0x80 # load dst op
fcmp.x %fp0,SRC(%a0) # do compare
fmov.l %fpsr,%d0 # save FPSR
rol.l &0x8,%d0 # extract ccode bits
mov.b %d0,FPSR_CC(%a6) # set ccode bits(no exc bits are set)
rts
#
# fcmp: inputs are not both normalized; what are they?
#
fcmp_not_norm:
mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fcmp_op:
short fcmp_norm - tbl_fcmp_op # NORM - NORM
short fcmp_norm - tbl_fcmp_op # NORM - ZERO
short fcmp_norm - tbl_fcmp_op # NORM - INF
short fcmp_res_qnan - tbl_fcmp_op # NORM - QNAN
short fcmp_nrm_dnrm - tbl_fcmp_op # NORM - DENORM
short fcmp_res_snan - tbl_fcmp_op # NORM - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_norm - tbl_fcmp_op # ZERO - NORM
short fcmp_norm - tbl_fcmp_op # ZERO - ZERO
short fcmp_norm - tbl_fcmp_op # ZERO - INF
short fcmp_res_qnan - tbl_fcmp_op # ZERO - QNAN
short fcmp_dnrm_s - tbl_fcmp_op # ZERO - DENORM
short fcmp_res_snan - tbl_fcmp_op # ZERO - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_norm - tbl_fcmp_op # INF - NORM
short fcmp_norm - tbl_fcmp_op # INF - ZERO
short fcmp_norm - tbl_fcmp_op # INF - INF
short fcmp_res_qnan - tbl_fcmp_op # INF - QNAN
short fcmp_dnrm_s - tbl_fcmp_op # INF - DENORM
short fcmp_res_snan - tbl_fcmp_op # INF - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_res_qnan - tbl_fcmp_op # QNAN - NORM
short fcmp_res_qnan - tbl_fcmp_op # QNAN - ZERO
short fcmp_res_qnan - tbl_fcmp_op # QNAN - INF
short fcmp_res_qnan - tbl_fcmp_op # QNAN - QNAN
short fcmp_res_qnan - tbl_fcmp_op # QNAN - DENORM
short fcmp_res_snan - tbl_fcmp_op # QNAN - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_dnrm_nrm - tbl_fcmp_op # DENORM - NORM
short fcmp_dnrm_d - tbl_fcmp_op # DENORM - ZERO
short fcmp_dnrm_d - tbl_fcmp_op # DENORM - INF
short fcmp_res_qnan - tbl_fcmp_op # DENORM - QNAN
short fcmp_dnrm_sd - tbl_fcmp_op # DENORM - DENORM
short fcmp_res_snan - tbl_fcmp_op # DENORM - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
short fcmp_res_snan - tbl_fcmp_op # SNAN - NORM
short fcmp_res_snan - tbl_fcmp_op # SNAN - ZERO
short fcmp_res_snan - tbl_fcmp_op # SNAN - INF
short fcmp_res_snan - tbl_fcmp_op # SNAN - QNAN
short fcmp_res_snan - tbl_fcmp_op # SNAN - DENORM
short fcmp_res_snan - tbl_fcmp_op # SNAN - SNAN
short tbl_fcmp_op - tbl_fcmp_op #
short tbl_fcmp_op - tbl_fcmp_op #
# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
fcmp_res_qnan:
bsr.l res_qnan
andi.b &0xf7,FPSR_CC(%a6)
rts
fcmp_res_snan:
bsr.l res_snan
andi.b &0xf7,FPSR_CC(%a6)
rts
#
# DENORMs are a little more difficult.
# If you have 2 DENORMs, then you can just force the j-bit to a one
# and use the fcmp_norm routine.
# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
# and use the fcmp_norm routine.
# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
# But with a DENORM and a NORM of the same sign, the neg bit is set if the
# (1) signs are (+) and the DENORM is the dst or
# (2) signs are (-) and the DENORM is the src
#
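# example for case (1): dst is a tiny (+)DENORM and src is a (+)NORM, so
# dst < src and 'N' must be set (see fcmp_dnrm_nrm below).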
fcmp_dnrm_s:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),%d0
bset &31,%d0 # DENORM src; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0
bra.w fcmp_norm
fcmp_dnrm_d:
mov.l DST_EX(%a1),FP_SCR0_EX(%a6)
mov.l DST_HI(%a1),%d0
bset &31,%d0 # DENORM dst; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a1
bra.w fcmp_norm
fcmp_dnrm_sd:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l DST_HI(%a1),%d0
bset &31,%d0 # DENORM dst; make into small norm
mov.l %d0,FP_SCR1_HI(%a6)
mov.l SRC_HI(%a0),%d0
bset &31,%d0 # DENORM src; make into small norm
mov.l %d0,FP_SCR0_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
lea FP_SCR1(%a6),%a1
lea FP_SCR0(%a6),%a0
bra.w fcmp_norm
fcmp_nrm_dnrm:
mov.b SRC_EX(%a0),%d0 # determine if like signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fcmp_dnrm_s
# signs are the same, so must determine the answer ourselves.
tst.b %d0 # is src op negative?
bmi.b fcmp_nrm_dnrm_m # yes
rts
fcmp_nrm_dnrm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
fcmp_dnrm_nrm:
mov.b SRC_EX(%a0),%d0 # determine if like signs
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fcmp_dnrm_d
# signs are the same, so must determine the answer ourselves.
tst.b %d0 # is src op negative?
bpl.b fcmp_dnrm_nrm_m # no
rts
fcmp_dnrm_nrm_m:
mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
rts
#########################################################################
# XDEF **************************************************************** #
# fsglmul(): emulates the fsglmul instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res4() - return default underflow result for sglop #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 = rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a multiply #
# instruction won't cause an exception. Use the regular fsglmul to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
global fsglmul
fsglmul:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1
bne.w fsglmul_not_norm # optimize on non-norm input
fsglmul_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale exponent
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # scale dst exponent
add.l (%sp)+,%d0 # SCALE_FACTOR = scale1 + scale2
cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
beq.w fsglmul_may_ovfl # result may rnd to overflow
blt.w fsglmul_ovfl # result will overflow
cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
beq.w fsglmul_may_unfl # result may rnd to no unfl
bgt.w fsglmul_unfl # result will underflow
fsglmul_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsglmul_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
fsglmul_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsglmul_ovfl_tst:
# save setting this until now because this is where fsglmul_may_ovfl may jump in
or.l &ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsglmul_ovfl_ena # yes
fsglmul_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
andi.b &0x30,%d0 # force prec = ext
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fsglmul_ovfl_ena:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
andi.w &0x8000,%d2 # keep old sign
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsglmul_ovfl_dis
fsglmul_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| >= 2.b?
fbge.w fsglmul_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fsglmul_normal_exit
fsglmul_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsglmul_unfl_ena # yes
fsglmul_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res4 # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fsglmul_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fsglmul_unfl_dis
fsglmul_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x2 # is |result| > 2.b?
fbgt.w fsglmul_normal_exit # no; no underflow occurred
fblt.w fsglmul_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 2. but,
# we don't know if the result was an underflow that rounded up to a 2 or
# a normalized number that rounded down to a 2. so, redo the entire operation
# using RZ as the rounding mode to see what the pre-rounded result is.
# this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert RZ
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x2 # is |result| < 2.b?
fbge.w fsglmul_normal_exit # no; no underflow occurred
bra.w fsglmul_unfl # yes, underflow occurred
##############################################################################
#
# Single Precision Multiply: inputs are not both normalized; what are they?
#
fsglmul_not_norm:
mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsglmul_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsglmul_op:
short fsglmul_norm - tbl_fsglmul_op # NORM x NORM
short fsglmul_zero - tbl_fsglmul_op # NORM x ZERO
short fsglmul_inf_src - tbl_fsglmul_op # NORM x INF
short fsglmul_res_qnan - tbl_fsglmul_op # NORM x QNAN
short fsglmul_norm - tbl_fsglmul_op # NORM x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # NORM x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_zero - tbl_fsglmul_op # ZERO x NORM
short fsglmul_zero - tbl_fsglmul_op # ZERO x ZERO
short fsglmul_res_operr - tbl_fsglmul_op # ZERO x INF
short fsglmul_res_qnan - tbl_fsglmul_op # ZERO x QNAN
short fsglmul_zero - tbl_fsglmul_op # ZERO x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # ZERO x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_inf_dst - tbl_fsglmul_op # INF x NORM
short fsglmul_res_operr - tbl_fsglmul_op # INF x ZERO
short fsglmul_inf_dst - tbl_fsglmul_op # INF x INF
short fsglmul_res_qnan - tbl_fsglmul_op # INF x QNAN
short fsglmul_inf_dst - tbl_fsglmul_op # INF x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # INF x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x NORM
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x ZERO
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x INF
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x QNAN
short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # QNAN x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_norm - tbl_fsglmul_op # DENORM x NORM
short fsglmul_zero - tbl_fsglmul_op # DENORM x ZERO
short fsglmul_inf_src - tbl_fsglmul_op # DENORM x INF
short fsglmul_res_qnan - tbl_fsglmul_op # DENORM x QNAN
short fsglmul_norm - tbl_fsglmul_op # DENORM x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # DENORM x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x NORM
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x ZERO
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x INF
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x QNAN
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x DENORM
short fsglmul_res_snan - tbl_fsglmul_op # SNAN x SNAN
short tbl_fsglmul_op - tbl_fsglmul_op #
short tbl_fsglmul_op - tbl_fsglmul_op #
fsglmul_res_operr:
bra.l res_operr
fsglmul_res_snan:
bra.l res_snan
fsglmul_res_qnan:
bra.l res_qnan
fsglmul_zero:
bra.l fmul_zero
fsglmul_inf_src:
bra.l fmul_inf_src
fsglmul_inf_dst:
bra.l fmul_inf_dst
#########################################################################
# XDEF **************************************************************** #
# fsgldiv(): emulates the fsgldiv instruction #
# #
# XREF **************************************************************** #
# scale_to_zero_src() - scale src exponent to zero #
# scale_to_zero_dst() - scale dst exponent to zero #
# unf_res4() - return default underflow result for sglop #
# ovf_res() - return default overflow result #
# res_qnan() - return QNAN result #
# res_snan() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# d0 = rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a divide #
# instruction won't cause an exception. Use the regular fsgldiv to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
global fsgldiv
fsgldiv:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fsgldiv_not_norm # optimize on non-norm input
#
# DIVIDE: NORMs and DENORMs ONLY!
#
fsgldiv_norm:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # calculate scale factor 1
mov.l %d0,-(%sp) # save scale factor 1
bsr.l scale_to_zero_dst # calculate scale factor 2
neg.l (%sp) # S.F. = scale2 - scale1
add.l %d0,(%sp)
mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
lsr.b &0x6,%d1
mov.l (%sp)+,%d0
cmpi.l %d0,&0x3fff-0x7ffe
ble.w fsgldiv_may_ovfl
cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
beq.w fsgldiv_may_unfl # maybe
bgt.w fsgldiv_unfl # yes; go handle underflow
fsgldiv_normal:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # perform sgl divide
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsgldiv_normal_exit:
fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
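#
# the divide operation MAY overflow; perform it and check the result exponent:
#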
fsgldiv_may_ovfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute divide
fmov.l %fpsr,%d1
fmov.l &0x0,%fpcr
or.l %d1,USER_FPSR(%a6) # save INEX,N
fmovm.x &0x01,-(%sp) # save result to stack
mov.w (%sp),%d1 # fetch new exponent
add.l &0xc,%sp # discard result from stack
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
cmp.l %d1,&0x7fff # did divide overflow?
blt.b fsgldiv_normal_exit
fsgldiv_ovfl_tst:
or.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsgldiv_ovfl_ena # yes
fsgldiv_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
andi.b &0x30,%d0 # kill precision
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
fsgldiv_ovfl_ena:
fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract new bias
andi.w &0x7fff,%d1 # clear ms bit
or.w %d2,%d1 # concat old sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsgldiv_ovfl_dis
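#
# the divide operation WILL underflow:
#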
fsgldiv_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsgldiv_unfl_ena # yes
fsgldiv_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res4 # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# UNFL is enabled.
#
fsgldiv_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat old sign, new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.b fsgldiv_unfl_dis
#
# the divide operation MAY underflow:
#
fsgldiv_may_unfl:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fabs.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x1 # is |result| > 1.b?
fbgt.w fsgldiv_normal_exit # no; no underflow occurred
fblt.w fsgldiv_unfl # yes; underflow occurred
#
# we still don't know if underflow occurred. result is ~ equal to 1. but,
# we don't know if the result was an underflow that rounded up to a 1
# or a normalized number that rounded down to a 1. so, redo the entire
# operation using RZ as the rounding mode to see what the pre-rounded
# result is. this case should be relatively rare.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
clr.l %d1 # clear scratch register
ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp1 # make absolute value
fcmp.b %fp1,&0x1 # is |result| < 1.b?
fbge.w fsgldiv_normal_exit # no; no underflow occurred
bra.w fsgldiv_unfl # yes; underflow occurred
############################################################################
#
# Divide: inputs are not both normalized; what are they?
#
fsgldiv_not_norm:
mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsgldiv_op:
short fsgldiv_norm - tbl_fsgldiv_op # NORM / NORM
short fsgldiv_inf_load - tbl_fsgldiv_op # NORM / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # NORM / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # NORM / QNAN
short fsgldiv_norm - tbl_fsgldiv_op # NORM / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # NORM / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / NORM
short fsgldiv_res_operr - tbl_fsgldiv_op # ZERO / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # ZERO / QNAN
short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # ZERO / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / NORM
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / ZERO
short fsgldiv_res_operr - tbl_fsgldiv_op # INF / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # INF / QNAN
short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # INF / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / NORM
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / ZERO
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / QNAN
short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # QNAN / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_norm - tbl_fsgldiv_op # DENORM / NORM
short fsgldiv_inf_load - tbl_fsgldiv_op # DENORM / ZERO
short fsgldiv_zero_load - tbl_fsgldiv_op # DENORM / INF
short fsgldiv_res_qnan - tbl_fsgldiv_op # DENORM / QNAN
short fsgldiv_norm - tbl_fsgldiv_op # DENORM / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # DENORM / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / NORM
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / ZERO
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / INF
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / QNAN
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / DENORM
short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / SNAN
short tbl_fsgldiv_op - tbl_fsgldiv_op #
short tbl_fsgldiv_op - tbl_fsgldiv_op #
fsgldiv_res_qnan:
bra.l res_qnan
fsgldiv_res_snan:
bra.l res_snan
fsgldiv_res_operr:
bra.l res_operr
fsgldiv_inf_load:
bra.l fdiv_inf_load
fsgldiv_zero_load:
bra.l fdiv_zero_load
fsgldiv_inf_dst:
bra.l fdiv_inf_dst
#########################################################################
# XDEF **************************************************************** #
# fadd(): emulates the fadd instruction #
#	fsadd(): emulates the fsadd instruction				#
# fdadd(): emulates the fdadd instruction #
# #
# XREF **************************************************************** #
# addsub_scaler2() - scale the operands so they won't take exc #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan() - set QNAN result #
# res_snan() - set SNAN result #
# res_operr() - set OPERR result #
# scale_to_zero_src() - set src operand exponent equal to zero #
# scale_to_zero_dst() - set dst operand exponent equal to zero #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Do addition after scaling exponents such that exception won't #
# occur. Then, check result exponent to see if exception would have #
# occurred. If so, return default result and maybe EXOP. Else, insert #
# the correct result exponent and return. Set FPSR bits as appropriate. #
# #
#########################################################################
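#
# Rough C-style sketch of the flow below (illustrative only; not part of the
# emulation code, and the names are made up for the example):
#
#	scale = addsub_scaler2(&src, &dst);	/* exps pulled near 0x3fff	*/
#	res   = dst + src;			/* cannot ovfl/unfl here	*/
#	true_exp = (res.exp & 0x7fff) - scale;	/* undo the scaling		*/
#	if      (true_exp >= ovfl_bnd[prec])	/* 0x7fff / 0x407f / 0x43ff	*/
#		-> overflow path
#	else if (true_exp <  unfl_bnd[prec])	/* 0x0000 / 0x3f81 / 0x3c01	*/
#		-> underflow path
#	else if (true_exp == unfl_bnd[prec])
#		-> may_unfl; re-check with RZ
#	else	res.exp = sign | true_exp;	/* patch exp, return in fp0	*/
#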
global fsadd
fsadd:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fadd
global fdadd
fdadd:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fadd
fadd:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fadd_not_norm # optimize on non-norm input
#
# ADD: norms and denorms
#
fadd_norm:
bsr.l addsub_scaler2 # scale exponents
fadd_zero_entry:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch INEX2,N,Z
or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
fbeq.w fadd_zero_exit # if result is zero, end now
mov.l %d2,-(%sp) # save d2
fmovm.x &0x01,-(%sp) # save result to stack
	mov.w		2+L_SCR3(%a6),%d1	# fetch rnd prec,mode
	lsr.b		&0x6,%d1		# shift rnd prec to lo bits
mov.w (%sp),%d2 # fetch new sign, exp
andi.l &0x7fff,%d2 # strip sign
sub.l %d0,%d2 # add scale factor
cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
bge.b fadd_ovfl # yes
cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
blt.w fadd_unfl # yes
beq.w fadd_may_unfl # maybe; go find out
fadd_normal:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x80 # return result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_zero_exit:
# fmov.s &0x00000000,%fp0 # return zero in fp0
rts
tbl_fadd_ovfl:
long 0x7fff # ext ovfl
long 0x407f # sgl ovfl
long 0x43ff # dbl ovfl
tbl_fadd_unfl:
long 0x0000 # ext unfl
long 0x3f81 # sgl unfl
long 0x3c01 # dbl unfl
fadd_ovfl:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fadd_ovfl_ena # yes
add.l &0xc,%sp
fadd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_ovfl_ena:
mov.b L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fadd_ovfl_ena_sd # no; prec = sgl or dbl
fadd_ovfl_ena_cont:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
	subi.l		&0x6000,%d2		# subtract bias adjustment
andi.w &0x7fff,%d2
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x40 # return EXOP in fp1
bra.b fadd_ovfl_dis
fadd_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # keep rnd mode
fmov.l %d1,%fpcr # set FPCR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
add.l &0xc,%sp
fmovm.x &0x01,-(%sp)
bra.b fadd_ovfl_ena_cont
fadd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
add.l &0xc,%sp
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fadd.x FP_SCR0(%a6),%fp0 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save status
or.l %d1,USER_FPSR(%a6) # save INEX,N
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fadd_unfl_ena # yes
fadd_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fadd_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fadd_unfl_ena_sd # no; sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fadd_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
	fadd.x		FP_SCR0(%a6),%fp1	# execute add
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat sign,new exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fadd_unfl_dis
fadd_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # use only rnd mode
fmov.l %d1,%fpcr # set FPCR
bra.b fadd_unfl_ena_cont
#
# result is equal to the smallest normalized number in the selected precision
# if the precision is extended, this result could not have come from an
# underflow that rounded up.
#
fadd_may_unfl:
mov.l L_SCR3(%a6),%d1
	andi.b		&0xc0,%d1		# is precision extended?
beq.w fadd_normal # yes; no underflow occurred
mov.l 0x4(%sp),%d1 # extract hi(man)
cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
bne.w fadd_normal # no; no underflow occurred
tst.l 0x8(%sp) # is lo(man) = 0x0?
bne.w fadd_normal # no; no underflow occurred
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.w fadd_normal # no; no underflow occurred
#
# ok, so now the result has an exponent equal to the smallest normalized
# exponent for the selected precision. also, the mantissa is equal to
# 0x8000000000000000 and this mantissa is the result of rounding non-zero
# g,r,s.
# now, we must determine whether the pre-rounded result was an underflow
# rounded "up" or a normalized number rounded "down".
# so, we do this by re-executing the add using RZ as the rounding mode and
# seeing if the new result is smaller or equal to the current result.
#
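# (Illustrative example of the RZ re-check: suppose the rounded result came
#  out as exactly 1.0 x 2^(smallest norm exp) for the precision. Redoing the
#  add with RZ chops instead of rounding; if the chopped result is smaller in
#  magnitude than the rounded one, the exact sum was below the smallest norm
#  and only rounding pushed it up -- a true underflow. If the two are equal,
#  the exact sum was already at or above the boundary and no underflow
#  occurred.)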
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fadd.x FP_SCR0(%a6),%fp1 # execute add
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # compare absolute values
fabs.x %fp1
fcmp.x %fp0,%fp1 # is first result > second?
fbgt.w fadd_unfl # yes; it's an underflow
bra.w fadd_normal # no; it's not an underflow
##########################################################################
#
# Add: inputs are not both normalized; what are they?
#
fadd_not_norm:
mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fadd_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fadd_op:
short fadd_norm - tbl_fadd_op # NORM + NORM
short fadd_zero_src - tbl_fadd_op # NORM + ZERO
short fadd_inf_src - tbl_fadd_op # NORM + INF
short fadd_res_qnan - tbl_fadd_op # NORM + QNAN
short fadd_norm - tbl_fadd_op # NORM + DENORM
short fadd_res_snan - tbl_fadd_op # NORM + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_zero_dst - tbl_fadd_op # ZERO + NORM
short fadd_zero_2 - tbl_fadd_op # ZERO + ZERO
short fadd_inf_src - tbl_fadd_op # ZERO + INF
	short		fadd_res_qnan	-	tbl_fadd_op # ZERO + QNAN
short fadd_zero_dst - tbl_fadd_op # ZERO + DENORM
	short		fadd_res_snan	-	tbl_fadd_op # ZERO + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_inf_dst - tbl_fadd_op # INF + NORM
short fadd_inf_dst - tbl_fadd_op # INF + ZERO
short fadd_inf_2 - tbl_fadd_op # INF + INF
	short		fadd_res_qnan	-	tbl_fadd_op # INF + QNAN
short fadd_inf_dst - tbl_fadd_op # INF + DENORM
	short		fadd_res_snan	-	tbl_fadd_op # INF + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_res_qnan - tbl_fadd_op # QNAN + NORM
short fadd_res_qnan - tbl_fadd_op # QNAN + ZERO
short fadd_res_qnan - tbl_fadd_op # QNAN + INF
short fadd_res_qnan - tbl_fadd_op # QNAN + QNAN
short fadd_res_qnan - tbl_fadd_op # QNAN + DENORM
short fadd_res_snan - tbl_fadd_op # QNAN + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_norm - tbl_fadd_op # DENORM + NORM
short fadd_zero_src - tbl_fadd_op # DENORM + ZERO
short fadd_inf_src - tbl_fadd_op # DENORM + INF
	short		fadd_res_qnan	-	tbl_fadd_op # DENORM + QNAN
short fadd_norm - tbl_fadd_op # DENORM + DENORM
	short		fadd_res_snan	-	tbl_fadd_op # DENORM + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
short fadd_res_snan - tbl_fadd_op # SNAN + NORM
short fadd_res_snan - tbl_fadd_op # SNAN + ZERO
short fadd_res_snan - tbl_fadd_op # SNAN + INF
short fadd_res_snan - tbl_fadd_op # SNAN + QNAN
short fadd_res_snan - tbl_fadd_op # SNAN + DENORM
short fadd_res_snan - tbl_fadd_op # SNAN + SNAN
short tbl_fadd_op - tbl_fadd_op #
short tbl_fadd_op - tbl_fadd_op #
fadd_res_qnan:
bra.l res_qnan
fadd_res_snan:
bra.l res_snan
#
# both operands are ZEROes
#
fadd_zero_2:
mov.b SRC_EX(%a0),%d0 # are the signs opposite
mov.b DST_EX(%a1),%d1
eor.b %d0,%d1
bmi.w fadd_zero_2_chk_rm # weed out (-ZERO)+(+ZERO)
# the signs are the same. so determine whether they are positive or negative
# and return the appropriately signed zero.
tst.b %d0 # are ZEROes positive or negative?
bmi.b fadd_zero_rm # negative
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# the ZEROes have opposite signs:
# - Therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
# - -ZERO is returned in the case of RM.
#
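# (Worked examples of the zero-sign rules, per the IEEE defaults:
#	(+0) + (+0) = +0	(-0) + (-0) = -0
#	(+0) + (-0) = +0 in RN/RZ/RP, but -0 in RM.)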
fadd_zero_2_chk_rm:
mov.b 3+L_SCR3(%a6),%d1
andi.b &0x30,%d1 # extract rnd mode
cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
beq.b fadd_zero_rm # yes
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
fadd_zero_rm:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
rts
#
# one operand is a ZERO and the other is a DENORM or NORM. scale
# the DENORM or NORM and jump to the regular fadd routine.
#
fadd_zero_dst:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale the operand
clr.w FP_SCR1_EX(%a6)
clr.l FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
bra.w fadd_zero_entry # go execute fadd
fadd_zero_src:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
bsr.l scale_to_zero_dst # scale the operand
clr.w FP_SCR0_EX(%a6)
clr.l FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
bra.w fadd_zero_entry # go execute fadd
#
# both operands are INFs. an OPERR will result if the INFs have
# different signs. else, an INF of the same sign is returned
#
fadd_inf_2:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
bmi.l res_operr # weed out (-INF)+(+INF)
# ok, so it's not an OPERR. but, we do have to remember to return the
# src INF since that's where the 881/882 gets the j-bit from...
#
# operands are INF and one of {ZERO, INF, DENORM, NORM}
#
fadd_inf_src:
fmovm.x SRC(%a0),&0x80 # return src INF
tst.b SRC_EX(%a0) # is INF positive?
bpl.b fadd_inf_done # yes; we're done
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
#
# operands are INF and one of {ZERO, INF, DENORM, NORM}
#
fadd_inf_dst:
fmovm.x DST(%a1),&0x80 # return dst INF
tst.b DST_EX(%a1) # is INF positive?
bpl.b fadd_inf_done # yes; we're done
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fadd_inf_done:
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fsub(): emulates the fsub instruction #
# fssub(): emulates the fssub instruction #
# fdsub(): emulates the fdsub instruction #
# #
# XREF **************************************************************** #
# addsub_scaler2() - scale the operands so they won't take exc #
# ovf_res() - return default overflow result #
# unf_res() - return default underflow result #
# res_qnan() - set QNAN result #
# res_snan() - set SNAN result #
# res_operr() - set OPERR result #
# scale_to_zero_src() - set src operand exponent equal to zero #
# scale_to_zero_dst() - set dst operand exponent equal to zero #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# a1 = pointer to extended precision destination operand #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms into extended, single, and double precision. #
# Do subtraction after scaling exponents such that exception won't#
# occur. Then, check result exponent to see if exception would have #
# occurred. If so, return default result and maybe EXOP. Else, insert #
# the correct result exponent and return. Set FPSR bits as appropriate. #
# #
#########################################################################
global fssub
fssub:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
bra.b fsub
global fdsub
fdsub:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
global fsub
fsub:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b DTAG(%a6),%d1
lsl.b &0x3,%d1
or.b STAG(%a6),%d1 # combine src tags
bne.w fsub_not_norm # optimize on non-norm input
#
# SUB: norms and denorms
#
fsub_norm:
bsr.l addsub_scaler2 # scale exponents
fsub_zero_entry:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch INEX2, N, Z
or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
fbeq.w fsub_zero_exit # if result zero, end now
mov.l %d2,-(%sp) # save d2
fmovm.x &0x01,-(%sp) # save result to stack
	mov.w		2+L_SCR3(%a6),%d1	# fetch rnd prec,mode
	lsr.b		&0x6,%d1		# shift rnd prec to lo bits
mov.w (%sp),%d2 # fetch new exponent
andi.l &0x7fff,%d2 # strip sign
sub.l %d0,%d2 # add scale factor
cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
bge.b fsub_ovfl # yes
cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
blt.w fsub_unfl # yes
beq.w fsub_may_unfl # maybe; go find out
fsub_normal:
mov.w (%sp),%d1
andi.w &0x8000,%d1 # keep sign
or.w %d2,%d1 # insert new exponent
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x80 # return result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_zero_exit:
# fmov.s &0x00000000,%fp0 # return zero in fp0
rts
tbl_fsub_ovfl:
long 0x7fff # ext ovfl
long 0x407f # sgl ovfl
long 0x43ff # dbl ovfl
tbl_fsub_unfl:
long 0x0000 # ext unfl
long 0x3f81 # sgl unfl
long 0x3c01 # dbl unfl
fsub_ovfl:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsub_ovfl_ena # yes
add.l &0xc,%sp
fsub_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass prec:rnd
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_ovfl_ena:
mov.b L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fsub_ovfl_ena_sd # no
fsub_ovfl_ena_cont:
mov.w (%sp),%d1 # fetch {sgn,exp}
andi.w &0x8000,%d1 # keep sign
subi.l &0x6000,%d2 # subtract new bias
andi.w &0x7fff,%d2 # clear top bit
or.w %d2,%d1 # concat sign,exp
mov.w %d1,(%sp) # insert new exponent
fmovm.x (%sp)+,&0x40 # return EXOP in fp1
bra.b fsub_ovfl_dis
fsub_ovfl_ena_sd:
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # clear rnd prec
fmov.l %d1,%fpcr # set FPCR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
add.l &0xc,%sp
fmovm.x &0x01,-(%sp)
bra.b fsub_ovfl_ena_cont
fsub_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
add.l &0xc,%sp
fmovm.x FP_SCR1(%a6),&0x80 # load dst op
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp0 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save status
or.l %d1,USER_FPSR(%a6)
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsub_unfl_ena # yes
fsub_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
mov.l (%sp)+,%d2 # restore d2
rts
fsub_unfl_ena:
fmovm.x FP_SCR1(%a6),&0x40
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # is precision extended?
bne.b fsub_unfl_ena_sd # no
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsub_unfl_ena_cont:
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp1 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
	addi.l		&0x6000,%d1		# add new bias
andi.w &0x7fff,%d1 # clear top bit
or.w %d2,%d1 # concat sgn,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
bra.w fsub_unfl_dis
fsub_unfl_ena_sd:
mov.l L_SCR3(%a6),%d1
andi.b &0x30,%d1 # clear rnd prec
fmov.l %d1,%fpcr # set FPCR
bra.b fsub_unfl_ena_cont
#
# result is equal to the smallest normalized number in the selected precision
# if the precision is extended, this result could not have come from an
# underflow that rounded up.
#
fsub_may_unfl:
mov.l L_SCR3(%a6),%d1
	andi.b		&0xc0,%d1		# is precision extended?
beq.w fsub_normal # yes; no underflow occurred
mov.l 0x4(%sp),%d1
cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
bne.w fsub_normal # no; no underflow occurred
tst.l 0x8(%sp) # is lo(man) = 0x0?
bne.w fsub_normal # no; no underflow occurred
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.w fsub_normal # no; no underflow occurred
#
# ok, so now the result has an exponent equal to the smallest normalized
# exponent for the selected precision. also, the mantissa is equal to
# 0x8000000000000000 and this mantissa is the result of rounding non-zero
# g,r,s.
# now, we must determine whether the pre-rounded result was an underflow
# rounded "up" or a normalized number rounded "down".
# so, we do this by re-executing the subtract using RZ as the rounding mode and
# seeing if the new result is smaller or equal to the current result.
#
fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
mov.l L_SCR3(%a6),%d1
andi.b &0xc0,%d1 # keep rnd prec
ori.b &rz_mode*0x10,%d1 # insert rnd mode
fmov.l %d1,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsub.x FP_SCR0(%a6),%fp1 # execute subtract
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # compare absolute values
fabs.x %fp1
fcmp.x %fp0,%fp1 # is first result > second?
fbgt.w fsub_unfl # yes; it's an underflow
bra.w fsub_normal # no; it's not an underflow
##########################################################################
#
# Sub: inputs are not both normalized; what are they?
#
fsub_not_norm:
mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
jmp (tbl_fsub_op.b,%pc,%d1.w*1)
swbeg &48
tbl_fsub_op:
short fsub_norm - tbl_fsub_op # NORM - NORM
short fsub_zero_src - tbl_fsub_op # NORM - ZERO
short fsub_inf_src - tbl_fsub_op # NORM - INF
short fsub_res_qnan - tbl_fsub_op # NORM - QNAN
short fsub_norm - tbl_fsub_op # NORM - DENORM
short fsub_res_snan - tbl_fsub_op # NORM - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_zero_dst - tbl_fsub_op # ZERO - NORM
short fsub_zero_2 - tbl_fsub_op # ZERO - ZERO
short fsub_inf_src - tbl_fsub_op # ZERO - INF
	short		fsub_res_qnan	-	tbl_fsub_op # ZERO - QNAN
short fsub_zero_dst - tbl_fsub_op # ZERO - DENORM
	short		fsub_res_snan	-	tbl_fsub_op # ZERO - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_inf_dst - tbl_fsub_op # INF - NORM
short fsub_inf_dst - tbl_fsub_op # INF - ZERO
short fsub_inf_2 - tbl_fsub_op # INF - INF
	short		fsub_res_qnan	-	tbl_fsub_op # INF - QNAN
short fsub_inf_dst - tbl_fsub_op # INF - DENORM
	short		fsub_res_snan	-	tbl_fsub_op # INF - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_res_qnan - tbl_fsub_op # QNAN - NORM
short fsub_res_qnan - tbl_fsub_op # QNAN - ZERO
short fsub_res_qnan - tbl_fsub_op # QNAN - INF
short fsub_res_qnan - tbl_fsub_op # QNAN - QNAN
short fsub_res_qnan - tbl_fsub_op # QNAN - DENORM
short fsub_res_snan - tbl_fsub_op # QNAN - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_norm - tbl_fsub_op # DENORM - NORM
short fsub_zero_src - tbl_fsub_op # DENORM - ZERO
short fsub_inf_src - tbl_fsub_op # DENORM - INF
	short		fsub_res_qnan	-	tbl_fsub_op # DENORM - QNAN
short fsub_norm - tbl_fsub_op # DENORM - DENORM
	short		fsub_res_snan	-	tbl_fsub_op # DENORM - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
short fsub_res_snan - tbl_fsub_op # SNAN - NORM
short fsub_res_snan - tbl_fsub_op # SNAN - ZERO
short fsub_res_snan - tbl_fsub_op # SNAN - INF
short fsub_res_snan - tbl_fsub_op # SNAN - QNAN
short fsub_res_snan - tbl_fsub_op # SNAN - DENORM
short fsub_res_snan - tbl_fsub_op # SNAN - SNAN
short tbl_fsub_op - tbl_fsub_op #
short tbl_fsub_op - tbl_fsub_op #
fsub_res_qnan:
bra.l res_qnan
fsub_res_snan:
bra.l res_snan
#
# both operands are ZEROes
#
fsub_zero_2:
mov.b SRC_EX(%a0),%d0
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
bpl.b fsub_zero_2_chk_rm
# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
	tst.b		%d1			# is dst negative?
bmi.b fsub_zero_2_rm # yes
fmov.s &0x00000000,%fp0 # no; return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
#
# the ZEROes have the same signs:
# - Therefore, we return +ZERO if the rounding mode is RN,RZ, or RP
# - -ZERO is returned in the case of RM.
#
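# (Worked examples, per the IEEE defaults:
#	(+0) - (-0) = +0	(-0) - (+0) = -0	(sign of the dst ZERO)
#	(+0) - (+0) and (-0) - (-0) = +0 in RN/RZ/RP, but -0 in RM.)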
fsub_zero_2_chk_rm:
mov.b 3+L_SCR3(%a6),%d1
andi.b &0x30,%d1 # extract rnd mode
cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
beq.b fsub_zero_2_rm # yes
fmov.s &0x00000000,%fp0 # no; return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set Z
rts
fsub_zero_2_rm:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/NEG
rts
#
# one operand is a ZERO and the other is a DENORM or a NORM.
# scale the DENORM or NORM and jump to the regular fsub routine.
#
fsub_zero_dst:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_to_zero_src # scale the operand
clr.w FP_SCR1_EX(%a6)
clr.l FP_SCR1_HI(%a6)
clr.l FP_SCR1_LO(%a6)
bra.w fsub_zero_entry # go execute fsub
fsub_zero_src:
mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
bsr.l scale_to_zero_dst # scale the operand
clr.w FP_SCR0_EX(%a6)
clr.l FP_SCR0_HI(%a6)
clr.l FP_SCR0_LO(%a6)
bra.w fsub_zero_entry # go execute fsub
#
# both operands are INFs. an OPERR will result if the INFs have the
# same signs. else, an INF w/ the sign of the dst INF is returned
#
fsub_inf_2:
mov.b SRC_EX(%a0),%d0 # exclusive or the signs
mov.b DST_EX(%a1),%d1
eor.b %d1,%d0
	bpl.l		res_operr		# weed out (+INF)-(+INF)
# ok, so it's not an OPERR. but we do have to remember to return
# the src INF since that's where the 881/882 gets the j-bit.
fsub_inf_src:
fmovm.x SRC(%a0),&0x80 # return src INF
fneg.x %fp0 # invert sign
fbge.w fsub_inf_done # sign is now positive
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fsub_inf_dst:
fmovm.x DST(%a1),&0x80 # return dst INF
tst.b DST_EX(%a1) # is INF negative?
bpl.b fsub_inf_done # no
mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
rts
fsub_inf_done:
mov.b &inf_bmask,FPSR_CC(%a6) # set INF
rts
#########################################################################
# XDEF **************************************************************** #
# fsqrt(): emulates the fsqrt instruction #
# fssqrt(): emulates the fssqrt instruction #
# fdsqrt(): emulates the fdsqrt instruction #
# #
# XREF **************************************************************** #
# scale_sqrt() - scale the source operand #
# unf_res() - return default underflow result #
# ovf_res() - return default overflow result #
# res_qnan_1op() - return QNAN result #
# res_snan_1op() - return SNAN result #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 rnd prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 = result #
# fp1 = EXOP (if exception occurred) #
# #
# ALGORITHM *********************************************************** #
# Handle NANs, infinities, and zeroes as special cases. Divide #
# norms/denorms into ext/sgl/dbl precision. #
# For norms/denorms, scale the exponents such that a sqrt #
# instruction won't cause an exception. Use the regular fsqrt to #
# compute a result. Check if the regular operands would have taken #
# an exception. If so, return the default overflow/underflow result #
# and return the EXOP if exceptions are enabled. Else, scale the #
# result operand to the proper exponent. #
# #
#########################################################################
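#
# Rough sketch of the sgl/dbl path below (illustrative only; the bounds are
# the same constants the code compares against):
#
#	d0 = scale_sqrt(FP_SCR0)		# exp forced to 0x3ffe/0x3fff
#	d0 >  0x3fff-0x3f81 (sgl) or 0x3fff-0x3c01 (dbl) -> result would unfl
#	d0 <  0x3fff-0x407f (sgl) or 0x3fff-0x43ff (dbl) -> result would ovfl
#	d0 == either bound -> borderline; check the exact exponent
#	otherwise: fsqrt the scaled operand, then fold the scale factor back
#	into the result exponent.
#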
global fssqrt
fssqrt:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl precision
bra.b fsqrt
global fdsqrt
fdsqrt:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl precision
global fsqrt
fsqrt:
mov.l %d0,L_SCR3(%a6) # store rnd info
clr.w %d1
mov.b STAG(%a6),%d1
bne.w fsqrt_not_norm # optimize on non-norm input
#
# SQUARE ROOT: norms and denorms ONLY!
#
fsqrt_norm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.l res_operr # yes
andi.b &0xc0,%d0 # is precision extended?
bne.b fsqrt_not_ext # no; go handle sgl or dbl
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsqrt.x (%a0),%fp0 # execute square root
fmov.l %fpsr,%d1
or.l %d1,USER_FPSR(%a6) # set N,INEX
rts
fsqrt_denorm:
tst.b SRC_EX(%a0) # is operand negative?
bmi.l res_operr # yes
andi.b &0xc0,%d0 # is precision extended?
bne.b fsqrt_not_ext # no; go handle sgl or dbl
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
bra.w fsqrt_sd_normal
#
# operand is either single or double
#
fsqrt_not_ext:
cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
bne.w fsqrt_dbl
#
# operand is to be rounded to single precision
#
fsqrt_sgl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
beq.w fsqrt_sd_may_unfl
bgt.w fsqrt_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
beq.w fsqrt_sd_may_ovfl # maybe; go check
blt.w fsqrt_sd_ovfl # yes; go handle overflow
#
# operand will NOT overflow or underflow when moved in to the fp reg file
#
fsqrt_sd_normal:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
fmov.l %fpsr,%d1 # save FPSR
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsqrt_sd_normal_exit:
mov.l %d2,-(%sp) # save d2
fmovm.x &0x80,FP_SCR0(%a6) # store out result
mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
sub.l %d0,%d1 # add scale factor
andi.w &0x8000,%d2 # keep old sign
or.w %d1,%d2 # concat old sign,new exp
mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
mov.l (%sp)+,%d2 # restore d2
fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
rts
#
# operand is to be rounded to double precision
#
fsqrt_dbl:
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
bsr.l scale_sqrt # calculate scale factor
cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
beq.w fsqrt_sd_may_unfl
bgt.b fsqrt_sd_unfl # yes; go handle underflow
cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
beq.w fsqrt_sd_may_ovfl # maybe; go check
blt.w fsqrt_sd_ovfl # yes; go handle overflow
	bra.w		fsqrt_sd_normal		# no; go handle normalized op
# we're on the line here and the distinguishing characteristic is whether
# the exponent is 3fff or 3ffe. if it's 3ffe, then it's a safe number;
# otherwise, fall through to underflow.
fsqrt_sd_may_unfl:
btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
bne.w fsqrt_sd_normal # yes, so no underflow
#
# operand WILL underflow when moved in to the fp register file
#
fsqrt_sd_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
fmov.l &rz_mode*0x10,%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
# if underflow or inexact is enabled, go calculate EXOP first.
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0b,%d1 # is UNFL or INEX enabled?
bne.b fsqrt_sd_unfl_ena # yes
fsqrt_sd_unfl_dis:
fmovm.x &0x80,FP_SCR0(%a6) # store out result
lea FP_SCR0(%a6),%a0 # pass: result addr
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
rts
#
# operand will underflow AND underflow is enabled.
# Therefore, we must return the result rounded to extended precision.
#
fsqrt_sd_unfl_ena:
mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
mov.l %d2,-(%sp) # save d2
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # subtract scale factor
addi.l &0x6000,%d1 # add new bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat new sign,new exp
mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fsqrt_sd_unfl_dis
#
# operand WILL overflow.
#
fsqrt_sd_ovfl:
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fsqrt_sd_ovfl_tst:
or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x13,%d1 # is OVFL or INEX enabled?
bne.b fsqrt_sd_ovfl_ena # yes
#
# OVFL is not enabled; therefore, we must create the default result by
# calling ovf_res().
#
fsqrt_sd_ovfl_dis:
btst &neg_bit,FPSR_CC(%a6) # is result negative?
sne %d1 # set sign param accordingly
mov.l L_SCR3(%a6),%d0 # pass: prec,mode
bsr.l ovf_res # calculate default result
or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
fmovm.x (%a0),&0x80 # return default result in fp0
rts
#
# OVFL is enabled.
# the INEX2 bit has already been updated by the round to the correct precision.
# now, round to extended(and don't alter the FPSR).
#
fsqrt_sd_ovfl_ena:
mov.l %d2,-(%sp) # save d2
mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
mov.l %d1,%d2 # make a copy
andi.l &0x7fff,%d1 # strip sign
andi.w &0x8000,%d2 # keep old sign
sub.l %d0,%d1 # add scale factor
subi.l &0x6000,%d1 # subtract bias
andi.w &0x7fff,%d1
or.w %d2,%d1 # concat sign,exp
mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
mov.l (%sp)+,%d2 # restore d2
bra.b fsqrt_sd_ovfl_dis
#
# the move in MAY overflow. so...
#
fsqrt_sd_may_ovfl:
btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
bne.w fsqrt_sd_ovfl # yes, so overflow
fmov.l &0x0,%fpsr # clear FPSR
fmov.l L_SCR3(%a6),%fpcr # set FPCR
	fsqrt.x		FP_SCR0(%a6),%fp0	# execute square root
fmov.l %fpsr,%d1 # save status
fmov.l &0x0,%fpcr # clear FPCR
or.l %d1,USER_FPSR(%a6) # save INEX2,N
fmov.x %fp0,%fp1 # make a copy of result
fcmp.b %fp1,&0x1 # is |result| >= 1.b?
fbge.w fsqrt_sd_ovfl_tst # yes; overflow has occurred
# no, it didn't overflow; we have correct result
bra.w fsqrt_sd_normal_exit
##########################################################################
#
# input is not normalized; what is it?
#
fsqrt_not_norm:
cmpi.b %d1,&DENORM # weed out DENORM
beq.w fsqrt_denorm
cmpi.b %d1,&ZERO # weed out ZERO
beq.b fsqrt_zero
cmpi.b %d1,&INF # weed out INF
beq.b fsqrt_inf
cmpi.b %d1,&SNAN # weed out SNAN
beq.l res_snan_1op
bra.l res_qnan_1op
#
# fsqrt(+0) = +0
# fsqrt(-0) = -0
# fsqrt(+INF) = +INF
# fsqrt(-INF) = OPERR
#
fsqrt_zero:
tst.b SRC_EX(%a0) # is ZERO positive or negative?
bmi.b fsqrt_zero_m # negative
fsqrt_zero_p:
fmov.s &0x00000000,%fp0 # return +ZERO
mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
rts
fsqrt_zero_m:
fmov.s &0x80000000,%fp0 # return -ZERO
mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
rts
fsqrt_inf:
tst.b SRC_EX(%a0) # is INF positive or negative?
bmi.l res_operr # negative
fsqrt_inf_p:
fmovm.x SRC(%a0),&0x80 # return +INF in fp0
mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
rts
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# addsub_scaler2(): scale inputs to fadd/fsub such that no #
# OVFL/UNFL exceptions will result #
# #
# XREF **************************************************************** #
# norm() - normalize mantissa after adjusting exponent #
# #
# INPUT *************************************************************** #
# FP_SRC(a6) = fp op1(src) #
# FP_DST(a6) = fp op2(dst) #
# #
# OUTPUT ************************************************************** #
# FP_SRC(a6) = fp op1 scaled(src) #
# FP_DST(a6) = fp op2 scaled(dst) #
# d0 = scale amount #
# #
# ALGORITHM *********************************************************** #
# If the DST exponent is > the SRC exponent, set the DST exponent #
# equal to 0x3fff and scale the SRC exponent by the value that the #
# DST exponent was scaled by. If the SRC exponent is greater or equal, #
# do the opposite. Return this scale factor in d0. #
# If the two exponents differ by > the number of mantissa bits #
# plus two, then set the smallest exponent to a very small value as a #
# quick shortcut. #
# #
#########################################################################
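#
# Worked example (illustrative): src exp = 0x4005, dst exp = 0x3ffb.
# Since src exp >= dst exp, the src exponent is forced to 0x3fff and the
# scale factor 0x3fff - 0x4005 = -0x6 is returned; the dst exponent gets the
# same adjustment, 0x3ffb + (-0x6) = 0x3ff5, so the exponent difference of
# 0xa is preserved. The callers later recover the true result exponent by
# subtracting this scale factor from the scaled result's exponent.
#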
global addsub_scaler2
addsub_scaler2:
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
mov.w SRC_EX(%a0),%d0
mov.w DST_EX(%a1),%d1
mov.w %d0,FP_SCR0_EX(%a6)
mov.w %d1,FP_SCR1_EX(%a6)
andi.w &0x7fff,%d0
andi.w &0x7fff,%d1
mov.w %d0,L_SCR1(%a6) # store src exponent
mov.w %d1,2+L_SCR1(%a6) # store dst exponent
cmp.w %d0, %d1 # is src exp >= dst exp?
bge.l src_exp_ge2
# dst exp is > src exp; scale dst to exp = 0x3fff
dst_exp_gt2:
bsr.l scale_to_zero_dst
mov.l %d0,-(%sp) # save scale factor
	cmpi.b		STAG(%a6),&DENORM	# is src denormalized?
bne.b cmpexp12
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the denorm; result is new exp
neg.w %d0 # new exp = -(shft val)
	mov.w		%d0,L_SCR1(%a6)		# insert new exp
cmpexp12:
mov.w 2+L_SCR1(%a6),%d0
subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
cmp.w %d0,L_SCR1(%a6) # is difference >= len(mantissa)+2?
bge.b quick_scale12
mov.w L_SCR1(%a6),%d0
add.w 0x2(%sp),%d0 # scale src exponent by scale factor
mov.w FP_SCR0_EX(%a6),%d1
and.w &0x8000,%d1
or.w %d1,%d0 # concat {sgn,new exp}
	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new src exponent
mov.l (%sp)+,%d0 # return SCALE factor
rts
quick_scale12:
andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
mov.l (%sp)+,%d0 # return SCALE factor
rts
# src exp is >= dst exp; scale src to exp = 0x3fff
src_exp_ge2:
bsr.l scale_to_zero_src
mov.l %d0,-(%sp) # save scale factor
cmpi.b DTAG(%a6),&DENORM # is dst denormalized?
bne.b cmpexp22
lea FP_SCR1(%a6),%a0
bsr.l norm # normalize the denorm; result is new exp
neg.w %d0 # new exp = -(shft val)
	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
cmpexp22:
mov.w L_SCR1(%a6),%d0
subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
cmp.w %d0,2+L_SCR1(%a6) # is difference >= len(mantissa)+2?
bge.b quick_scale22
mov.w 2+L_SCR1(%a6),%d0
add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
mov.w FP_SCR1_EX(%a6),%d1
andi.w &0x8000,%d1
or.w %d1,%d0 # concat {sgn,new exp}
mov.w %d0,FP_SCR1_EX(%a6) # insert new dst exponent
mov.l (%sp)+,%d0 # return SCALE factor
rts
quick_scale22:
andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
mov.l (%sp)+,%d0 # return SCALE factor
rts
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# scale_to_zero_src(): scale the exponent of extended precision #
# value at FP_SCR0(a6). #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR0(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# Set the exponent of the input operand to 0x3fff. Save the value #
# of the difference between the original and new exponent. Then, #
# normalize the operand if it was a DENORM. Add this normalization #
# value to the previous value. Return the result. #
# #
#########################################################################
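#
# e.g. (illustrative): an operand with biased exponent 0x2001 has its
# exponent replaced by 0x3fff and d0 = 0x3fff - 0x2001 = 0x1ffe is returned;
# for a DENORM the mantissa is normalized first and the shift count is
# folded into the same scale value.
#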
global scale_to_zero_src
scale_to_zero_src:
mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
mov.w %d1,%d0 # make a copy
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,%d0 # extract operand's sgn
or.w &0x3fff,%d0 # insert new operand's exponent(=0)
mov.w %d0,FP_SCR0_EX(%a6) # insert biased exponent
	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
beq.b stzs_denorm # normalize the DENORM
stzs_norm:
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
rts
stzs_denorm:
lea FP_SCR0(%a6),%a0 # pass ptr to src op
bsr.l norm # normalize denorm
neg.l %d0 # new exponent = -(shft val)
mov.l %d0,%d1 # prepare for op_norm call
bra.b stzs_norm # finish scaling
###
#########################################################################
# XDEF **************************************************************** #
# scale_sqrt(): scale the input operand exponent so a subsequent #
# fsqrt operation won't take an exception. #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR0(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# If the input operand is a DENORM, normalize it. #
# If the exponent of the input operand is even, set the exponent #
# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
#	exponent of the input operand is odd, set the exponent to 0x3fff and	#
# return a scale factor of "(exp-0x3fff)/2". #
# #
#########################################################################
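#
# The identity being used (illustrative), with x = m * 2^(e-0x3fff),
# 1.0 <= m < 2.0 and e the biased exponent:
#
#	e odd :  x = m     * 2^(2k), k = (e-0x3fff)/2  ->  sqrt(x) = sqrt(m)   * 2^k
#	e even:  x = (m/2) * 2^(2k), k = (e-0x3ffe)/2  ->  sqrt(x) = sqrt(m/2) * 2^k
#
# so the scaled operand always lies in [0.5,2.0) and the caller folds k back
# into the result exponent afterwards.
#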
global scale_sqrt
scale_sqrt:
	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
beq.b ss_denorm # normalize the DENORM
mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
btst &0x0,%d1 # is exp even or odd?
beq.b ss_norm_even
ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_norm_even:
ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
mov.l &0x3ffe,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_denorm:
lea FP_SCR0(%a6),%a0 # pass ptr to src op
bsr.l norm # normalize denorm
btst &0x0,%d0 # is exp even or odd?
beq.b ss_denorm_even
ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
add.l &0x3fff,%d0
asr.l &0x1,%d0 # divide scale factor by 2
rts
ss_denorm_even:
ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
add.l &0x3ffe,%d0
asr.l &0x1,%d0 # divide scale factor by 2
rts
###
#########################################################################
# XDEF **************************************************************** #
# scale_to_zero_dst(): scale the exponent of extended precision #
# value at FP_SCR1(a6). #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa if the operand was a DENORM #
# #
# INPUT *************************************************************** #
# FP_SCR1(a6) = extended precision operand to be scaled #
# #
# OUTPUT ************************************************************** #
# FP_SCR1(a6) = scaled extended precision operand #
# d0 = scale value #
# #
# ALGORITHM *********************************************************** #
# Set the exponent of the input operand to 0x3fff. Save the value #
# of the difference between the original and new exponent. Then, #
# normalize the operand if it was a DENORM. Add this normalization #
# value to the previous value. Return the result. #
# #
#########################################################################
global scale_to_zero_dst
scale_to_zero_dst:
mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
mov.w %d1,%d0 # make a copy
andi.l &0x7fff,%d1 # extract operand's exponent
andi.w &0x8000,%d0 # extract operand's sgn
or.w &0x3fff,%d0 # insert new operand's exponent(=0)
mov.w %d0,FP_SCR1_EX(%a6) # insert biased exponent
	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
beq.b stzd_denorm # normalize the DENORM
stzd_norm:
mov.l &0x3fff,%d0
sub.l %d1,%d0 # scale = BIAS + (-exp)
rts
stzd_denorm:
lea FP_SCR1(%a6),%a0 # pass ptr to dst op
bsr.l norm # normalize denorm
neg.l %d0 # new exponent = -(shft val)
mov.l %d0,%d1 # prepare for op_norm call
bra.b stzd_norm # finish scaling
##########################################################################
#########################################################################
# XDEF **************************************************************** #
# res_qnan(): return default result w/ QNAN operand for dyadic #
# res_snan(): return default result w/ SNAN operand for dyadic #
# res_qnan_1op(): return dflt result w/ QNAN operand for monadic #
# res_snan_1op(): return dflt result w/ SNAN operand for monadic #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# FP_SRC(a6) = pointer to extended precision src operand #
# FP_DST(a6) = pointer to extended precision dst operand #
# #
# OUTPUT ************************************************************** #
# fp0 = default result #
# #
# ALGORITHM *********************************************************** #
# If either operand (but not both operands) of an operation is a #
# nonsignalling NAN, then that NAN is returned as the result. If both #
# operands are nonsignalling NANs, then the destination operand #
# nonsignalling NAN is returned as the result. #
# If either operand to an operation is a signalling NAN (SNAN), #
# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap #
# enable bit is set in the FPCR, then the trap is taken and the #
# destination is not modified. If the SNAN trap enable bit is not set, #
# then the SNAN is converted to a nonsignalling NAN (by setting the #
# SNAN bit in the operand to one), and the operation continues as #
# described in the preceding paragraph, for nonsignalling NANs. #
# Make sure the appropriate FPSR bits are set before exiting. #
# #
#########################################################################
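#
# For example (illustrative):
#	fadd.x	<QNAN>,<QNAN>	-> dst QNAN returned; NAN ccode set in the FPSR
#	fadd.x	<SNAN>,<NORM>	-> NAN/SNAN/AIOP set; the src SNAN is returned
#				   quieted (bit 62 of its mantissa forced to one)
#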
global res_qnan
global res_snan
res_qnan:
res_snan:
cmp.b DTAG(%a6), &SNAN # is the dst an SNAN?
beq.b dst_snan2
cmp.b DTAG(%a6), &QNAN # is the dst a QNAN?
beq.b dst_qnan2
src_nan:
cmp.b STAG(%a6), &QNAN
beq.b src_qnan2
global res_snan_1op
res_snan_1op:
src_snan2:
bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
lea FP_SRC(%a6), %a0
bra.b nan_comp
global res_qnan_1op
res_qnan_1op:
src_qnan2:
or.l &nan_mask, USER_FPSR(%a6)
lea FP_SRC(%a6), %a0
bra.b nan_comp
dst_snan2:
or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
bset &0x6, FP_DST_HI(%a6) # set SNAN bit
lea FP_DST(%a6), %a0
bra.b nan_comp
dst_qnan2:
lea FP_DST(%a6), %a0
cmp.b STAG(%a6), &SNAN
bne nan_done
or.l &aiop_mask+snan_mask, USER_FPSR(%a6)
nan_done:
or.l &nan_mask, USER_FPSR(%a6)
nan_comp:
btst &0x7, FTEMP_EX(%a0) # is NAN neg?
beq.b nan_not_neg
or.l &neg_mask, USER_FPSR(%a6)
nan_not_neg:
fmovm.x (%a0), &0x80
rts
#########################################################################
# XDEF **************************************************************** #
# res_operr(): return default result during operand error #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# fp0 = default operand error result #
# #
# ALGORITHM *********************************************************** #
#	A nonsignalling NAN is returned as the default result when		#
# an operand error occurs for the following cases: #
# #
# Multiply: (Infinity x Zero) #
# Divide : (Zero / Zero) || (Infinity / Infinity) #
# #
#########################################################################
global res_operr
res_operr:
or.l &nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
fmovm.x nan_return(%pc), &0x80
rts
nan_return:
long 0x7fff0000, 0xffffffff, 0xffffffff
#########################################################################
# fdbcc(): routine to emulate the fdbcc instruction #
# #
# XDEF **************************************************************** #
# _fdbcc() #
# #
# XREF **************************************************************** #
# fetch_dreg() - fetch Dn value #
# store_dreg_l() - store updated Dn value #
# #
# INPUT *************************************************************** #
# d0 = displacement #
# #
# OUTPUT ************************************************************** #
# none #
# #
# ALGORITHM *********************************************************** #
# This routine checks which conditional predicate is specified by #
# the stacked fdbcc instruction opcode and then branches to a routine #
# for that predicate. The corresponding fbcc instruction is then used #
# to see whether the condition (specified by the stacked FPSR) is true #
# or false. #
# If a BSUN exception should be indicated, the BSUN and ABSUN #
# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
#	enabled BSUN should not be flagged and the predicate is false, then	#
# Dn is fetched and decremented by one. If Dn is not equal to -1, add #
# the displacement value to the stacked PC so that when an "rte" is #
# finally executed, the branch occurs. #
# #
#########################################################################
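#
# Net effect, roughly (illustrative pseudocode; the BSUN bookkeeping described
# above is layered on top for the predicates that can signal on a NAN):
#
#	if (predicate is true)
#		fall through to the next instruction
#	else {
#		Dn.w = Dn.w - 1;
#		if (Dn.w != -1)
#			stacked PC += displacement;	/* loop back on "rte" */
#	}
#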
global _fdbcc
_fdbcc:
mov.l %d0,L_SCR1(%a6) # save displacement
mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
clr.l %d1 # clear scratch reg
mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
ror.l &0x8,%d1 # rotate to top byte
fmov.l %d1,%fpsr # insert into FPSR
mov.w (tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
jmp (tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
tbl_fdbcc:
short fdbcc_f - tbl_fdbcc # 00
short fdbcc_eq - tbl_fdbcc # 01
short fdbcc_ogt - tbl_fdbcc # 02
short fdbcc_oge - tbl_fdbcc # 03
short fdbcc_olt - tbl_fdbcc # 04
short fdbcc_ole - tbl_fdbcc # 05
short fdbcc_ogl - tbl_fdbcc # 06
short fdbcc_or - tbl_fdbcc # 07
short fdbcc_un - tbl_fdbcc # 08
short fdbcc_ueq - tbl_fdbcc # 09
short fdbcc_ugt - tbl_fdbcc # 10
short fdbcc_uge - tbl_fdbcc # 11
short fdbcc_ult - tbl_fdbcc # 12
short fdbcc_ule - tbl_fdbcc # 13
short fdbcc_neq - tbl_fdbcc # 14
short fdbcc_t - tbl_fdbcc # 15
short fdbcc_sf - tbl_fdbcc # 16
short fdbcc_seq - tbl_fdbcc # 17
short fdbcc_gt - tbl_fdbcc # 18
short fdbcc_ge - tbl_fdbcc # 19
short fdbcc_lt - tbl_fdbcc # 20
short fdbcc_le - tbl_fdbcc # 21
short fdbcc_gl - tbl_fdbcc # 22
short fdbcc_gle - tbl_fdbcc # 23
short fdbcc_ngle - tbl_fdbcc # 24
short fdbcc_ngl - tbl_fdbcc # 25
short fdbcc_nle - tbl_fdbcc # 26
short fdbcc_nlt - tbl_fdbcc # 27
short fdbcc_nge - tbl_fdbcc # 28
short fdbcc_ngt - tbl_fdbcc # 29
short fdbcc_sneq - tbl_fdbcc # 30
short fdbcc_st - tbl_fdbcc # 31
#########################################################################
# #
# IEEE Nonaware tests #
# #
# For the IEEE nonaware tests, only the false branch changes the #
# counter. However, the true branch may set bsun so we check to see #
# if the NAN bit is set, in which case BSUN and AIOP will be set. #
# #
# The cases EQ and NE are shared by the Aware and Nonaware groups #
# and are incapable of setting the BSUN exception bit. #
# #
# Typically, only one of the two possible branch directions could #
# have the NAN bit set. #
# (This is assuming the mutual exclusiveness of FPSR cc bit groupings #
# is preserved.) #
# #
#########################################################################
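#
# For example (illustrative): if the last compare involved a NAN, "fdbgt"
# is false AND sets BSUN/AIOP (taking the BSUN exception if enabled), while
# the aware "fdbogt" on the same operands is simply false and sets no
# exception bits.
#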
#
# equal:
#
# Z
#
fdbcc_eq:
fbeq.w fdbcc_eq_yes # equal?
fdbcc_eq_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_eq_yes:
rts
#
# not equal:
# _
# Z
#
fdbcc_neq:
fbneq.w fdbcc_neq_yes # not equal?
fdbcc_neq_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_neq_yes:
rts
#
# greater than:
# _______
# NANvZvN
#
fdbcc_gt:
fbgt.w fdbcc_gt_yes # greater than?
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_false # no;go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_gt_yes:
rts # do nothing
#
# not greater than:
#
# NANvZvN
#
fdbcc_ngt:
fbngt.w fdbcc_ngt_yes # not greater than?
fdbcc_ngt_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ngt_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_ngt_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_ngt_done:
rts # no; do nothing
#
# greater than or equal:
# _____
# Zv(NANvN)
#
fdbcc_ge:
fbge.w fdbcc_ge_yes # greater than or equal?
fdbcc_ge_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_false # no;go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_ge_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_ge_yes_done # no;go do nothing
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_ge_yes_done:
rts # do nothing
#
# not (greater than or equal):
# _
# NANv(N^Z)
#
fdbcc_nge:
fbnge.w fdbcc_nge_yes # not (greater than or equal)?
fdbcc_nge_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_nge_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_nge_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_nge_done:
rts # no; do nothing
#
# less than:
# _____
# N^(NANvZ)
#
fdbcc_lt:
fblt.w fdbcc_lt_yes # less than?
fdbcc_lt_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_false # no; go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_lt_yes:
rts # do nothing
#
# not less than:
# _
# NANv(ZvN)
#
fdbcc_nlt:
fbnlt.w fdbcc_nlt_yes # not less than?
fdbcc_nlt_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_nlt_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_nlt_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_nlt_done:
rts # no; do nothing
#
# less than or equal:
# ___
# Zv(N^NAN)
#
fdbcc_le:
fble.w fdbcc_le_yes # less than or equal?
fdbcc_le_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_false # no; go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_le_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_le_yes_done # no; go do nothing
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_le_yes_done:
rts # do nothing
#
# not (less than or equal):
# ___
# NANv(NvZ)
#
fdbcc_nle:
fbnle.w fdbcc_nle_yes # not (less than or equal)?
fdbcc_nle_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_nle_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_nle_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_nle_done:
rts # no; do nothing
#
# greater or less than:
# _____
# NANvZ
#
fdbcc_gl:
fbgl.w fdbcc_gl_yes # greater or less than?
fdbcc_gl_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fdbcc_false # no; handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_gl_yes:
rts # do nothing
#
# not (greater or less than):
#
# NANvZ
#
fdbcc_ngl:
fbngl.w fdbcc_ngl_yes # not (greater or less than)?
fdbcc_ngl_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ngl_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b fdbcc_ngl_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_ngl_done:
rts # no; do nothing
#
# greater, less, or equal:
# ___
# NAN
#
fdbcc_gle:
fbgle.w fdbcc_gle_yes # greater, less, or equal?
fdbcc_gle_no:
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # no; go handle counter
fdbcc_gle_yes:
rts # do nothing
#
# not (greater, less, or equal):
#
# NAN
#
fdbcc_ngle:
fbngle.w fdbcc_ngle_yes # not (greater, less, or equal)?
fdbcc_ngle_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ngle_yes:
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
rts # no; do nothing
#########################################################################
# #
# Miscellaneous tests #
# #
# For the IEEE miscellaneous tests, all but fdbf and fdbt can set bsun. #
# #
#########################################################################
#
# false:
#
# False
#
fdbcc_f: # no bsun possible
bra.w fdbcc_false # go handle counter
#
# true:
#
# True
#
fdbcc_t: # no bsun possible
rts # do nothing
#
# signalling false:
#
# False
#
fdbcc_sf:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fdbcc_false # no;go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # go handle counter
#
# signalling true:
#
# True
#
fdbcc_st:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.b fdbcc_st_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_st_done:
rts
#
# signalling equal:
#
# Z
#
fdbcc_seq:
fbseq.w fdbcc_seq_yes # signalling equal?
fdbcc_seq_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fdbcc_false # no;go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # go handle counter
fdbcc_seq_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.b fdbcc_seq_yes_done # no;go do nothing
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_seq_yes_done:
rts # yes; do nothing
#
# signalling not equal:
# _
# Z
#
fdbcc_sneq:
fbsneq.w fdbcc_sneq_yes # signalling not equal?
fdbcc_sneq_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fdbcc_false # no;go handle counter
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
bra.w fdbcc_false # go handle counter
fdbcc_sneq_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fdbcc_sneq_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
bne.w fdbcc_bsun # yes; we have an exception
fdbcc_sneq_done:
rts
#########################################################################
# #
# IEEE Aware tests #
# #
# For the IEEE aware tests, action is only taken if the result is false.#
# If the predicate is true we simply return; if it is false we fall #
# through to the decrement routine. #
# The BSUN exception will not be set for any of these tests. #
# #
#########################################################################
#
# ordered greater than:
# _______
# NANvZvN
#
fdbcc_ogt:
fbogt.w fdbcc_ogt_yes # ordered greater than?
fdbcc_ogt_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ogt_yes:
rts # yes; do nothing
#
# unordered or less or equal:
# _______
# NANvZvN
#
fdbcc_ule:
fbule.w fdbcc_ule_yes # unordered or less or equal?
fdbcc_ule_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ule_yes:
rts # yes; do nothing
#
# ordered greater than or equal:
# _____
# Zv(NANvN)
#
fdbcc_oge:
fboge.w fdbcc_oge_yes # ordered greater than or equal?
fdbcc_oge_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_oge_yes:
rts # yes; do nothing
#
# unordered or less than:
# _
# NANv(N^Z)
#
fdbcc_ult:
fbult.w fdbcc_ult_yes # unordered or less than?
fdbcc_ult_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ult_yes:
rts # yes; do nothing
#
# ordered less than:
# _____
# N^(NANvZ)
#
fdbcc_olt:
fbolt.w fdbcc_olt_yes # ordered less than?
fdbcc_olt_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_olt_yes:
rts # yes; do nothing
#
# unordered or greater or equal:
#
# NANvZvN
#
fdbcc_uge:
fbuge.w fdbcc_uge_yes # unordered or greater or equal?
fdbcc_uge_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_uge_yes:
rts # yes; do nothing
#
# ordered less than or equal:
# ___
# Zv(N^NAN)
#
fdbcc_ole:
fbole.w fdbcc_ole_yes # ordered less than or equal?
fdbcc_ole_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ole_yes:
rts # yes; do nothing
#
# unordered or greater than:
# ___
# NANv(NvZ)
#
fdbcc_ugt:
fbugt.w fdbcc_ugt_yes # unordered or greater than?
fdbcc_ugt_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ugt_yes:
rts # yes; do nothing
#
# ordered greater or less than:
# _____
# NANvZ
#
fdbcc_ogl:
fbogl.w fdbcc_ogl_yes # ordered greater or less than?
fdbcc_ogl_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ogl_yes:
rts # yes; do nothing
#
# unordered or equal:
#
# NANvZ
#
fdbcc_ueq:
fbueq.w fdbcc_ueq_yes # unordered or equal?
fdbcc_ueq_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_ueq_yes:
rts # yes; do nothing
#
# ordered:
# ___
# NAN
#
fdbcc_or:
fbor.w fdbcc_or_yes # ordered?
fdbcc_or_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_or_yes:
rts # yes; do nothing
#
# unordered:
#
# NAN
#
fdbcc_un:
fbun.w fdbcc_un_yes # unordered?
fdbcc_un_no:
bra.w fdbcc_false # no; go handle counter
fdbcc_un_yes:
rts # yes; do nothing
#######################################################################
#
# the bsun exception bit was not set.
#
# (1) subtract 1 from the count register
# (2) if (cr == -1) then
# pc = pc of next instruction
# else
# pc += sign_ext(16-bit displacement)
#
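# (note: the displacement fetched from L_SCR1 below is assumed to have
#  been stashed there by the caller when it decoded the fdbcc opcode;
#  the target computed is USER_FPIAR + 4 + displacement, i.e. relative
#  to the address of the displacement word that follows the two opcode
#  words.)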
fdbcc_false:
mov.b 1+EXC_OPWORD(%a6), %d1 # fetch lo opword
andi.w &0x7, %d1 # extract count register
bsr.l fetch_dreg # fetch count value
# make sure that d0 isn't corrupted between calls...
subq.w &0x1, %d0 # Dn - 1 -> Dn
bsr.l store_dreg_l # store new count value
cmpi.w %d0, &-0x1 # is (Dn == -1)?
bne.b fdbcc_false_cont # no;
rts
fdbcc_false_cont:
mov.l L_SCR1(%a6),%d0 # fetch displacement
add.l USER_FPIAR(%a6),%d0 # add instruction PC
addq.l &0x4,%d0 # add instruction length
mov.l %d0,EXC_PC(%a6) # set new PC
rts
# the emulation routine set bsun and BSUN was enabled. have to
# fix stack and jump to the bsun handler.
# let the caller of this routine shift the stack frame up to
# eliminate the effective address field.
fdbcc_bsun:
mov.b &fbsun_flg,SPCOND_FLG(%a6)
rts
#########################################################################
# ftrapcc(): routine to emulate the ftrapcc instruction #
# #
# XDEF **************************************************************** #
# _ftrapcc() #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# none #
# #
# ALGORITHM *********************************************************** #
# This routine checks which conditional predicate is specified by #
# the stacked ftrapcc instruction opcode and then branches to a routine #
# for that predicate. The corresponding fbcc instruction is then used #
# to see whether the condition (specified by the stacked FPSR) is true #
# or false. #
# If a BSUN exception should be indicated, the BSUN and AIOP #
# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
# enabled BSUN should not be flagged and the predicate is true, then #
# the ftrapcc_flg is set in the SPCOND_FLG location. These special #
# flags indicate to the calling routine to emulate the exceptional #
# condition. #
# #
#########################################################################
global _ftrapcc
_ftrapcc:
mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
clr.l %d1 # clear scratch reg
mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
ror.l &0x8,%d1 # rotate to top byte
fmov.l %d1,%fpsr # insert into FPSR
mov.w (tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table
jmp (tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
tbl_ftrapcc:
short ftrapcc_f - tbl_ftrapcc # 00
short ftrapcc_eq - tbl_ftrapcc # 01
short ftrapcc_ogt - tbl_ftrapcc # 02
short ftrapcc_oge - tbl_ftrapcc # 03
short ftrapcc_olt - tbl_ftrapcc # 04
short ftrapcc_ole - tbl_ftrapcc # 05
short ftrapcc_ogl - tbl_ftrapcc # 06
short ftrapcc_or - tbl_ftrapcc # 07
short ftrapcc_un - tbl_ftrapcc # 08
short ftrapcc_ueq - tbl_ftrapcc # 09
short ftrapcc_ugt - tbl_ftrapcc # 10
short ftrapcc_uge - tbl_ftrapcc # 11
short ftrapcc_ult - tbl_ftrapcc # 12
short ftrapcc_ule - tbl_ftrapcc # 13
short ftrapcc_neq - tbl_ftrapcc # 14
short ftrapcc_t - tbl_ftrapcc # 15
short ftrapcc_sf - tbl_ftrapcc # 16
short ftrapcc_seq - tbl_ftrapcc # 17
short ftrapcc_gt - tbl_ftrapcc # 18
short ftrapcc_ge - tbl_ftrapcc # 19
short ftrapcc_lt - tbl_ftrapcc # 20
short ftrapcc_le - tbl_ftrapcc # 21
short ftrapcc_gl - tbl_ftrapcc # 22
short ftrapcc_gle - tbl_ftrapcc # 23
short ftrapcc_ngle - tbl_ftrapcc # 24
short ftrapcc_ngl - tbl_ftrapcc # 25
short ftrapcc_nle - tbl_ftrapcc # 26
short ftrapcc_nlt - tbl_ftrapcc # 27
short ftrapcc_nge - tbl_ftrapcc # 28
short ftrapcc_ngt - tbl_ftrapcc # 29
short ftrapcc_sneq - tbl_ftrapcc # 30
short ftrapcc_st - tbl_ftrapcc # 31
#########################################################################
# #
# IEEE Nonaware tests #
# #
# For the IEEE nonaware tests, we set the result based on the #
# floating point condition codes. In addition, we check to see #
# if the NAN bit is set, in which case BSUN and AIOP will be set. #
# #
# The cases EQ and NE are shared by the Aware and Nonaware groups #
# and are incapable of setting the BSUN exception bit. #
# #
# Typically, only one of the two possible branch directions could #
# have the NAN bit set. #
# #
#########################################################################
#
# equal:
#
# Z
#
ftrapcc_eq:
fbeq.w ftrapcc_trap # equal?
ftrapcc_eq_no:
rts # do nothing
#
# not equal:
# _
# Z
#
ftrapcc_neq:
fbneq.w ftrapcc_trap # not equal?
ftrapcc_neq_no:
rts # do nothing
#
# greater than:
# _______
# NANvZvN
#
ftrapcc_gt:
fbgt.w ftrapcc_trap # greater than?
ftrapcc_gt_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b ftrapcc_gt_done # no
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_gt_done:
rts # no; do nothing
#
# not greater than:
#
# NANvZvN
#
ftrapcc_ngt:
fbngt.w ftrapcc_ngt_yes # not greater than?
ftrapcc_ngt_no:
rts # do nothing
ftrapcc_ngt_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# greater than or equal:
# _____
# Zv(NANvN)
#
ftrapcc_ge:
fbge.w ftrapcc_ge_yes # greater than or equal?
ftrapcc_ge_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b ftrapcc_ge_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_ge_done:
rts # no; do nothing
ftrapcc_ge_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# not (greater than or equal):
# _
# NANv(N^Z)
#
ftrapcc_nge:
fbnge.w ftrapcc_nge_yes # not (greater than or equal)?
ftrapcc_nge_no:
rts # do nothing
ftrapcc_nge_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# less than:
# _____
# N^(NANvZ)
#
ftrapcc_lt:
fblt.w ftrapcc_trap # less than?
ftrapcc_lt_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b ftrapcc_lt_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_lt_done:
rts # no; do nothing
#
# not less than:
# _
# NANv(ZvN)
#
ftrapcc_nlt:
fbnlt.w ftrapcc_nlt_yes # not less than?
ftrapcc_nlt_no:
rts # do nothing
ftrapcc_nlt_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# less than or equal:
# ___
# Zv(N^NAN)
#
ftrapcc_le:
fble.w ftrapcc_le_yes # less than or equal?
ftrapcc_le_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b ftrapcc_le_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_le_done:
rts # no; do nothing
ftrapcc_le_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# not (less than or equal):
# ___
# NANv(NvZ)
#
ftrapcc_nle:
fbnle.w ftrapcc_nle_yes # not (less than or equal)?
ftrapcc_nle_no:
rts # do nothing
ftrapcc_nle_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# greater or less than:
# _____
# NANvZ
#
ftrapcc_gl:
fbgl.w ftrapcc_trap # greater or less than?
ftrapcc_gl_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.b ftrapcc_gl_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_gl_done:
rts # no; do nothing
#
# not (greater or less than):
#
# NANvZ
#
ftrapcc_ngl:
fbngl.w ftrapcc_ngl_yes # not (greater or less than)?
ftrapcc_ngl_no:
rts # do nothing
ftrapcc_ngl_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# greater, less, or equal:
# ___
# NAN
#
ftrapcc_gle:
fbgle.w ftrapcc_trap # greater, less, or equal?
ftrapcc_gle_no:
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
rts # no; do nothing
#
# not (greater, less, or equal):
#
# NAN
#
ftrapcc_ngle:
fbngle.w ftrapcc_ngle_yes # not (greater, less, or equal)?
ftrapcc_ngle_no:
rts # do nothing
ftrapcc_ngle_yes:
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#########################################################################
# #
# Miscellaneous tests #
# #
# For the miscellaneous tests, the result is set based on the floating #
# point condition codes. All predicates except "false" and "true" #
# can set the BSUN exception. #
# #
#########################################################################
#
# false:
#
# False
#
ftrapcc_f:
rts # do nothing
#
# true:
#
# True
#
ftrapcc_t:
bra.w ftrapcc_trap # go take trap
#
# signalling false:
#
# False
#
ftrapcc_sf:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.b ftrapcc_sf_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_sf_done:
rts # no; do nothing
#
# signalling true:
#
# True
#
ftrapcc_st:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# signalling equal:
#
# Z
#
ftrapcc_seq:
fbseq.w ftrapcc_seq_yes # signalling equal?
ftrapcc_seq_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w ftrapcc_seq_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_seq_done:
rts # no; do nothing
ftrapcc_seq_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#
# signalling not equal:
# _
# Z
#
ftrapcc_sneq:
fbsneq.w ftrapcc_sneq_yes # signalling not equal?
ftrapcc_sneq_no:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w ftrapcc_sneq_no_done # no; go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
ftrapcc_sneq_no_done:
rts # do nothing
ftrapcc_sneq_yes:
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w ftrapcc_trap # no; go take trap
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
bne.w ftrapcc_bsun # yes
bra.w ftrapcc_trap # no; go take trap
#########################################################################
# #
# IEEE Aware tests #
# #
# For the IEEE aware tests, we only have to set the result based on the #
# floating point condition codes. The BSUN exception will not be #
# set for any of these tests. #
# #
#########################################################################
#
# ordered greater than:
# _______
# NANvZvN
#
ftrapcc_ogt:
fbogt.w ftrapcc_trap # ordered greater than?
ftrapcc_ogt_no:
rts # do nothing
#
# unordered or less or equal:
# _______
# NANvZvN
#
ftrapcc_ule:
fbule.w ftrapcc_trap # unordered or less or equal?
ftrapcc_ule_no:
rts # do nothing
#
# ordered greater than or equal:
# _____
# Zv(NANvN)
#
ftrapcc_oge:
fboge.w ftrapcc_trap # ordered greater than or equal?
ftrapcc_oge_no:
rts # do nothing
#
# unordered or less than:
# _
# NANv(N^Z)
#
ftrapcc_ult:
fbult.w ftrapcc_trap # unordered or less than?
ftrapcc_ult_no:
rts # do nothing
#
# ordered less than:
# _____
# N^(NANvZ)
#
ftrapcc_olt:
fbolt.w ftrapcc_trap # ordered less than?
ftrapcc_olt_no:
rts # do nothing
#
# unordered or greater or equal:
#
# NANvZvN
#
ftrapcc_uge:
fbuge.w ftrapcc_trap # unordered or greater or equal?
ftrapcc_uge_no:
rts # do nothing
#
# ordered less than or equal:
# ___
# Zv(N^NAN)
#
ftrapcc_ole:
fbole.w ftrapcc_trap # ordered less than or equal?
ftrapcc_ole_no:
rts # do nothing
#
# unordered or greater than:
# ___
# NANv(NvZ)
#
ftrapcc_ugt:
fbugt.w ftrapcc_trap # unordered or greater than?
ftrapcc_ugt_no:
rts # do nothing
#
# ordered greater or less than:
# _____
# NANvZ
#
ftrapcc_ogl:
fbogl.w ftrapcc_trap # ordered greater or less than?
ftrapcc_ogl_no:
rts # do nothing
#
# unordered or equal:
#
# NANvZ
#
ftrapcc_ueq:
fbueq.w ftrapcc_trap # unordered or equal?
ftrapcc_ueq_no:
rts # do nothing
#
# ordered:
# ___
# NAN
#
ftrapcc_or:
fbor.w ftrapcc_trap # ordered?
ftrapcc_or_no:
rts # do nothing
#
# unordered:
#
# NAN
#
ftrapcc_un:
fbun.w ftrapcc_trap # unordered?
ftrapcc_un_no:
rts # do nothing
#######################################################################
# the bsun exception bit was not set.
# we will need to jump to the ftrapcc vector. the stack frame
# is the same size as that of the fp unimp instruction. the
# only difference is that the <ea> field should hold the PC
# of the ftrapcc instruction and the vector offset field
# should denote the ftrapcc trap.
ftrapcc_trap:
mov.b &ftrapcc_flg,SPCOND_FLG(%a6)
rts
# the emulation routine set bsun and BSUN was enabled. have to
# fix stack and jump to the bsun handler.
# let the caller of this routine shift the stack frame up to
# eliminate the effective address field.
ftrapcc_bsun:
mov.b &fbsun_flg,SPCOND_FLG(%a6)
rts
#########################################################################
# fscc(): routine to emulate the fscc instruction #
# #
# XDEF **************************************************************** #
# _fscc() #
# #
# XREF **************************************************************** #
# store_dreg_b() - store result to data register file #
# dec_areg() - decrement an areg for -(an) mode #
# inc_areg() - increment an areg for (an)+ mode #
# _dmem_write_byte() - store result to memory #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# none #
# #
# ALGORITHM *********************************************************** #
# This routine checks which conditional predicate is specified by #
# the stacked fscc instruction opcode and then branches to a routine #
# for that predicate. The corresponding fbcc instruction is then used #
# to see whether the condition (specified by the stacked FPSR) is true #
# or false. #
# If a BSUN exception should be indicated, the BSUN and AIOP #
# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
# enabled BSUN should not be flagged and the predicate is true, then #
# the result is stored to the data register file or memory #
# #
#########################################################################
global _fscc
_fscc:
mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
clr.l %d1 # clear scratch reg
mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
ror.l &0x8,%d1 # rotate to top byte
fmov.l %d1,%fpsr # insert into FPSR
mov.w (tbl_fscc.b,%pc,%d0.w*2),%d1 # load table
jmp (tbl_fscc.b,%pc,%d1.w) # jump to fscc routine
tbl_fscc:
short fscc_f - tbl_fscc # 00
short fscc_eq - tbl_fscc # 01
short fscc_ogt - tbl_fscc # 02
short fscc_oge - tbl_fscc # 03
short fscc_olt - tbl_fscc # 04
short fscc_ole - tbl_fscc # 05
short fscc_ogl - tbl_fscc # 06
short fscc_or - tbl_fscc # 07
short fscc_un - tbl_fscc # 08
short fscc_ueq - tbl_fscc # 09
short fscc_ugt - tbl_fscc # 10
short fscc_uge - tbl_fscc # 11
short fscc_ult - tbl_fscc # 12
short fscc_ule - tbl_fscc # 13
short fscc_neq - tbl_fscc # 14
short fscc_t - tbl_fscc # 15
short fscc_sf - tbl_fscc # 16
short fscc_seq - tbl_fscc # 17
short fscc_gt - tbl_fscc # 18
short fscc_ge - tbl_fscc # 19
short fscc_lt - tbl_fscc # 20
short fscc_le - tbl_fscc # 21
short fscc_gl - tbl_fscc # 22
short fscc_gle - tbl_fscc # 23
short fscc_ngle - tbl_fscc # 24
short fscc_ngl - tbl_fscc # 25
short fscc_nle - tbl_fscc # 26
short fscc_nlt - tbl_fscc # 27
short fscc_nge - tbl_fscc # 28
short fscc_ngt - tbl_fscc # 29
short fscc_sneq - tbl_fscc # 30
short fscc_st - tbl_fscc # 31
#########################################################################
# #
# IEEE Nonaware tests #
# #
# For the IEEE nonaware tests, we set the result based on the #
# floating point condition codes. In addition, we check to see #
# if the NAN bit is set, in which case BSUN and AIOP will be set. #
# #
# The cases EQ and NE are shared by the Aware and Nonaware groups #
# and are incapable of setting the BSUN exception bit. #
# #
# Typically, only one of the two possible branch directions could #
# have the NAN bit set. #
# #
#########################################################################
#
# equal:
#
# Z
#
fscc_eq:
fbeq.w fscc_eq_yes # equal?
fscc_eq_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_eq_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# not equal:
# _
# Z
#
fscc_neq:
fbneq.w fscc_neq_yes # not equal?
fscc_neq_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_neq_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# greater than:
# _______
# NANvZvN
#
fscc_gt:
fbgt.w fscc_gt_yes # greater than?
fscc_gt_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_gt_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# not greater than:
#
# NANvZvN
#
fscc_ngt:
fbngt.w fscc_ngt_yes # not greater than?
fscc_ngt_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ngt_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# greater than or equal:
# _____
# Zv(NANvN)
#
fscc_ge:
fbge.w fscc_ge_yes # greater than or equal?
fscc_ge_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_ge_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# not (greater than or equal):
# _
# NANv(N^Z)
#
fscc_nge:
fbnge.w fscc_nge_yes # not (greater than or equal)?
fscc_nge_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_nge_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# less than:
# _____
# N^(NANvZ)
#
fscc_lt:
fblt.w fscc_lt_yes # less than?
fscc_lt_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_lt_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# not less than:
# _
# NANv(ZvN)
#
fscc_nlt:
fbnlt.w fscc_nlt_yes # not less than?
fscc_nlt_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_nlt_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# less than or equal:
# ___
# Zv(N^NAN)
#
fscc_le:
fble.w fscc_le_yes # less than or equal?
fscc_le_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_le_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# not (less than or equal):
# ___
# NANv(NvZ)
#
fscc_nle:
fbnle.w fscc_nle_yes # not (less than or equal)?
fscc_nle_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_nle_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# greater or less than:
# _____
# NANvZ
#
fscc_gl:
fbgl.w fscc_gl_yes # greater or less than?
fscc_gl_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_gl_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# not (greater or less than):
#
# NANvZ
#
fscc_ngl:
fbngl.w fscc_ngl_yes # not (greater or less than)?
fscc_ngl_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ngl_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# greater, less, or equal:
# ___
# NAN
#
fscc_gle:
fbgle.w fscc_gle_yes # greater, less, or equal?
fscc_gle_no:
clr.b %d0 # set false
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_gle_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# not (greater, less, or equal):
#
# NAN
#
fscc_ngle:
fbngle.w fscc_ngle_yes # not (greater, less, or equal)?
fscc_ngle_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ngle_yes:
st %d0 # set true
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#########################################################################
# #
# Miscellaneous tests #
# #
# For the miscellaneous tests, the result is set based on the floating #
# point condition codes. All predicates except "false" and "true" #
# can set the BSUN exception. #
# #
#########################################################################
#
# false:
#
# False
#
fscc_f:
clr.b %d0 # set false
bra.w fscc_done # go finish
#
# true:
#
# True
#
fscc_t:
st %d0 # set true
bra.w fscc_done # go finish
#
# signalling false:
#
# False
#
fscc_sf:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# signalling true:
#
# True
#
fscc_st:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# signalling equal:
#
# Z
#
fscc_seq:
fbseq.w fscc_seq_yes # signalling equal?
fscc_seq_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_seq_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#
# signalling not equal:
# _
# Z
#
fscc_sneq:
fbsneq.w fscc_sneq_yes # signalling not equal?
fscc_sneq_no:
clr.b %d0 # set false
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
fscc_sneq_yes:
st %d0 # set true
btst &nan_bit, FPSR_CC(%a6) # is NAN set?
beq.w fscc_done # no;go finish
ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
bra.w fscc_chk_bsun # go finish
#########################################################################
# #
# IEEE Aware tests #
# #
# For the IEEE aware tests, we only have to set the result based on the #
# floating point condition codes. The BSUN exception will not be #
# set for any of these tests. #
# #
#########################################################################
#
# ordered greater than:
# _______
# NANvZvN
#
fscc_ogt:
fbogt.w fscc_ogt_yes # ordered greater than?
fscc_ogt_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ogt_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered or less or equal:
# _______
# NANvZvN
#
fscc_ule:
fbule.w fscc_ule_yes # unordered or less or equal?
fscc_ule_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ule_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# ordered greater than or equal:
# _____
# Zv(NANvN)
#
fscc_oge:
fboge.w fscc_oge_yes # ordered greater than or equal?
fscc_oge_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_oge_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered or less than:
# _
# NANv(N^Z)
#
fscc_ult:
fbult.w fscc_ult_yes # unordered or less than?
fscc_ult_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ult_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# ordered less than:
# _____
# N^(NANvZ)
#
fscc_olt:
fbolt.w fscc_olt_yes # ordered less than?
fscc_olt_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_olt_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered or greater or equal:
#
# NANvZvN
#
fscc_uge:
fbuge.w fscc_uge_yes # unordered or greater or equal?
fscc_uge_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_uge_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# ordered less than or equal:
# ___
# Zv(N^NAN)
#
fscc_ole:
fbole.w fscc_ole_yes # ordered less than or equal?
fscc_ole_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ole_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered or greater than:
# ___
# NANv(NvZ)
#
fscc_ugt:
fbugt.w fscc_ugt_yes # unordered or greater than?
fscc_ugt_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ugt_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# ordered greater or less than:
# _____
# NANvZ
#
fscc_ogl:
fbogl.w fscc_ogl_yes # ordered greater or less than?
fscc_ogl_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ogl_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered or equal:
#
# NANvZ
#
fscc_ueq:
fbueq.w fscc_ueq_yes # unordered or equal?
fscc_ueq_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_ueq_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# ordered:
# ___
# NAN
#
fscc_or:
fbor.w fscc_or_yes # ordered?
fscc_or_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_or_yes:
st %d0 # set true
bra.w fscc_done # go finish
#
# unordered:
#
# NAN
#
fscc_un:
fbun.w fscc_un_yes # unordered?
fscc_un_no:
clr.b %d0 # set false
bra.w fscc_done # go finish
fscc_un_yes:
st %d0 # set true
bra.w fscc_done # go finish
#######################################################################
#
# the bsun exception bit was set. now, check to see if BSUN
# is enabled. if so, don't store result and correct stack frame
# for a bsun exception.
#
fscc_chk_bsun:
btst &bsun_bit,FPCR_ENABLE(%a6) # was BSUN set?
bne.w fscc_bsun
#
# the bsun exception bit was not set.
# the result has been selected.
# now, check to see if the result is to be stored in the data register
# file or in memory.
#
fscc_done:
mov.l %d0,%a0 # save result for a moment
mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword
mov.l %d1,%d0 # make a copy
andi.b &0x38,%d1 # extract src mode
bne.b fscc_mem_op # it's a memory operation
mov.l %d0,%d1
andi.w &0x7,%d1 # pass index in d1
mov.l %a0,%d0 # pass result in d0
bsr.l store_dreg_b # save result in regfile
rts
#
# the stacked <ea> is correct with the exception of:
# -> Dn : <ea> is garbage
#
# if the addressing mode is post-increment or pre-decrement,
# then the address registers have not been updated.
#
fscc_mem_op:
cmpi.b %d1,&0x18 # is <ea> (An)+ ?
beq.b fscc_mem_inc # yes
cmpi.b %d1,&0x20 # is <ea> -(An) ?
beq.b fscc_mem_dec # yes
mov.l %a0,%d0 # pass result in d0
mov.l EXC_EA(%a6),%a0 # fetch <ea>
bsr.l _dmem_write_byte # write result byte
tst.l %d1 # did dstore fail?
bne.w fscc_err # yes
rts
# addressing mode is post-increment. write the result byte. if the write
# fails then don't update the address register. if write passes then
# call inc_areg() to update the address register.
fscc_mem_inc:
mov.l %a0,%d0 # pass result in d0
mov.l EXC_EA(%a6),%a0 # fetch <ea>
bsr.l _dmem_write_byte # write result byte
tst.l %d1 # did dstore fail?
bne.w fscc_err # yes
mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
andi.w &0x7,%d1 # pass index in d1
movq.l &0x1,%d0 # pass amt to inc by
bsr.l inc_areg # increment address register
rts
# addressing mode is pre-decrement. write the result byte. if the write
# fails then don't update the address register. if the write passes then
# call dec_areg() to update the address register.
fscc_mem_dec:
mov.l %a0,%d0 # pass result in d0
mov.l EXC_EA(%a6),%a0 # fetch <ea>
bsr.l _dmem_write_byte # write result byte
tst.l %d1 # did dstore fail?
bne.w fscc_err # yes
mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
andi.w &0x7,%d1 # pass index in d1
movq.l &0x1,%d0 # pass amt to dec by
bsr.l dec_areg # decrement address register
rts
# the emulation routine set bsun and BSUN was enabled. have to
# fix stack and jump to the bsun handler.
# let the caller of this routine shift the stack frame up to
# eliminate the effective address field.
fscc_bsun:
mov.b &fbsun_flg,SPCOND_FLG(%a6)
rts
# the byte write to memory has failed. pass the failing effective address
# and a FSLW to funimp_dacc().
fscc_err:
mov.w &0x00a1,EXC_VOFF(%a6)
bra.l facc_finish
#########################################################################
# XDEF **************************************************************** #
# fmovm_dynamic(): emulate "fmovm" dynamic instruction #
# #
# XREF **************************************************************** #
# fetch_dreg() - fetch data register #
# {i,d,}mem_read() - fetch data from memory #
# _mem_write() - write data to memory #
# iea_iacc() - instruction memory access error occurred #
# iea_dacc() - data memory access error occurred #
# restore() - restore An index regs if access error occurred #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If instr is "fmovm Dn,-(A7)" from supervisor mode, #
# d0 = size of dump #
# d1 = Dn #
# Else if instruction access error, #
# d0 = FSLW #
# Else if data access error, #
# d0 = FSLW #
# a0 = address of fault #
# Else #
# none. #
# #
# ALGORITHM *********************************************************** #
# The effective address must be calculated since this is entered #
# from an "Unimplemented Effective Address" exception handler. So, we #
# have our own fcalc_ea() routine here. If an access error is flagged #
# by a _{i,d,}mem_read() call, we must exit through the special #
# handler. #
# The data register is determined and its value loaded to get the #
# string of FP registers affected. This value is used as an index into #
# a lookup table such that we can determine the number of bytes #
# involved. #
# If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used #
# to read in all FP values. Again, _mem_read() may fail and require a #
# special exit. #
# If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used #
# to write all FP values. _mem_write() may also fail. #
# If the instruction is "fmovm.x DN,-(a7)" from supervisor mode, #
# then we return the size of the dump and the string to the caller #
# so that the move can occur outside of this routine. This special #
# case is required so that moves to the system stack are handled #
# correctly. #
# #
# DYNAMIC: #
# fmovm.x dn, <ea> #
# fmovm.x <ea>, dn #
# #
# <WORD 1> <WORD2> #
# 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
# #
# & = (0): predecrement addressing mode #
# (1): postincrement or control addressing mode #
# @ = (0): move listed regs from memory to the FPU #
# (1): move listed regs from the FPU to memory #
# $$$ : index of data register holding reg select mask #
# #
# NOTES: #
# If the data register holds a zero, then the #
# instruction is a nop. #
# #
#########################################################################
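# (worked example, derived from the encoding diagram above as a sanity
#  check: "fmovm.x d0,-(sp)" would encode as 0xf227,0xe800 -- opword
#  0xf200 | <ea> 0x27 for -(a7), extension word with @=1, &=0, $$$=000.)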
global fmovm_dynamic
fmovm_dynamic:
# extract the data register in which the bit string resides...
mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
andi.w &0x70,%d1 # extract reg bits
lsr.b &0x4,%d1 # shift into lo bits
# fetch the bit string into d0...
bsr.l fetch_dreg # fetch reg string
andi.l &0x000000ff,%d0 # keep only lo byte
mov.l %d0,-(%sp) # save strg
mov.b (tbl_fmovm_size.w,%pc,%d0),%d0
mov.l %d0,-(%sp) # save size
bsr.l fmovm_calc_ea # calculate <ea>
mov.l (%sp)+,%d0 # restore size
mov.l (%sp)+,%d1 # restore strg
# if the bit string is a zero, then the operation is a no-op
# but, make sure that we've calculated ea and advanced the opword pointer
beq.w fmovm_data_done
# separate move ins from move outs...
btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
beq.w fmovm_data_in # it's a move out
#############
# MOVE OUT: #
#############
fmovm_data_out:
btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
bne.w fmovm_out_ctrl # control
############################
fmovm_out_predec:
# for predecrement mode, the bit string is the opposite of both control
# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
# here, we convert it to be just like the others...
mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
btst &0x5,EXC_SR(%a6) # user or supervisor mode?
beq.b fmovm_out_ctrl # user
fmovm_out_predec_s:
cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
bne.b fmovm_out_ctrl
# the operation was unfortunately an: fmovm.x dn,-(sp)
# called from supervisor mode.
# we're also passing "size" and "strg" back to the calling routine
rts
############################
fmovm_out_ctrl:
mov.l %a0,%a1 # move <ea> to a1
sub.l %d0,%sp # subtract size of dump
lea (%sp),%a0
tst.b %d1 # should FP0 be moved?
bpl.b fmovm_out_ctrl_fp1 # no
mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
mov.l 0x4+EXC_FP0(%a6),(%a0)+
mov.l 0x8+EXC_FP0(%a6),(%a0)+
fmovm_out_ctrl_fp1:
lsl.b &0x1,%d1 # should FP1 be moved?
bpl.b fmovm_out_ctrl_fp2 # no
mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
mov.l 0x4+EXC_FP1(%a6),(%a0)+
mov.l 0x8+EXC_FP1(%a6),(%a0)+
fmovm_out_ctrl_fp2:
lsl.b &0x1,%d1 # should FP2 be moved?
bpl.b fmovm_out_ctrl_fp3 # no
fmovm.x &0x20,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp3:
lsl.b &0x1,%d1 # should FP3 be moved?
bpl.b fmovm_out_ctrl_fp4 # no
fmovm.x &0x10,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp4:
lsl.b &0x1,%d1 # should FP4 be moved?
bpl.b fmovm_out_ctrl_fp5 # no
fmovm.x &0x08,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp5:
lsl.b &0x1,%d1 # should FP5 be moved?
bpl.b fmovm_out_ctrl_fp6 # no
fmovm.x &0x04,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp6:
lsl.b &0x1,%d1 # should FP6 be moved?
bpl.b fmovm_out_ctrl_fp7 # no
fmovm.x &0x02,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_fp7:
lsl.b &0x1,%d1 # should FP7 be moved?
bpl.b fmovm_out_ctrl_done # no
fmovm.x &0x01,(%a0) # yes
add.l &0xc,%a0
fmovm_out_ctrl_done:
mov.l %a1,L_SCR1(%a6)
lea (%sp),%a0 # pass: supervisor src
mov.l %d0,-(%sp) # save size
bsr.l _dmem_write # copy data to user mem
mov.l (%sp)+,%d0
add.l %d0,%sp # clear fpreg data from stack
tst.l %d1 # did dstore err?
bne.w fmovm_out_err # yes
rts
############
# MOVE IN: #
############
fmovm_data_in:
mov.l %a0,L_SCR1(%a6)
sub.l %d0,%sp # make room for fpregs
lea (%sp),%a1
mov.l %d1,-(%sp) # save bit string for later
mov.l %d0,-(%sp) # save # of bytes
bsr.l _dmem_read # copy data from user mem
mov.l (%sp)+,%d0 # retrieve # of bytes
tst.l %d1 # did dfetch fail?
bne.w fmovm_in_err # yes
mov.l (%sp)+,%d1 # load bit string
lea (%sp),%a0 # addr of stack
tst.b %d1 # should FP0 be moved?
bpl.b fmovm_data_in_fp1 # no
mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
mov.l (%a0)+,0x4+EXC_FP0(%a6)
mov.l (%a0)+,0x8+EXC_FP0(%a6)
fmovm_data_in_fp1:
lsl.b &0x1,%d1 # should FP1 be moved?
bpl.b fmovm_data_in_fp2 # no
mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
mov.l (%a0)+,0x4+EXC_FP1(%a6)
mov.l (%a0)+,0x8+EXC_FP1(%a6)
fmovm_data_in_fp2:
lsl.b &0x1,%d1 # should FP2 be moved?
bpl.b fmovm_data_in_fp3 # no
fmovm.x (%a0)+,&0x20 # yes
fmovm_data_in_fp3:
lsl.b &0x1,%d1 # should FP3 be moved?
bpl.b fmovm_data_in_fp4 # no
fmovm.x (%a0)+,&0x10 # yes
fmovm_data_in_fp4:
lsl.b &0x1,%d1 # should FP4 be moved?
bpl.b fmovm_data_in_fp5 # no
fmovm.x (%a0)+,&0x08 # yes
fmovm_data_in_fp5:
lsl.b &0x1,%d1 # should FP5 be moved?
bpl.b fmovm_data_in_fp6 # no
fmovm.x (%a0)+,&0x04 # yes
fmovm_data_in_fp6:
lsl.b &0x1,%d1 # should FP6 be moved?
bpl.b fmovm_data_in_fp7 # no
fmovm.x (%a0)+,&0x02 # yes
fmovm_data_in_fp7:
lsl.b &0x1,%d1 # should FP7 be moved?
bpl.b fmovm_data_in_done # no
fmovm.x (%a0)+,&0x01 # yes
fmovm_data_in_done:
add.l %d0,%sp # remove fpregs from stack
rts
#####################################
fmovm_data_done:
rts
##############################################################################
#
# table indexed by the operation's bit string that gives the number
# of bytes that will be moved.
#
# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
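# ex: a bit string of 0xc1 has three bits set, so its entry below is
# 3 * 12 = 36 = 0x24 bytes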
#
tbl_fmovm_size:
byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
#
# table to convert a pre-decrement bit string into a post-increment
# or control bit string.
# ex: 0x00 ==> 0x00
# 0x01 ==> 0x80
# 0x02 ==> 0x40
# .
# .
# 0xfd ==> 0xbf
# 0xfe ==> 0x7f
# 0xff ==> 0xff
#
tbl_fmovm_convert:
byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
global fmovm_calc_ea
###############################################
# _fmovm_calc_ea: calculate effective address #
###############################################
fmovm_calc_ea:
mov.l %d0,%a0 # move # bytes to a0
# currently, MODE and REG are taken from the EXC_OPWORD. this could be
# easily changed if they were inputs passed in registers.
mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
mov.w %d0,%d1 # make a copy
andi.w &0x3f,%d0 # extract mode and reg fields
andi.l &0x7,%d1 # extract reg field
# jump to the corresponding function for each {MODE,REG} pair.
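# (ex: for an <ea> of (a2), MODE=010 and REG=010, so d0 = 0x12 and the
#  jmp below lands on the faddr_ind_a2 entry of the table)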
mov.w (tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
jmp (tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
swbeg &64
tbl_fea_mode:
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short faddr_ind_a0 - tbl_fea_mode
short faddr_ind_a1 - tbl_fea_mode
short faddr_ind_a2 - tbl_fea_mode
short faddr_ind_a3 - tbl_fea_mode
short faddr_ind_a4 - tbl_fea_mode
short faddr_ind_a5 - tbl_fea_mode
short faddr_ind_a6 - tbl_fea_mode
short faddr_ind_a7 - tbl_fea_mode
short faddr_ind_p_a0 - tbl_fea_mode
short faddr_ind_p_a1 - tbl_fea_mode
short faddr_ind_p_a2 - tbl_fea_mode
short faddr_ind_p_a3 - tbl_fea_mode
short faddr_ind_p_a4 - tbl_fea_mode
short faddr_ind_p_a5 - tbl_fea_mode
short faddr_ind_p_a6 - tbl_fea_mode
short faddr_ind_p_a7 - tbl_fea_mode
short faddr_ind_m_a0 - tbl_fea_mode
short faddr_ind_m_a1 - tbl_fea_mode
short faddr_ind_m_a2 - tbl_fea_mode
short faddr_ind_m_a3 - tbl_fea_mode
short faddr_ind_m_a4 - tbl_fea_mode
short faddr_ind_m_a5 - tbl_fea_mode
short faddr_ind_m_a6 - tbl_fea_mode
short faddr_ind_m_a7 - tbl_fea_mode
short faddr_ind_disp_a0 - tbl_fea_mode
short faddr_ind_disp_a1 - tbl_fea_mode
short faddr_ind_disp_a2 - tbl_fea_mode
short faddr_ind_disp_a3 - tbl_fea_mode
short faddr_ind_disp_a4 - tbl_fea_mode
short faddr_ind_disp_a5 - tbl_fea_mode
short faddr_ind_disp_a6 - tbl_fea_mode
short faddr_ind_disp_a7 - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short faddr_ind_ext - tbl_fea_mode
short fabs_short - tbl_fea_mode
short fabs_long - tbl_fea_mode
short fpc_ind - tbl_fea_mode
short fpc_ind_ext - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
short tbl_fea_mode - tbl_fea_mode
###################################
# Address register indirect: (An) #
###################################
faddr_ind_a0:
mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
rts
faddr_ind_a1:
mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
rts
faddr_ind_a2:
mov.l %a2,%a0 # Get current a2
rts
faddr_ind_a3:
mov.l %a3,%a0 # Get current a3
rts
faddr_ind_a4:
mov.l %a4,%a0 # Get current a4
rts
faddr_ind_a5:
mov.l %a5,%a0 # Get current a5
rts
faddr_ind_a6:
mov.l (%a6),%a0 # Get current a6
rts
faddr_ind_a7:
mov.l EXC_A7(%a6),%a0 # Get current a7
rts
#####################################################
# Address register indirect w/ postincrement: (An)+ #
#####################################################
faddr_ind_p_a0:
mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a1:
mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a2:
mov.l %a2,%d0 # Get current a2
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a2 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a3:
mov.l %a3,%d0 # Get current a3
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a3 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a4:
mov.l %a4,%d0 # Get current a4
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a4 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a5:
mov.l %a5,%d0 # Get current a5
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,%a5 # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a6:
mov.l (%a6),%d0 # Get current a6
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,(%a6) # Save incr value
mov.l %d0,%a0
rts
faddr_ind_p_a7:
mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l EXC_A7(%a6),%d0 # Get current a7
mov.l %d0,%d1
add.l %a0,%d1 # Increment
mov.l %d1,EXC_A7(%a6) # Save incr value
mov.l %d0,%a0
rts
####################################################
# Address register indirect w/ predecrement: -(An) #
####################################################
faddr_ind_m_a0:
mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a1:
mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a2:
mov.l %a2,%d0 # Get current a2
sub.l %a0,%d0 # Decrement
mov.l %d0,%a2 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a3:
mov.l %a3,%d0 # Get current a3
sub.l %a0,%d0 # Decrement
mov.l %d0,%a3 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a4:
mov.l %a4,%d0 # Get current a4
sub.l %a0,%d0 # Decrement
mov.l %d0,%a4 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a5:
mov.l %a5,%d0 # Get current a5
sub.l %a0,%d0 # Decrement
mov.l %d0,%a5 # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a6:
mov.l (%a6),%d0 # Get current a6
sub.l %a0,%d0 # Decrement
mov.l %d0,(%a6) # Save decr value
mov.l %d0,%a0
rts
faddr_ind_m_a7:
mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l EXC_A7(%a6),%d0 # Get current a7
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A7(%a6) # Save decr value
mov.l %d0,%a0
rts
########################################################
# Address register indirect w/ displacement: (d16, An) #
########################################################
faddr_ind_disp_a0:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
rts
faddr_ind_disp_a1:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
rts
faddr_ind_disp_a2:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a2,%a0 # a2 + d16
rts
faddr_ind_disp_a3:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a3,%a0 # a3 + d16
rts
faddr_ind_disp_a4:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a4,%a0 # a4 + d16
rts
faddr_ind_disp_a5:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l %a5,%a0 # a5 + d16
rts
faddr_ind_disp_a6:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l (%a6),%a0 # a6 + d16
rts
faddr_ind_disp_a7:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A7(%a6),%a0 # a7 + d16
rts
########################################################################
# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
# " " " w/ " (base displacement): (bd, An, Xn) #
# Memory indirect postindexed: ([bd, An], Xn, od) #
# Memory indirect preindexed: ([bd, An, Xn], od) #
########################################################################
faddr_ind_ext:
addq.l &0x8,%d1
bsr.l fetch_dreg # fetch base areg
mov.l %d0,-(%sp)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch extword in d0
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l (%sp)+,%a0
btst &0x8,%d0
bne.w fcalc_mem_ind
mov.l %d0,L_SCR1(%a6) # hold opword
mov.l %d0,%d1
rol.w &0x4,%d1
andi.w &0xf,%d1 # extract index regno
# count on fetch_dreg() not to alter a0...
bsr.l fetch_dreg # fetch index
mov.l %d2,-(%sp) # save d2
mov.l L_SCR1(%a6),%d2 # fetch opword
btst &0xb,%d2 # is it word or long?
bne.b faii8_long
ext.l %d0 # sign extend word index
faii8_long:
mov.l %d2,%d1
rol.w &0x7,%d1
andi.l &0x3,%d1 # extract scale value
lsl.l %d1,%d0 # shift index by scale
extb.l %d2 # sign extend displacement
add.l %d2,%d0 # index + disp
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore old d2
rts
###########################
# Absolute short: (XXX).W #
###########################
fabs_short:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch short address
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # return <ea> in a0
rts
##########################
# Absolute long: (XXX).L #
##########################
fabs_long:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch long address
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,%a0 # return <ea> in a0
rts
#######################################################
# Program counter indirect w/ displacement: (d16, PC) #
#######################################################
fpc_ind:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch word displacement
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
# _imem_read_word() increased the extwptr by 2. need to adjust here.
subq.l &0x2,%a0 # adjust <ea>
rts
##########################################################
# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
# " " w/ " (base displacement): (bd, PC, An) #
# PC memory indirect postindexed: ([bd, PC], Xn, od) #
# PC memory indirect preindexed: ([bd, PC, Xn], od) #
##########################################################
fpc_ind_ext:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch ext word
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
subq.l &0x2,%a0 # adjust base
btst &0x8,%d0 # is disp only 8 bits?
bne.w fcalc_mem_ind # calc memory indirect
mov.l %d0,L_SCR1(%a6) # store opword
mov.l %d0,%d1 # make extword copy
rol.w &0x4,%d1 # rotate reg num into place
andi.w &0xf,%d1 # extract register number
# count on fetch_dreg() not to alter a0...
bsr.l fetch_dreg # fetch index
mov.l %d2,-(%sp) # save d2
mov.l L_SCR1(%a6),%d2 # fetch opword
btst &0xb,%d2 # is index word or long?
bne.b fpii8_long # long
ext.l %d0 # sign extend word index
fpii8_long:
mov.l %d2,%d1
rol.w &0x7,%d1 # rotate scale value into place
andi.l &0x3,%d1 # extract scale value
lsl.l %d1,%d0 # shift index by scale
extb.l %d2 # sign extend displacement
add.l %d2,%d0 # disp + index
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore temp register
rts
# d2 = index
# d3 = base
# d4 = od
# d5 = extword
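# Rough walk-through for a postindexed form such as ([bd,An],Xn,od):
# fcalc_index fetches and scales Xn, fno_base_sup/fget_word_bd read bd from
# the instruction stream and add it to the base, and, because the extension
# word selects postindexing (bit 2 set), the long at (An + bd) is read from
# data memory before the index and the outer displacement od are added to
# produce the final <ea>. The preindexed form adds the index before the
# memory read instead.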
fcalc_mem_ind:
btst &0x6,%d0 # is the index suppressed?
beq.b fcalc_index
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d5 # put extword in d5
mov.l %a0,%d3 # put base in d3
clr.l %d2 # yes, so index = 0
bra.b fbase_supp_ck
# index:
fcalc_index:
mov.l %d0,L_SCR1(%a6) # save d0 (opword)
bfextu %d0{&16:&4},%d1 # fetch dreg index
bsr.l fetch_dreg
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d2 # put index in d2
mov.l L_SCR1(%a6),%d5
mov.l %a0,%d3
btst &0xb,%d5 # is index word or long?
bne.b fno_ext
ext.l %d2
fno_ext:
bfextu %d5{&21:&2},%d0
lsl.l %d0,%d2
# base address (passed as parameter in d3):
# we clear the value here if it should actually be suppressed.
fbase_supp_ck:
btst &0x7,%d5 # is the base register suppressed?
beq.b fno_base_sup
clr.l %d3
# base displacement:
fno_base_sup:
bfextu %d5{&26:&2},%d0 # get bd size
# beq.l fmovm_error # if (size == 0) it's reserved
cmpi.b %d0,&0x2
blt.b fno_bd
beq.b fget_word_bd
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
bra.b fchk_ind
fget_word_bd:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
ext.l %d0 # sign extend bd
fchk_ind:
add.l %d0,%d3 # base += bd
# outer displacement:
fno_bd:
bfextu %d5{&30:&2},%d0 # is od suppressed?
beq.w faii_bd
cmpi.b %d0,&0x2
blt.b fnull_od
beq.b fword_od
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
bra.b fadd_them
fword_od:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # did ifetch fail?
bne.l fcea_iacc # yes
ext.l %d0 # sign extend od
bra.b fadd_them
fnull_od:
clr.l %d0
fadd_them:
mov.l %d0,%d4
btst &0x2,%d5 # pre or post indexing?
beq.b fpre_indexed
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # did dfetch fail?
bne.w fcea_err # yes
add.l %d2,%d0 # <ea> += index
add.l %d4,%d0 # <ea> += od
bra.b fdone_ea
fpre_indexed:
add.l %d2,%d3 # preindexing
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # did dfetch fail?
bne.w fcea_err # yes
add.l %d4,%d0 # ea += od
bra.b fdone_ea
faii_bd:
add.l %d2,%d3 # ea = (base + bd) + index
mov.l %d3,%d0
fdone_ea:
mov.l %d0,%a0
movm.l (%sp)+,&0x003c # restore d2-d5
rts
#########################################################
fcea_err:
mov.l %d3,%a0
movm.l (%sp)+,&0x003c # restore d2-d5
mov.w &0x0101,%d0
bra.l iea_dacc
fcea_iacc:
movm.l (%sp)+,&0x003c # restore d2-d5
bra.l iea_iacc
fmovm_out_err:
bsr.l restore
mov.w &0x00e1,%d0
bra.b fmovm_err
fmovm_in_err:
bsr.l restore
mov.w &0x0161,%d0
fmovm_err:
mov.l L_SCR1(%a6),%a0
bra.l iea_dacc
#########################################################################
# XDEF **************************************************************** #
# fmovm_ctrl(): emulate fmovm.l of control registers instr #
# #
# XREF **************************************************************** #
# _imem_read_long() - read longword from memory #
# iea_iacc() - _imem_read_long() failed; error recovery #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If _imem_read_long() doesn't fail: #
# USER_FPCR(a6) = new FPCR value #
# USER_FPSR(a6) = new FPSR value #
# USER_FPIAR(a6) = new FPIAR value #
# #
# ALGORITHM *********************************************************** #
# Decode the instruction type by looking at the extension word #
# in order to see how many control registers to fetch from memory. #
# Fetch them using _imem_read_long(). If this fetch fails, exit through #
# the special access error exit handler iea_iacc(). #
# #
# Instruction word decoding: #
# #
# fmovem.l #<data>, {FPIAR&|FPCR&|FPSR} #
# #
# WORD1 WORD2 #
# 1111 0010 00 111100 100$ $$00 0000 0000 #
# #
# $$$ (100): FPCR #
# (010): FPSR #
# (001): FPIAR #
# (000): FPIAR #
# #
#########################################################################
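# Example of the decoding below (values taken from the compares that follow):
# fmovem.l #<data>,fpcr/fpsr/fpiar sets $$$ = 111, so the upper byte of WORD2
# is $9c and fctrl_in_7 reads three longwords of immediate data; fpcr/fpsr
# gives $98 (fctrl_in_6) and fpcr/fpiar gives $94 (fctrl_in_5). The remaining
# two-register case (fpsr/fpiar) falls through to fctrl_in_3.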
global fmovm_ctrl
fmovm_ctrl:
mov.b EXC_EXTWORD(%a6),%d0 # fetch reg select bits
cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
beq.w fctrl_in_7 # yes
cmpi.b %d0,&0x98 # fpcr & fpsr ?
beq.w fctrl_in_6 # yes
cmpi.b %d0,&0x94 # fpcr & fpiar ?
beq.b fctrl_in_5 # yes
# fmovem.l #<data>, fpsr/fpiar
fctrl_in_3:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
rts
# fmovem.l #<data>, fpcr/fpiar
fctrl_in_5:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
rts
# fmovem.l #<data>, fpcr/fpsr
fctrl_in_6:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
rts
# fmovem.l #<data>, fpcr/fpsr/fpiar
fctrl_in_7:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPCR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPSR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch FPIAR from mem
tst.l %d1 # did ifetch fail?
bne.l iea_iacc # yes
mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to mem
rts
#########################################################################
# XDEF **************************************************************** #
# _dcalc_ea(): calc correct <ea> from <ea> stacked on exception #
# #
# XREF **************************************************************** #
# inc_areg() - increment an address register #
# dec_areg() - decrement an address register #
# #
# INPUT *************************************************************** #
# d0 = number of bytes to adjust <ea> by #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# "Dummy" CALCulate Effective Address: #
# The stacked <ea> for FP unimplemented instructions and opclass #
# two packed instructions is correct with the exception of... #
# #
# 1) -(An) : The register is not updated regardless of size. #
# Also, for extended precision and packed, the #
# stacked <ea> value is 8 bytes too big #
# 2) (An)+ : The register is not updated. #
# 3) #<data> : The upper longword of the immediate operand is #
# stacked; b,w,l, and s sizes are completely stacked. #
# d,x, and p are not. #
# #
#########################################################################
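# Illustration (sizes only; not tied to a particular instruction): for an
# extended-precision source accessed as -(An), d0 = 12 on entry, so dcea_pd
# backs An up by 12 via dec_areg(); since the stacked <ea> is 8 bytes too
# large for x and p operands, dcea_pd2 then subtracts 8 and writes the
# corrected address back to EXC_EA.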
global _dcalc_ea
_dcalc_ea:
mov.l %d0, %a0 # move # bytes to %a0
mov.b 1+EXC_OPWORD(%a6), %d0 # fetch opcode word
mov.l %d0, %d1 # make a copy
andi.w &0x38, %d0 # extract mode field
andi.l &0x7, %d1 # extract reg field
cmpi.b %d0,&0x18 # is mode (An)+ ?
beq.b dcea_pi # yes
cmpi.b %d0,&0x20 # is mode -(An) ?
beq.b dcea_pd # yes
or.w %d1,%d0 # concat mode,reg
cmpi.b %d0,&0x3c # is mode #<data>?
beq.b dcea_imm # yes
mov.l EXC_EA(%a6),%a0 # return <ea>
rts
# need to set immediate data flag here since we'll need to do
# an imem_read to fetch this later.
dcea_imm:
mov.b &immed_flg,SPCOND_FLG(%a6)
lea ([USER_FPIAR,%a6],0x4),%a0 # return <ea> = FPIAR + 4 (addr of immed data)
rts
# here, the <ea> is stacked correctly. however, we must update the
# address register...
dcea_pi:
mov.l %a0,%d0 # pass amt to inc by
bsr.l inc_areg # inc addr register
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
rts
# the <ea> is stacked correctly for all but extended and packed which
# the <ea>s are 8 bytes too large.
# it would make no sense to have a pre-decrement to a7 in supervisor
# mode so we don't even worry about this tricky case here : )
dcea_pd:
mov.l %a0,%d0 # pass amt to dec by
bsr.l dec_areg # dec addr register
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
cmpi.b %d0,&0xc # is opsize ext or packed?
beq.b dcea_pd2 # yes
rts
dcea_pd2:
sub.l &0x8,%a0 # correct <ea>
mov.l %a0,EXC_EA(%a6) # put correct <ea> on stack
rts
#########################################################################
# XDEF **************************************************************** #
# _calc_ea_fout(): calculate correct stacked <ea> for extended #
# and packed data opclass 3 operations. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# a0 = return correct effective address #
# #
# ALGORITHM *********************************************************** #
# For opclass 3 extended and packed data operations, the <ea> #
# stacked for the exception is incorrect for -(an) and (an)+ addressing #
# modes. Also, while we're at it, the address register itself must get #
# updated. #
# So, for -(an), we must subtract 8 off of the stacked <ea> value #
# and return that value as the correct <ea> and store that value in An. #
# For (an)+, the stacked <ea> is correct but we must adjust An by +12. #
# #
#########################################################################
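# Sketch of the two cases handled below: for fmove.x fp0,(a2)+ the stacked
# <ea> is already correct and ceaf_pi2 simply bumps a2 by 12; for
# fmove.x fp0,-(a2), ceaf_pd subtracts 8 from the stacked <ea>, returns the
# corrected address in a0, and ceaf_pd2 copies it into a2 as the updated
# register value. (a2 here is just an example register.)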
# This calc_ea is currently used to retrieve the correct <ea>
# for fmove outs of type extended and packed.
global _calc_ea_fout
_calc_ea_fout:
mov.b 1+EXC_OPWORD(%a6),%d0 # fetch opcode word
mov.l %d0,%d1 # make a copy
andi.w &0x38,%d0 # extract mode field
andi.l &0x7,%d1 # extract reg field
cmpi.b %d0,&0x18 # is mode (An)+ ?
beq.b ceaf_pi # yes
cmpi.b %d0,&0x20 # is mode -(An) ?
beq.w ceaf_pd # yes
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
rts
# (An)+ : extended and packed fmove out
# : stacked <ea> is correct
# : "An" not updated
ceaf_pi:
mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
mov.l EXC_EA(%a6),%a0
jmp (tbl_ceaf_pi.b,%pc,%d1.w*1)
swbeg &0x8
tbl_ceaf_pi:
short ceaf_pi0 - tbl_ceaf_pi
short ceaf_pi1 - tbl_ceaf_pi
short ceaf_pi2 - tbl_ceaf_pi
short ceaf_pi3 - tbl_ceaf_pi
short ceaf_pi4 - tbl_ceaf_pi
short ceaf_pi5 - tbl_ceaf_pi
short ceaf_pi6 - tbl_ceaf_pi
short ceaf_pi7 - tbl_ceaf_pi
ceaf_pi0:
addi.l &0xc,EXC_DREGS+0x8(%a6)
rts
ceaf_pi1:
addi.l &0xc,EXC_DREGS+0xc(%a6)
rts
ceaf_pi2:
add.l &0xc,%a2
rts
ceaf_pi3:
add.l &0xc,%a3
rts
ceaf_pi4:
add.l &0xc,%a4
rts
ceaf_pi5:
add.l &0xc,%a5
rts
ceaf_pi6:
addi.l &0xc,EXC_A6(%a6)
rts
ceaf_pi7:
mov.b &mia7_flg,SPCOND_FLG(%a6)
addi.l &0xc,EXC_A7(%a6)
rts
# -(An) : extended and packed fmove out
# : stacked <ea> = actual <ea> + 8
# : "An" not updated
ceaf_pd:
mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
mov.l EXC_EA(%a6),%a0
sub.l &0x8,%a0
sub.l &0x8,EXC_EA(%a6)
jmp (tbl_ceaf_pd.b,%pc,%d1.w*1)
swbeg &0x8
tbl_ceaf_pd:
short ceaf_pd0 - tbl_ceaf_pd
short ceaf_pd1 - tbl_ceaf_pd
short ceaf_pd2 - tbl_ceaf_pd
short ceaf_pd3 - tbl_ceaf_pd
short ceaf_pd4 - tbl_ceaf_pd
short ceaf_pd5 - tbl_ceaf_pd
short ceaf_pd6 - tbl_ceaf_pd
short ceaf_pd7 - tbl_ceaf_pd
ceaf_pd0:
mov.l %a0,EXC_DREGS+0x8(%a6)
rts
ceaf_pd1:
mov.l %a0,EXC_DREGS+0xc(%a6)
rts
ceaf_pd2:
mov.l %a0,%a2
rts
ceaf_pd3:
mov.l %a0,%a3
rts
ceaf_pd4:
mov.l %a0,%a4
rts
ceaf_pd5:
mov.l %a0,%a5
rts
ceaf_pd6:
mov.l %a0,EXC_A6(%a6)
rts
ceaf_pd7:
mov.l %a0,EXC_A7(%a6)
mov.b &mda7_flg,SPCOND_FLG(%a6)
rts
#########################################################################
# XDEF **************************************************************** #
# _load_fop(): load operand for unimplemented FP exception #
# #
# XREF **************************************************************** #
# set_tag_x() - determine ext prec optype tag #
# set_tag_s() - determine sgl prec optype tag #
# set_tag_d() - determine dbl prec optype tag #
# unnorm_fix() - convert normalized number to denorm or zero #
# norm() - normalize a denormalized number #
# get_packed() - fetch a packed operand from memory #
# _dcalc_ea() - calculate <ea>, fixing An in process #
# #
# _imem_read_{word,long}() - read from instruction memory #
# _dmem_read() - read from data memory #
# _dmem_read_{byte,word,long}() - read from data memory #
# #
# facc_in_{b,w,l,d,x}() - mem read failed; special exit point #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If memory access doesn't fail: #
# FP_SRC(a6) = source operand in extended precision #
# FP_DST(a6) = destination operand in extended precision #
# #
# ALGORITHM *********************************************************** #
# This is called from the Unimplemented FP exception handler in #
# order to load the source and maybe destination operand into #
# FP_SRC(a6) and FP_DST(a6). If the instruction was opclass zero, load #
# the source and destination from the FP register file. Set the optype #
# tags for both if dyadic, one for monadic. If a number is an UNNORM, #
# convert it to a DENORM or a ZERO. #
# If the instruction is opclass two (memory->reg), then fetch #
# the destination from the register file and the source operand from #
# memory. Tag and fix both as above w/ opclass zero instructions. #
# If the source operand is byte,word,long, or single, it may be #
# in the data register file. If it's actually out in memory, use one of #
# the mem_read() routines to fetch it. If the mem_read() access returns #
# a failing value, exit through the special facc_in() routine which #
# will create an access error exception frame from the current exception #
# frame. #
# Immediate data and regular data accesses are separated because #
# if an immediate data access fails, the resulting fault status #
# longword stacked for the access error exception must have the #
# instruction bit set. #
# #
#########################################################################
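# Rough walk-through (example instruction only): fadd.s (a0)+,fp3 is opclass
# '010, so the btst of bit 6 in the upper command-word byte below sends us to
# op010; the dyadic test passes, so fp3 is loaded and tagged via
# load_fpn2()/set_tag_x(), and the nonzero <ea> mode field then routes the
# single-precision source through fetch_from_mem/load_sgl.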
global _load_fop
_load_fop:
# 15 13 12 10 9 7 6 0
# / \ / \ / \ / \
# ---------------------------------
# | opclass | RX | RY | EXTENSION | (2nd word of general FP instruction)
# ---------------------------------
#
# bfextu EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
# cmpi.b %d0, &0x2 # which class is it? ('000,'010,'011)
# beq.w op010 # handle <ea> -> fpn
# bgt.w op011 # handle fpn -> <ea>
# we're not using op011 for now...
btst &0x6,EXC_CMDREG(%a6)
bne.b op010
############################
# OPCLASS '000: reg -> reg #
############################
op000:
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension word lo
btst &0x5,%d0 # testing extension bits
beq.b op000_src # (bit 5 == 0) => monadic
btst &0x4,%d0 # (bit 5 == 1)
beq.b op000_dst # (bit 4 == 0) => dyadic
and.w &0x007f,%d0 # extract extension bits {6:0}
cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
bne.b op000_src # no; it's monadic
op000_dst:
bfextu EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
bsr.l load_fpn2 # fetch dst fpreg into FP_DST
bsr.l set_tag_x # get dst optype tag
cmpi.b %d0, &UNNORM # is dst fpreg an UNNORM?
beq.b op000_dst_unnorm # yes
op000_dst_cont:
mov.b %d0, DTAG(%a6) # store the dst optype tag
op000_src:
bfextu EXC_CMDREG(%a6){&3:&3}, %d0 # extract src field
bsr.l load_fpn1 # fetch src fpreg into FP_SRC
bsr.l set_tag_x # get src optype tag
cmpi.b %d0, &UNNORM # is src fpreg an UNNORM?
beq.b op000_src_unnorm # yes
op000_src_cont:
mov.b %d0, STAG(%a6) # store the src optype tag
rts
op000_dst_unnorm:
bsr.l unnorm_fix # fix the dst UNNORM
bra.b op000_dst_cont
op000_src_unnorm:
bsr.l unnorm_fix # fix the src UNNORM
bra.b op000_src_cont
#############################
# OPCLASS '010: <ea> -> reg #
#############################
op010:
mov.w EXC_CMDREG(%a6),%d0 # fetch extension word
btst &0x5,%d0 # testing extension bits
beq.b op010_src # (bit 5 == 0) => monadic
btst &0x4,%d0 # (bit 5 == 1)
beq.b op010_dst # (bit 4 == 0) => dyadic
and.w &0x007f,%d0 # extract extension bits {6:0}
cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
bne.b op010_src # no; it's monadic
op010_dst:
bfextu EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
bsr.l load_fpn2 # fetch dst fpreg ptr
bsr.l set_tag_x # get dst type tag
cmpi.b %d0, &UNNORM # is dst fpreg an UNNORM?
beq.b op010_dst_unnorm # yes
op010_dst_cont:
mov.b %d0, DTAG(%a6) # store the dst optype tag
op010_src:
bfextu EXC_CMDREG(%a6){&3:&3}, %d0 # extract src type field
bfextu EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
bne.w fetch_from_mem # src op is in memory
op010_dreg:
clr.b STAG(%a6) # either NORM or ZERO
bfextu EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
mov.w (tbl_op010_dreg.b,%pc,%d0.w*2), %d0 # jmp based on optype
jmp (tbl_op010_dreg.b,%pc,%d0.w*1) # fetch src from dreg
op010_dst_unnorm:
bsr.l unnorm_fix # fix the dst UNNORM
bra.b op010_dst_cont
swbeg &0x8
tbl_op010_dreg:
short opd_long - tbl_op010_dreg
short opd_sgl - tbl_op010_dreg
short tbl_op010_dreg - tbl_op010_dreg
short tbl_op010_dreg - tbl_op010_dreg
short opd_word - tbl_op010_dreg
short tbl_op010_dreg - tbl_op010_dreg
short opd_byte - tbl_op010_dreg
short tbl_op010_dreg - tbl_op010_dreg
#
# LONG: can be either NORM or ZERO...
#
opd_long:
bsr.l fetch_dreg # fetch long in d0
fmov.l %d0, %fp0 # load a long
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w opd_long_zero # long is a ZERO
rts
opd_long_zero:
mov.b &ZERO, STAG(%a6) # set ZERO optype flag
rts
#
# WORD: can be either NORM or ZERO...
#
opd_word:
bsr.l fetch_dreg # fetch word in d0
fmov.w %d0, %fp0 # load a word
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w opd_word_zero # WORD is a ZERO
rts
opd_word_zero:
mov.b &ZERO, STAG(%a6) # set ZERO optype flag
rts
#
# BYTE: can be either NORM or ZERO...
#
opd_byte:
bsr.l fetch_dreg # fetch word in d0
fmov.b %d0, %fp0 # load a byte
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w opd_byte_zero # byte is a ZERO
rts
opd_byte_zero:
mov.b &ZERO, STAG(%a6) # set ZERO optype flag
rts
#
# SGL: can be either NORM, DENORM, ZERO, INF, QNAN or SNAN but not UNNORM
#
# separate SNANs and DENORMs so they can be loaded w/ special care.
# all others can simply be moved "in" using fmove.
#
opd_sgl:
bsr.l fetch_dreg # fetch sgl in d0
mov.l %d0,L_SCR1(%a6)
lea L_SCR1(%a6), %a0 # pass: ptr to the sgl
bsr.l set_tag_s # determine sgl type
mov.b %d0, STAG(%a6) # save the src tag
cmpi.b %d0, &SNAN # is it an SNAN?
beq.w get_sgl_snan # yes
cmpi.b %d0, &DENORM # is it a DENORM?
beq.w get_sgl_denorm # yes
fmov.s (%a0), %fp0 # no, so can load it regular
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
rts
##############################################################################
#########################################################################
# fetch_from_mem(): #
# - src is out in memory. must: #
# (1) calc ea - must read AFTER you know the src type since #
# if the ea is -() or ()+, need to know # of bytes. #
# (2) read it in from either user or supervisor space #
# (3) if (b || w || l) then simply read in #
# if (s || d || x) then check for SNAN,UNNORM,DENORM #
# if (packed) then punt for now #
# INPUT: #
# %d0 : src type field #
#########################################################################
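# For example, a source specifier of 'dbl' (101) indexes entry 5 of
# tbl_fp_type below, so load_dbl reads 8 bytes with _dmem_read() and
# classifies the operand with set_tag_d() before converting it to extended.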
fetch_from_mem:
clr.b STAG(%a6) # either NORM or ZERO
mov.w (tbl_fp_type.b,%pc,%d0.w*2), %d0 # index by src type field
jmp (tbl_fp_type.b,%pc,%d0.w*1)
swbeg &0x8
tbl_fp_type:
short load_long - tbl_fp_type
short load_sgl - tbl_fp_type
short load_ext - tbl_fp_type
short load_packed - tbl_fp_type
short load_word - tbl_fp_type
short load_dbl - tbl_fp_type
short load_byte - tbl_fp_type
short tbl_fp_type - tbl_fp_type
#########################################
# load a LONG into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 4 bytes into L_SCR1 #
# (3) fmov.l into %fp0 #
#########################################
load_long:
movq.l &0x4, %d0 # pass: 4 (bytes)
bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
cmpi.b SPCOND_FLG(%a6),&immed_flg
beq.b load_long_immed
bsr.l _dmem_read_long # fetch src operand from memory
tst.l %d1 # did dfetch fail?
bne.l facc_in_l # yes
load_long_cont:
fmov.l %d0, %fp0 # read into %fp0;convert to xprec
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w load_long_zero # src op is a ZERO
rts
load_long_zero:
mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
rts
load_long_immed:
bsr.l _imem_read_long # fetch src operand immed data
tst.l %d1 # did ifetch fail?
bne.l funimp_iacc # yes
bra.b load_long_cont
#########################################
# load a WORD into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 2 bytes into L_SCR1 #
# (3) fmov.w into %fp0 #
#########################################
load_word:
movq.l &0x2, %d0 # pass: 2 (bytes)
bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
cmpi.b SPCOND_FLG(%a6),&immed_flg
beq.b load_word_immed
bsr.l _dmem_read_word # fetch src operand from memory
tst.l %d1 # did dfetch fail?
bne.l facc_in_w # yes
load_word_cont:
fmov.w %d0, %fp0 # read into %fp0;convert to xprec
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w load_word_zero # src op is a ZERO
rts
load_word_zero:
mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
rts
load_word_immed:
bsr.l _imem_read_word # fetch src operand immed data
tst.l %d1 # did ifetch fail?
bne.l funimp_iacc # yes
bra.b load_word_cont
#########################################
# load a BYTE into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 1 byte into L_SCR1 #
# (3) fmov.b into %fp0 #
#########################################
load_byte:
movq.l &0x1, %d0 # pass: 1 (byte)
bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
cmpi.b SPCOND_FLG(%a6),&immed_flg
beq.b load_byte_immed
bsr.l _dmem_read_byte # fetch src operand from memory
tst.l %d1 # did dfetch fail?
bne.l facc_in_b # yes
load_byte_cont:
fmov.b %d0, %fp0 # read into %fp0;convert to xprec
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
fbeq.w load_byte_zero # src op is a ZERO
rts
load_byte_zero:
mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
rts
load_byte_immed:
bsr.l _imem_read_word # fetch src operand immed data
tst.l %d1 # did ifetch fail?
bne.l funimp_iacc # yes
bra.b load_byte_cont
#########################################
# load a SGL into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 4 bytes into L_SCR1 #
# (3) fmov.s into %fp0 #
#########################################
load_sgl:
movq.l &0x4, %d0 # pass: 4 (bytes)
bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
cmpi.b SPCOND_FLG(%a6),&immed_flg
beq.b load_sgl_immed
bsr.l _dmem_read_long # fetch src operand from memory
mov.l %d0, L_SCR1(%a6) # store src op on stack
tst.l %d1 # did dfetch fail?
bne.l facc_in_l # yes
load_sgl_cont:
lea L_SCR1(%a6), %a0 # pass: ptr to sgl src op
bsr.l set_tag_s # determine src type tag
mov.b %d0, STAG(%a6) # save src optype tag on stack
cmpi.b %d0, &DENORM # is it a sgl DENORM?
beq.w get_sgl_denorm # yes
cmpi.b %d0, &SNAN # is it a sgl SNAN?
beq.w get_sgl_snan # yes
fmov.s L_SCR1(%a6), %fp0 # read into %fp0;convert to xprec
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
rts
load_sgl_immed:
bsr.l _imem_read_long # fetch src operand immed data
tst.l %d1 # did ifetch fail?
bne.l funimp_iacc # yes
bra.b load_sgl_cont
# must convert sgl denorm format to an Xprec denorm fmt suitable for
# normalization...
# %a0 : points to sgl denorm
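# Worked example (for illustration): the smallest sgl denorm, $00000001
# (2^-149), has its lone fraction bit land at bit 8 of FP_SRC_HI; norm()
# then shifts the 64-bit mantissa left by 23, so the exponent below becomes
# 0x3f81 - 23 = 0x3f6a, which is 2^-149 in extended precision.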
get_sgl_denorm:
clr.w FP_SRC_EX(%a6)
bfextu (%a0){&9:&23}, %d0 # fetch sgl hi(_mantissa)
lsl.l &0x8, %d0
mov.l %d0, FP_SRC_HI(%a6) # set ext hi(_mantissa)
clr.l FP_SRC_LO(%a6) # set ext lo(_mantissa)
clr.w FP_SRC_EX(%a6)
btst &0x7, (%a0) # is sgn bit set?
beq.b sgl_dnrm_norm
bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
sgl_dnrm_norm:
lea FP_SRC(%a6), %a0
bsr.l norm # normalize number
mov.w &0x3f81, %d1 # xprec exp = 0x3f81
sub.w %d0, %d1 # exp = 0x3f81 - shft amt.
or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
mov.b &NORM, STAG(%a6) # fix src type tag
rts
# convert sgl to ext SNAN
# %a0 : points to sgl SNAN
get_sgl_snan:
mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
bfextu (%a0){&9:&23}, %d0
lsl.l &0x8, %d0 # extract and insert hi(man)
mov.l %d0, FP_SRC_HI(%a6)
clr.l FP_SRC_LO(%a6)
btst &0x7, (%a0) # see if sign of SNAN is set
beq.b no_sgl_snan_sgn
bset &0x7, FP_SRC_EX(%a6)
no_sgl_snan_sgn:
rts
#########################################
# load a DBL into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 8 bytes into L_SCR(1,2)#
# (3) fmov.d into %fp0 #
#########################################
load_dbl:
movq.l &0x8, %d0 # pass: 8 (bytes)
bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
cmpi.b SPCOND_FLG(%a6),&immed_flg
beq.b load_dbl_immed
lea L_SCR1(%a6), %a1 # pass: ptr to input dbl tmp space
movq.l &0x8, %d0 # pass: # bytes to read
bsr.l _dmem_read # fetch src operand from memory
tst.l %d1 # did dfetch fail?
bne.l facc_in_d # yes
load_dbl_cont:
lea L_SCR1(%a6), %a0 # pass: ptr to input dbl
bsr.l set_tag_d # determine src type tag
mov.b %d0, STAG(%a6) # set src optype tag
cmpi.b %d0, &DENORM # is it a dbl DENORM?
beq.w get_dbl_denorm # yes
cmpi.b %d0, &SNAN # is it a dbl SNAN?
beq.w get_dbl_snan # yes
fmov.d L_SCR1(%a6), %fp0 # read into %fp0;convert to xprec
fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
rts
load_dbl_immed:
lea L_SCR1(%a6), %a1 # pass: ptr to input dbl tmp space
movq.l &0x8, %d0 # pass: # bytes to read
bsr.l _imem_read # fetch src operand from memory
tst.l %d1 # did ifetch fail?
bne.l funimp_iacc # yes
bra.b load_dbl_cont
# must convert dbl denorm format to an Xprec denorm fmt suitable for
# normalization...
# %a0 : loc. of dbl denorm
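# Worked example (for illustration): the smallest dbl denorm,
# $00000000_00000001 (2^-1074), has its lone fraction bit land at bit 11 of
# FP_SRC_LO; norm() then shifts the 64-bit mantissa left by 52, so the
# exponent below becomes 0x3c01 - 52 = 0x3bcd, which is 2^-1074 in extended
# precision.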
get_dbl_denorm:
clr.w FP_SRC_EX(%a6)
bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
mov.l %d0, FP_SRC_HI(%a6)
bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
mov.l &0xb, %d1
lsl.l %d1, %d0
mov.l %d0, FP_SRC_LO(%a6)
btst &0x7, (%a0) # is sgn bit set?
beq.b dbl_dnrm_norm
bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
dbl_dnrm_norm:
lea FP_SRC(%a6), %a0
bsr.l norm # normalize number
mov.w &0x3c01, %d1 # xprec exp = 0x3c01
sub.w %d0, %d1 # exp = 0x3c01 - shft amt.
or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
mov.b &NORM, STAG(%a6) # fix src type tag
rts
# convert dbl to ext SNAN
# %a0 : points to dbl SNAN
get_dbl_snan:
mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
mov.l %d0, FP_SRC_HI(%a6)
bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
mov.l &0xb, %d1
lsl.l %d1, %d0
mov.l %d0, FP_SRC_LO(%a6)
btst &0x7, (%a0) # see if sign of SNAN is set
beq.b no_dbl_snan_sgn
bset &0x7, FP_SRC_EX(%a6)
no_dbl_snan_sgn:
rts
#################################################
# load a Xprec into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 12 bytes into L_SCR(1,2) #
# (3) fmov.x into %fp0 #
#################################################
load_ext:
mov.l &0xc, %d0 # pass: 12 (bytes)
bsr.l _dcalc_ea # calc <ea>
lea FP_SRC(%a6), %a1 # pass: ptr to input ext tmp space
mov.l &0xc, %d0 # pass: # of bytes to read
bsr.l _dmem_read # fetch src operand from memory
tst.l %d1 # did dfetch fail?
bne.l facc_in_x # yes
lea FP_SRC(%a6), %a0 # pass: ptr to src op
bsr.l set_tag_x # determine src type tag
cmpi.b %d0, &UNNORM # is the src op an UNNORM?
beq.b load_ext_unnorm # yes
mov.b %d0, STAG(%a6) # store the src optype tag
rts
load_ext_unnorm:
bsr.l unnorm_fix # fix the src UNNORM
mov.b %d0, STAG(%a6) # store the src optype tag
rts
#################################################
# load a packed into %fp0: #
# -number can't fault #
# (1) calc ea #
# (2) read 12 bytes into L_SCR(1,2,3) #
# (3) fmov.x into %fp0 #
#################################################
load_packed:
bsr.l get_packed
lea FP_SRC(%a6),%a0 # pass ptr to src op
bsr.l set_tag_x # determine src type tag
cmpi.b %d0,&UNNORM # is the src op an UNNORM ZERO?
beq.b load_packed_unnorm # yes
mov.b %d0,STAG(%a6) # store the src optype tag
rts
load_packed_unnorm:
bsr.l unnorm_fix # fix the UNNORM ZERO
mov.b %d0,STAG(%a6) # store the src optype tag
rts
#########################################################################
# XDEF **************************************************************** #
# fout(): move from fp register to memory or data register #
# #
# XREF **************************************************************** #
# _round() - needed to create EXOP for sgl/dbl precision #
# norm() - needed to create EXOP for extended precision #
# ovf_res() - create default overflow result for sgl/dbl precision#
# unf_res() - create default underflow result for sgl/dbl prec. #
# dst_dbl() - create rounded dbl precision result. #
# dst_sgl() - create rounded sgl precision result. #
# fetch_dreg() - fetch dynamic k-factor reg for packed. #
# bindec() - convert FP binary number to packed number. #
# _mem_write() - write data to memory. #
# _mem_write2() - write data to memory unless supv mode -(a7) exc.#
# _dmem_write_{byte,word,long}() - write data to memory. #
# store_dreg_{b,w,l}() - store data to data register file. #
# facc_out_{b,w,l,d,x}() - data access error occurred. #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision source operand #
# d0 = round prec,mode #
# #
# OUTPUT ************************************************************** #
# fp0 : intermediate underflow or overflow result if #
# OVFL/UNFL occurred for a sgl or dbl operand #
# #
# ALGORITHM *********************************************************** #
# This routine is accessed by many handlers that need to do an #
# opclass three move of an operand out to memory. #
# Decode an fmove out (opclass 3) instruction to determine if #
# it's b,w,l,s,d,x, or p in size. b,w,l,s can be stored to either a data #
# register or memory. The algorithm uses a standard "fmove" to create #
# the rounded result. Also, since exceptions are disabled, this also #
# creates the correct OPERR default result if appropriate. #
# For sgl or dbl precision, overflow or underflow can occur. If #
# either occurs and is enabled, the EXOP is created and returned in fp1. #
# For extended precision, the stacked <ea> must be fixed along #
# w/ the address index register as appropriate w/ _calc_ea_fout(). If #
# the source is a denorm and if underflow is enabled, an EXOP must be #
# created. #
# For packed, the k-factor must be fetched from the instruction #
# word or a data register. The <ea> must be fixed as w/ extended #
# precision. Then, bindec() is called to create the appropriate #
# packed result. #
# If at any time an access error is flagged by one of the move- #
# to-memory routines, then a special exit must be made so that the #
# access error can be handled properly. #
# #
#########################################################################
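# Illustration of the common path (example instruction only): for
# fmove.s fp0,(a1) with an in-range NORM source, tbl_fout dispatches to
# fout_sgl, the exponent check falls between SGL_LO and SGL_HI, and
# fout_sgl_exg performs the convert-and-round with a single fmov.s before
# _dmem_write_long() stores the 4-byte result at the stacked <ea>.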
global fout
fout:
bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
jmp (tbl_fout.b,%pc,%a1) # jump to routine
swbeg &0x8
tbl_fout:
short fout_long - tbl_fout
short fout_sgl - tbl_fout
short fout_ext - tbl_fout
short fout_pack - tbl_fout
short fout_word - tbl_fout
short fout_dbl - tbl_fout
short fout_byte - tbl_fout
short fout_pack - tbl_fout
#################################################################
# fmove.b out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_byte:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_byte_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_byte_norm:
fmov.l %d0,%fpcr # insert rnd prec,mode
fmov.b %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_byte_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_byte # write byte
tst.l %d1 # did dstore fail?
bne.l facc_out_b # yes
rts
fout_byte_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_b
rts
fout_byte_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_byte_norm
#################################################################
# fmove.w out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_word:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_word_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_word_norm:
fmov.l %d0,%fpcr # insert rnd prec:mode
fmov.w %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_word_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_word # write word
tst.l %d1 # did dstore fail?
bne.l facc_out_w # yes
rts
fout_word_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_w
rts
fout_word_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_word_norm
#################################################################
# fmove.l out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
fout_long:
tst.b STAG(%a6) # is operand normalized?
bne.b fout_long_denorm # no
fmovm.x SRC(%a0),&0x80 # load value
fout_long_norm:
fmov.l %d0,%fpcr # insert rnd prec:mode
fmov.l %fp0,%d0 # exec move out w/ correct rnd mode
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # fetch FPSR
or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
fout_long_write:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_long_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
rts
fout_long_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
rts
fout_long_denorm:
mov.l SRC_EX(%a0),%d1
andi.l &0x80000000,%d1 # keep DENORM sign
ori.l &0x00800000,%d1 # make smallest sgl
fmov.s %d1,%fp0
bra.b fout_long_norm
#################################################################
# fmove.x out ###################################################
#################################################################
# Only "Unimplemented Data Type" exceptions enter here. The operand
# is either a DENORM or a NORM.
# The DENORM causes an Underflow exception.
fout_ext:
# we copy the extended precision result to FP_SCR0 so that the reserved
# 16-bit field gets zeroed. we do this since we promise not to disturb
# what's at SRC(a0).
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
clr.w 2+FP_SCR0_EX(%a6) # clear reserved field
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
fmovm.x SRC(%a0),&0x80 # return result
bsr.l _calc_ea_fout # fix stacked <ea>
mov.l %a0,%a1 # pass: dst addr
lea FP_SCR0(%a6),%a0 # pass: src addr
mov.l &0xc,%d0 # pass: opsize is 12 bytes
# we must not yet write the extended precision data to the stack
# in the pre-decrement case from supervisor mode or else we'll corrupt
# the stack frame. so, leave it in FP_SRC for now and deal with it later...
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.b fout_ext_a7
bsr.l _dmem_write # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
tst.b STAG(%a6) # is operand normalized?
bne.b fout_ext_denorm # no
rts
# the number is a DENORM. must set the underflow exception bit
fout_ext_denorm:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
mov.b FPCR_ENABLE(%a6),%d0
andi.b &0x0a,%d0 # is UNFL or INEX enabled?
bne.b fout_ext_exc # yes
rts
# we don't want to do the write if the exception occurred in supervisor mode
# so _mem_write2() handles this for us.
fout_ext_a7:
bsr.l _mem_write2 # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
tst.b STAG(%a6) # is operand normalized?
bne.b fout_ext_denorm # no
rts
fout_ext_exc:
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the mantissa
neg.w %d0 # new exp = -(shft amt)
andi.w &0x7fff,%d0
andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
fout_ext_err:
mov.l EXC_A6(%a6),(%a6) # fix stacked a6
bra.l facc_out_x
#########################################################################
# fmove.s out ###########################################################
#########################################################################
fout_sgl:
andi.b &0x30,%d0 # clear rnd prec
ori.b &s_mode*0x10,%d0 # insert sgl prec
mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
#
# operand is a normalized number. first, we check to see if the move out
# would cause either an underflow or overflow. these cases are handled
# separately. otherwise, set the FPCR to the proper rounding mode and
# execute the move.
#
mov.w SRC_EX(%a0),%d0 # extract exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&SGL_HI # will operand overflow?
bgt.w fout_sgl_ovfl # yes; go handle OVFL
beq.w fout_sgl_may_ovfl # maybe; go handle possible OVFL
cmpi.w %d0,&SGL_LO # will operand underflow?
blt.w fout_sgl_unfl # yes; go handle underflow
#
# NORMs(in range) can be stored out by a simple "fmov.s"
# Unnormalized inputs can come through this point.
#
fout_sgl_exg:
fmovm.x SRC(%a0),&0x80 # fetch fop from stack
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmov.s %fp0,%d0 # store does convert and round
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d1 # save FPSR
or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
fout_sgl_exg_write:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_exg_write_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
rts
fout_sgl_exg_write_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
rts
#
# here, we know that the operand would UNFL if moved out to single prec,
# so, denorm and round and then use generic store single routine to
# write the value to memory.
#
fout_sgl_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l %a0,-(%sp)
clr.l %d0 # pass: S.F. = 0
cmpi.b STAG(%a6),&DENORM # fetch src optype tag
bne.b fout_sgl_unfl_cont # let DENORMs fall through
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the DENORM
fout_sgl_unfl_cont:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calc default underflow result
lea FP_SCR0(%a6),%a0 # pass: ptr to fop
bsr.l dst_sgl # convert to single prec
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_unfl_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.b fout_sgl_unfl_chkexc
fout_sgl_unfl_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
fout_sgl_unfl_chkexc:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_unfl # yes
addq.l &0x4,%sp
rts
#
# it's definitely an overflow so call ovf_res to get the correct answer
#
fout_sgl_ovfl:
tst.b 3+SRC_HI(%a0) # is result inexact?
bne.b fout_sgl_ovfl_inex2
tst.l SRC_LO(%a0) # is result inexact?
bne.b fout_sgl_ovfl_inex2
ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
bra.b fout_sgl_ovfl_cont
fout_sgl_ovfl_inex2:
ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
fout_sgl_ovfl_cont:
mov.l %a0,-(%sp)
# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
# overflow result. DON'T save the returned ccodes from ovf_res() since
# fmove out doesn't alter them.
tst.b SRC_EX(%a0) # is operand negative?
smi %d1 # set if so
mov.l L_SCR3(%a6),%d0 # pass: sgl prec,rnd mode
bsr.l ovf_res # calc OVFL result
fmovm.x (%a0),&0x80 # load default overflow result
fmov.s %fp0,%d0 # store to single
mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
beq.b fout_sgl_ovfl_dn # must save to integer regfile
mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
bsr.l _dmem_write_long # write long
tst.l %d1 # did dstore fail?
bne.l facc_out_l # yes
bra.b fout_sgl_ovfl_chkexc
fout_sgl_ovfl_dn:
mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
andi.w &0x7,%d1
bsr.l store_dreg_l
fout_sgl_ovfl_chkexc:
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_ovfl # yes
addq.l &0x4,%sp
rts
#
# move out MAY overflow:
# (1) force the exp to 0x3fff
# (2) do a move w/ appropriate rnd mode
# (3) if exp still equals zero, then insert original exponent
# for the correct result.
# if exp now equals one, then it overflowed so call ovf_res.
#
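# In practice the test works by scaling: the mantissa is copied with a forced
# exponent of 0x3fff (a value in [1,2)), rounded at the destination precision,
# and the fabs/fcmp against 2.0 below reveal whether rounding carried out of
# the mantissa. If it did, the true result overflows, so we take
# fout_sgl_ovfl; otherwise the original operand is stored normally through
# fout_sgl_exg.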
fout_sgl_may_ovfl:
mov.w SRC_EX(%a0),%d1 # fetch current sign
andi.w &0x8000,%d1 # keep it,clear exp
ori.w &0x3fff,%d1 # insert exp = 0
mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # need absolute value
fcmp.b %fp0,&0x2 # did exponent increase?
fblt.w fout_sgl_exg # no; go finish NORM
bra.w fout_sgl_ovfl # yes; go handle overflow
################
fout_sd_exc_unfl:
mov.l (%sp)+,%a0
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
cmpi.b STAG(%a6),&DENORM # was src a DENORM?
bne.b fout_sd_exc_cont # no
lea FP_SCR0(%a6),%a0
bsr.l norm
neg.l %d0
andi.w &0x7fff,%d0
bfins %d0,FP_SCR0_EX(%a6){&1:&15}
bra.b fout_sd_exc_cont
fout_sd_exc:
fout_sd_exc_ovfl:
mov.l (%sp)+,%a0 # restore a0
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
fout_sd_exc_cont:
bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
sne.b 2+FP_SCR0_EX(%a6) # set internal sign bit
lea FP_SCR0(%a6),%a0 # pass: ptr to DENORM
mov.b 3+L_SCR3(%a6),%d1 # fetch stored rnd prec,mode
lsr.b &0x4,%d1
andi.w &0x0c,%d1 # isolate rnd prec
swap %d1 # pass: rnd prec in hi word
mov.b 3+L_SCR3(%a6),%d1 # refetch rnd prec,mode
lsr.b &0x4,%d1
andi.w &0x03,%d1 # pass: rnd mode in lo word
clr.l %d0 # pass: zero g,r,s
bsr.l _round # round the DENORM
tst.b 2+FP_SCR0_EX(%a6) # is EXOP negative?
beq.b fout_sd_exc_done # no
bset &0x7,FP_SCR0_EX(%a6) # yes
fout_sd_exc_done:
fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
rts
#################################################################
# fmove.d out ###################################################
#################################################################
fout_dbl:
andi.b &0x30,%d0 # clear rnd prec
ori.b &d_mode*0x10,%d0 # insert dbl prec
mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
#
# operand is a normalized number. first, we check to see if the move out
# would cause either an underflow or overflow. these cases are handled
# separately. otherwise, set the FPCR to the proper rounding mode and
# execute the move.
#
mov.w SRC_EX(%a0),%d0 # extract exponent
andi.w &0x7fff,%d0 # strip sign
cmpi.w %d0,&DBL_HI # will operand overflow?
bgt.w fout_dbl_ovfl # yes; go handle OVFL
beq.w fout_dbl_may_ovfl # maybe; go handle possible OVFL
cmpi.w %d0,&DBL_LO # will operand underflow?
blt.w fout_dbl_unfl # yes; go handle underflow
#
# NORMs(in range) can be stored out by a simple "fmov.d"
# Unnormalized inputs can come through this point.
#
fout_dbl_exg:
fmovm.x SRC(%a0),&0x80 # fetch fop from stack
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.l &0x0,%fpsr # clear FPSR
fmov.d %fp0,L_SCR1(%a6) # store does convert and round
fmov.l &0x0,%fpcr # clear FPCR
fmov.l %fpsr,%d0 # save FPSR
or.w %d0,2+USER_FPSR(%a6) # set possible inex2/ainex
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
rts # no; so we're finished
#
# here, we know that the operand would UNFL if moved out to double prec,
# so, denorm and round and then use generic store double routine to
# write the value to memory.
#
fout_dbl_unfl:
bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
mov.l %a0,-(%sp)
clr.l %d0 # pass: S.F. = 0
cmpi.b STAG(%a6),&DENORM # fetch src optype tag
bne.b fout_dbl_unfl_cont # let DENORMs fall through
lea FP_SCR0(%a6),%a0
bsr.l norm # normalize the DENORM
fout_dbl_unfl_cont:
lea FP_SCR0(%a6),%a0 # pass: ptr to operand
mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
bsr.l unf_res # calc default underflow result
lea FP_SCR0(%a6),%a0 # pass: ptr to fop
bsr.l dst_dbl # convert to double prec
mov.l %d0,L_SCR1(%a6)
mov.l %d1,L_SCR2(%a6)
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_unfl # yes
addq.l &0x4,%sp
rts
#
# it's definitely an overflow so call ovf_res to get the correct answer
#
fout_dbl_ovfl:
mov.w 2+SRC_LO(%a0),%d0
andi.w &0x7ff,%d0
bne.b fout_dbl_ovfl_inex2
ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
bra.b fout_dbl_ovfl_cont
fout_dbl_ovfl_inex2:
ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
fout_dbl_ovfl_cont:
mov.l %a0,-(%sp)
# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
# overflow result. DON'T save the returned ccodes from ovf_res() since
# fmove out doesn't alter them.
tst.b SRC_EX(%a0) # is operand negative?
smi %d1 # set if so
mov.l L_SCR3(%a6),%d0 # pass: dbl prec,rnd mode
bsr.l ovf_res # calc OVFL result
fmovm.x (%a0),&0x80 # load default overflow result
fmov.d %fp0,L_SCR1(%a6) # store to double
mov.l EXC_EA(%a6),%a1 # pass: dst addr
lea L_SCR1(%a6),%a0 # pass: src addr
movq.l &0x8,%d0 # pass: opsize is 8 bytes
bsr.l _dmem_write # store dbl fop to memory
tst.l %d1 # did dstore fail?
bne.l facc_out_d # yes
mov.b FPCR_ENABLE(%a6),%d1
andi.b &0x0a,%d1 # is UNFL or INEX enabled?
bne.w fout_sd_exc_ovfl # yes
addq.l &0x4,%sp
rts
#
# move out MAY overflow:
# (1) force the exp to 0x3fff
# (2) do a move w/ appropriate rnd mode
# (3) if exp still equals zero, then insert original exponent
# for the correct result.
# if exp now equals one, then it overflowed so call ovf_res.
#
fout_dbl_may_ovfl:
mov.w SRC_EX(%a0),%d1 # fetch current sign
andi.w &0x8000,%d1 # keep it,clear exp
ori.w &0x3fff,%d1 # insert exp = 0
mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
fmov.l L_SCR3(%a6),%fpcr # set FPCR
fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
fmov.l &0x0,%fpcr # clear FPCR
fabs.x %fp0 # need absolute value
fcmp.b %fp0,&0x2 # did exponent increase?
fblt.w fout_dbl_exg # no; go finish NORM
bra.w fout_dbl_ovfl # yes; go handle overflow
#########################################################################
# XDEF **************************************************************** #
# dst_dbl(): create double precision value from extended prec. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to source operand in extended precision #
# #
# OUTPUT ************************************************************** #
# d0 = hi(double precision result) #
# d1 = lo(double precision result) #
# #
# ALGORITHM *********************************************************** #
# #
# Changes extended precision to double precision. #
# Note: no attempt is made to round the extended value to double. #
# dbl_sign = ext_sign #
# dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias) #
# get rid of ext integer bit #
# dbl_mant = ext_mant{62:11} #
# #
# --------------- --------------- --------------- #
# extended -> |s| exp | |1| ms mant | | ls mant | #
# --------------- --------------- --------------- #
# 95 64 63 62 32 31 11 0 #
# | | #
# | | #
# | | #
# v v #
# --------------- --------------- #
# double -> |s|exp| mant | | mant | #
# --------------- --------------- #
# 63 51 32 31 0 #
# #
#########################################################################
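# Worked example (for illustration): extended +1.5 has exp = $3fff and
# mantissa = $c0000000_00000000; dbl_exp = $3fff - $3fff + $3ff = $3ff, and
# dropping the integer bit leaves fraction 100...0, so the routine returns
# d0 = $3ff80000 and d1 = $00000000, i.e. 1.5 in double precision.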
dst_dbl:
clr.l %d0 # clear d0
mov.w FTEMP_EX(%a0),%d0 # get exponent
subi.w &EXT_BIAS,%d0 # subtract extended precision bias
addi.w &DBL_BIAS,%d0 # add double precision bias
tst.b FTEMP_HI(%a0) # is number a denorm?
bmi.b dst_get_dupper # no
subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
dst_get_dupper:
swap %d0 # d0 now in upper word
lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
tst.b FTEMP_EX(%a0) # test sign
bpl.b dst_get_dman # if positive, go process mantissa
bset &0x1f,%d0 # if negative, set sign
dst_get_dman:
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
or.l %d1,%d0 # put these bits in ms word of double
mov.l %d0,L_SCR1(%a6) # put the new exp back on the stack
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
mov.l &21,%d0 # load shift count
lsl.l %d0,%d1 # put lower 11 bits in upper bits
mov.l %d1,L_SCR2(%a6) # build lower lword in memory
mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
mov.l L_SCR2(%a6),%d1
or.l %d0,%d1 # put them in double result
mov.l L_SCR1(%a6),%d0
rts
#########################################################################
# XDEF **************************************************************** #
# dst_sgl(): create single precision value from extended prec #
# #
# XREF **************************************************************** #
# #
# INPUT *************************************************************** #
# a0 = pointer to source operand in extended precision #
# #
# OUTPUT ************************************************************** #
# d0 = single precision result #
# #
# ALGORITHM *********************************************************** #
# #
# Changes extended precision to single precision. #
# sgl_sign = ext_sign #
# sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias) #
# get rid of ext integer bit #
# sgl_mant = ext_mant{62:40} #
# #
# --------------- --------------- --------------- #
# extended -> |s| exp | |1| ms mant | | ls mant | #
# --------------- --------------- --------------- #
# 95 64 63 62 40 32 31 12 0 #
# | | #
# | | #
# | | #
# v v #
# --------------- #
# single -> |s|exp| mant | #
# --------------- #
# 31 22 0 #
# #
#########################################################################
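# Worked example (for illustration): extended +1.5 has exp = $3fff and
# ms mantissa = $c0000000; sgl_exp = $3fff - $3fff + $7f = $7f, and the upper
# 23 fraction bits give $400000, so the routine returns d0 = $3fc00000,
# i.e. 1.5 in single precision.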
dst_sgl:
clr.l %d0
mov.w FTEMP_EX(%a0),%d0 # get exponent
subi.w &EXT_BIAS,%d0 # subtract extended precision bias
addi.w &SGL_BIAS,%d0 # add single precision bias
tst.b FTEMP_HI(%a0) # is number a denorm?
bmi.b dst_get_supper # no
subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
dst_get_supper:
swap %d0 # put exp in upper word of d0
lsl.l &0x7,%d0 # shift it into single exp bits
tst.b FTEMP_EX(%a0) # test sign
bpl.b dst_get_sman # if positive, continue
bset &0x1f,%d0 # if negative, put in sign first
dst_get_sman:
mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
lsr.l &0x8,%d1 # and put them flush right
or.l %d1,%d0 # put these bits in ms word of single
rts
##############################################################################
fout_pack:
bsr.l _calc_ea_fout # fetch the <ea>
mov.l %a0,-(%sp)
mov.b STAG(%a6),%d0 # fetch input type
bne.w fout_pack_not_norm # input is not NORM
fout_pack_norm:
btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
beq.b fout_pack_s # static
fout_pack_d:
mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
lsr.b &0x4,%d1
andi.w &0x7,%d1
bsr.l fetch_dreg # fetch Dn w/ k-factor
bra.b fout_pack_type
fout_pack_s:
mov.b 1+EXC_CMDREG(%a6),%d0 # fetch static field
fout_pack_type:
bfexts %d0{&25:&7},%d0 # extract k-factor
mov.l %d0,-(%sp)
lea FP_SRC(%a6),%a0 # pass: ptr to input
# bindec is currently scrambling FP_SRC for denorm inputs.
# we'll have to change this, but for now, tough luck!!!
bsr.l bindec # convert xprec to packed
# andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
mov.l (%sp)+,%d0
tst.b 3+FP_SCR0_EX(%a6)
bne.b fout_pack_set
tst.l FP_SCR0_HI(%a6)
bne.b fout_pack_set
tst.l FP_SCR0_LO(%a6)
bne.b fout_pack_set
# add the extra condition that only if the k-factor was zero, too, should
# we zero the exponent
tst.l %d0
bne.b fout_pack_set
# "mantissa" is all zero which means that the answer is zero. but, the '040
# algorithm allows the exponent to be non-zero. the 881/2 do not. Therefore,
# if the mantissa is zero, I will zero the exponent, too.
# the question now is whether the exponent's sign bit is allowed to be non-zero
# for a zero, also...
andi.w &0xf000,FP_SCR0(%a6)
fout_pack_set:
lea FP_SCR0(%a6),%a0 # pass: src addr
fout_pack_write:
mov.l (%sp)+,%a1 # pass: dst addr
mov.l &0xc,%d0 # pass: opsize is 12 bytes
cmpi.b SPCOND_FLG(%a6),&mda7_flg
beq.b fout_pack_a7
bsr.l _dmem_write # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
rts
# we don't want to do the write if the exception occurred in supervisor mode
# so _mem_write2() handles this for us.
fout_pack_a7:
bsr.l _mem_write2 # write ext prec number to memory
tst.l %d1 # did dstore fail?
bne.w fout_ext_err # yes
rts
fout_pack_not_norm:
cmpi.b %d0,&DENORM # is it a DENORM?
beq.w fout_pack_norm # yes
lea FP_SRC(%a6),%a0
clr.w 2+FP_SRC_EX(%a6)
cmpi.b %d0,&SNAN # is it an SNAN?
beq.b fout_pack_snan # yes
bra.b fout_pack_write # no
fout_pack_snan:
ori.w &snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
bset &0x6,FP_SRC_HI(%a6) # set snan bit
bra.b fout_pack_write
#########################################################################
# XDEF **************************************************************** #
# fetch_dreg(): fetch register according to index in d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d1 = index of register to fetch from #
# #
# OUTPUT ************************************************************** #
# d0 = value of register fetched #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1 which can range from zero #
# to fifteen, load the corresponding register file value (where #
# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
# stack. The rest should still be in their original places. #
# #
#########################################################################
# this routine leaves d1 intact for subsequent store_dreg calls.
global fetch_dreg
fetch_dreg:
mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0
jmp (tbl_fdreg.b,%pc,%d0.w*1)
tbl_fdreg:
short fdreg0 - tbl_fdreg
short fdreg1 - tbl_fdreg
short fdreg2 - tbl_fdreg
short fdreg3 - tbl_fdreg
short fdreg4 - tbl_fdreg
short fdreg5 - tbl_fdreg
short fdreg6 - tbl_fdreg
short fdreg7 - tbl_fdreg
short fdreg8 - tbl_fdreg
short fdreg9 - tbl_fdreg
short fdrega - tbl_fdreg
short fdregb - tbl_fdreg
short fdregc - tbl_fdreg
short fdregd - tbl_fdreg
short fdrege - tbl_fdreg
short fdregf - tbl_fdreg
fdreg0:
mov.l EXC_DREGS+0x0(%a6),%d0
rts
fdreg1:
mov.l EXC_DREGS+0x4(%a6),%d0
rts
fdreg2:
mov.l %d2,%d0
rts
fdreg3:
mov.l %d3,%d0
rts
fdreg4:
mov.l %d4,%d0
rts
fdreg5:
mov.l %d5,%d0
rts
fdreg6:
mov.l %d6,%d0
rts
fdreg7:
mov.l %d7,%d0
rts
fdreg8:
mov.l EXC_DREGS+0x8(%a6),%d0
rts
fdreg9:
mov.l EXC_DREGS+0xc(%a6),%d0
rts
fdrega:
mov.l %a2,%d0
rts
fdregb:
mov.l %a3,%d0
rts
fdregc:
mov.l %a4,%d0
rts
fdregd:
mov.l %a5,%d0
rts
fdrege:
mov.l (%a6),%d0
rts
fdregf:
mov.l EXC_A7(%a6),%d0
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_l(): store longword to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = longword value to store #
# d1 = index of data register to store to #
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the longword value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_l
store_dreg_l:
mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregl.b,%pc,%d1.w*1)
tbl_sdregl:
short sdregl0 - tbl_sdregl
short sdregl1 - tbl_sdregl
short sdregl2 - tbl_sdregl
short sdregl3 - tbl_sdregl
short sdregl4 - tbl_sdregl
short sdregl5 - tbl_sdregl
short sdregl6 - tbl_sdregl
short sdregl7 - tbl_sdregl
sdregl0:
mov.l %d0,EXC_DREGS+0x0(%a6)
rts
sdregl1:
mov.l %d0,EXC_DREGS+0x4(%a6)
rts
sdregl2:
mov.l %d0,%d2
rts
sdregl3:
mov.l %d0,%d3
rts
sdregl4:
mov.l %d0,%d4
rts
sdregl5:
mov.l %d0,%d5
rts
sdregl6:
mov.l %d0,%d6
rts
sdregl7:
mov.l %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_w(): store word to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = word value to store #
# d1 = index of data register to store to #
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the word value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_w
store_dreg_w:
mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregw.b,%pc,%d1.w*1)
tbl_sdregw:
short sdregw0 - tbl_sdregw
short sdregw1 - tbl_sdregw
short sdregw2 - tbl_sdregw
short sdregw3 - tbl_sdregw
short sdregw4 - tbl_sdregw
short sdregw5 - tbl_sdregw
short sdregw6 - tbl_sdregw
short sdregw7 - tbl_sdregw
sdregw0:
mov.w %d0,2+EXC_DREGS+0x0(%a6)
rts
sdregw1:
mov.w %d0,2+EXC_DREGS+0x4(%a6)
rts
sdregw2:
mov.w %d0,%d2
rts
sdregw3:
mov.w %d0,%d3
rts
sdregw4:
mov.w %d0,%d4
rts
sdregw5:
mov.w %d0,%d5
rts
sdregw6:
mov.w %d0,%d6
rts
sdregw7:
mov.w %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# store_dreg_b(): store byte to data register specified by d1 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = byte value to store #
# d1 = index of data register to store to #
# #
# OUTPUT ************************************************************** #
# (data register is updated) #
# #
# ALGORITHM *********************************************************** #
# According to the index value in d1, store the byte value #
# in d0 to the corresponding data register. D0/D1 are on the stack #
# while the rest are in their initial places. #
# #
#########################################################################
global store_dreg_b
store_dreg_b:
mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1
jmp (tbl_sdregb.b,%pc,%d1.w*1)
tbl_sdregb:
short sdregb0 - tbl_sdregb
short sdregb1 - tbl_sdregb
short sdregb2 - tbl_sdregb
short sdregb3 - tbl_sdregb
short sdregb4 - tbl_sdregb
short sdregb5 - tbl_sdregb
short sdregb6 - tbl_sdregb
short sdregb7 - tbl_sdregb
sdregb0:
mov.b %d0,3+EXC_DREGS+0x0(%a6)
rts
sdregb1:
mov.b %d0,3+EXC_DREGS+0x4(%a6)
rts
sdregb2:
mov.b %d0,%d2
rts
sdregb3:
mov.b %d0,%d3
rts
sdregb4:
mov.b %d0,%d4
rts
sdregb5:
mov.b %d0,%d5
rts
sdregb6:
mov.b %d0,%d6
rts
sdregb7:
mov.b %d0,%d7
rts
#########################################################################
# XDEF **************************************************************** #
# inc_areg(): increment an address register by the value in d0 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = amount to increment by #
# d1 = index of address register to increment #
# #
# OUTPUT ************************************************************** #
# (address register is updated) #
# #
# ALGORITHM *********************************************************** #
# Typically used for an instruction w/ a post-increment <ea>, #
# this routine adds the increment value in d0 to the address register #
# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
# in their original places. #
# For a7, if the increment amount is one, then we have to #
# increment by two. For any a7 update, set the mia7_flag so that if #
# an access error exception occurs later in emulation, this address #
# register update can be undone. #
# #
#########################################################################
global inc_areg
inc_areg:
mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1
jmp (tbl_iareg.b,%pc,%d1.w*1)
tbl_iareg:
short iareg0 - tbl_iareg
short iareg1 - tbl_iareg
short iareg2 - tbl_iareg
short iareg3 - tbl_iareg
short iareg4 - tbl_iareg
short iareg5 - tbl_iareg
short iareg6 - tbl_iareg
short iareg7 - tbl_iareg
iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
rts
iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
rts
iareg2: add.l %d0,%a2
rts
iareg3: add.l %d0,%a3
rts
iareg4: add.l %d0,%a4
rts
iareg5: add.l %d0,%a5
rts
iareg6: add.l %d0,(%a6)
rts
iareg7: mov.b &mia7_flg,SPCOND_FLG(%a6)
cmpi.b %d0,&0x1
beq.b iareg7b
add.l %d0,EXC_A7(%a6)
rts
iareg7b:
addq.l &0x2,EXC_A7(%a6)
rts
#########################################################################
# XDEF **************************************************************** #
# dec_areg(): decrement an address register by the value in d0 #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = amount to decrement by #
# d1 = index of address register to decrement #
# #
# OUTPUT ************************************************************** #
# (address register is updated) #
# #
# ALGORITHM *********************************************************** #
# Typically used for an instruction w/ a pre-decrement <ea>, #
# this routine adds the decrement value in d0 to the address register #
# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
# in their original places. #
# For a7, if the decrement amount is one, then we have to #
# decrement by two. For any a7 update, set the mda7_flag so that if #
# an access error exception occurs later in emulation, this address #
# register update can be undone. #
# #
#########################################################################
global dec_areg
dec_areg:
mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1
jmp (tbl_dareg.b,%pc,%d1.w*1)
tbl_dareg:
short dareg0 - tbl_dareg
short dareg1 - tbl_dareg
short dareg2 - tbl_dareg
short dareg3 - tbl_dareg
short dareg4 - tbl_dareg
short dareg5 - tbl_dareg
short dareg6 - tbl_dareg
short dareg7 - tbl_dareg
dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
rts
dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
rts
dareg2: sub.l %d0,%a2
rts
dareg3: sub.l %d0,%a3
rts
dareg4: sub.l %d0,%a4
rts
dareg5: sub.l %d0,%a5
rts
dareg6: sub.l %d0,(%a6)
rts
dareg7: mov.b &mda7_flg,SPCOND_FLG(%a6)
cmpi.b %d0,&0x1
beq.b dareg7b
sub.l %d0,EXC_A7(%a6)
rts
dareg7b:
subq.l &0x2,EXC_A7(%a6)
rts
##############################################################################
#########################################################################
# XDEF **************************************************************** #
# load_fpn1(): load FP register value into FP_SRC(a6). #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = index of FP register to load #
# #
# OUTPUT ************************************************************** #
# FP_SRC(a6) = value loaded from FP register file #
# #
# ALGORITHM *********************************************************** #
# Using the index in d0, load FP_SRC(a6) with a number from the #
# FP register file. #
# #
#########################################################################
global load_fpn1
load_fpn1:
mov.w (tbl_load_fpn1.b,%pc,%d0.w*2), %d0
jmp (tbl_load_fpn1.b,%pc,%d0.w*1)
tbl_load_fpn1:
short load_fpn1_0 - tbl_load_fpn1
short load_fpn1_1 - tbl_load_fpn1
short load_fpn1_2 - tbl_load_fpn1
short load_fpn1_3 - tbl_load_fpn1
short load_fpn1_4 - tbl_load_fpn1
short load_fpn1_5 - tbl_load_fpn1
short load_fpn1_6 - tbl_load_fpn1
short load_fpn1_7 - tbl_load_fpn1
load_fpn1_0:
mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
mov.l 4+EXC_FP0(%a6), 4+FP_SRC(%a6)
mov.l 8+EXC_FP0(%a6), 8+FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_1:
mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
mov.l 4+EXC_FP1(%a6), 4+FP_SRC(%a6)
mov.l 8+EXC_FP1(%a6), 8+FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_2:
fmovm.x &0x20, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_3:
fmovm.x &0x10, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_4:
fmovm.x &0x08, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_5:
fmovm.x &0x04, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_6:
fmovm.x &0x02, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
load_fpn1_7:
fmovm.x &0x01, FP_SRC(%a6)
lea FP_SRC(%a6), %a0
rts
#############################################################################
#########################################################################
# XDEF **************************************************************** #
# load_fpn2(): load FP register value into FP_DST(a6). #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# d0 = index of FP register to load #
# #
# OUTPUT ************************************************************** #
# FP_DST(a6) = value loaded from FP register file #
# #
# ALGORITHM *********************************************************** #
# Using the index in d0, load FP_DST(a6) with a number from the #
# FP register file. #
# #
#########################################################################
global load_fpn2
load_fpn2:
mov.w (tbl_load_fpn2.b,%pc,%d0.w*2), %d0
jmp (tbl_load_fpn2.b,%pc,%d0.w*1)
tbl_load_fpn2:
short load_fpn2_0 - tbl_load_fpn2
short load_fpn2_1 - tbl_load_fpn2
short load_fpn2_2 - tbl_load_fpn2
short load_fpn2_3 - tbl_load_fpn2
short load_fpn2_4 - tbl_load_fpn2
short load_fpn2_5 - tbl_load_fpn2
short load_fpn2_6 - tbl_load_fpn2
short load_fpn2_7 - tbl_load_fpn2
load_fpn2_0:
mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
mov.l 4+EXC_FP0(%a6), 4+FP_DST(%a6)
mov.l 8+EXC_FP0(%a6), 8+FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_1:
mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
mov.l 4+EXC_FP1(%a6), 4+FP_DST(%a6)
mov.l 8+EXC_FP1(%a6), 8+FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_2:
fmovm.x &0x20, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_3:
fmovm.x &0x10, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_4:
fmovm.x &0x08, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_5:
fmovm.x &0x04, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_6:
fmovm.x &0x02, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
load_fpn2_7:
fmovm.x &0x01, FP_DST(%a6)
lea FP_DST(%a6), %a0
rts
#############################################################################
#########################################################################
# XDEF **************************************************************** #
# store_fpreg(): store an fp value to the fpreg designated d0. #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# fp0 = extended precision value to store #
# d0 = index of floating-point register #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Store the value in fp0 to the FP register designated by the #
# value in d0. The FP number can be DENORM or SNAN so we have to be #
# careful that we don't take an exception here. #
# #
#########################################################################
global store_fpreg
store_fpreg:
mov.w (tbl_store_fpreg.b,%pc,%d0.w*2), %d0
jmp (tbl_store_fpreg.b,%pc,%d0.w*1)
tbl_store_fpreg:
short store_fpreg_0 - tbl_store_fpreg
short store_fpreg_1 - tbl_store_fpreg
short store_fpreg_2 - tbl_store_fpreg
short store_fpreg_3 - tbl_store_fpreg
short store_fpreg_4 - tbl_store_fpreg
short store_fpreg_5 - tbl_store_fpreg
short store_fpreg_6 - tbl_store_fpreg
short store_fpreg_7 - tbl_store_fpreg
store_fpreg_0:
fmovm.x &0x80, EXC_FP0(%a6)
rts
store_fpreg_1:
fmovm.x &0x80, EXC_FP1(%a6)
rts
store_fpreg_2:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x20
rts
store_fpreg_3:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x10
rts
store_fpreg_4:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x08
rts
store_fpreg_5:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x04
rts
store_fpreg_6:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x02
rts
store_fpreg_7:
fmovm.x &0x01, -(%sp)
fmovm.x (%sp)+, &0x01
rts
#########################################################################
# XDEF **************************************************************** #
# _denorm(): denormalize an intermediate result #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = points to the operand to be denormalized #
# (in the internal extended format) #
# #
# d0 = rounding precision #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to the denormalized result #
# (in the internal extended format) #
# #
# d0 = guard,round,sticky #
# #
# ALGORITHM *********************************************************** #
# According to the exponent underflow threshold for the given #
# precision, shift the mantissa bits to the right in order to raise the #
# exponent of the operand to the threshold value. While shifting the #
# mantissa bits right, maintain the value of the guard, round, and #
# sticky bits. #
# other notes: #
# (1) _denorm() is called by the underflow routines #
# (2) _denorm() does NOT affect the status register #
# #
#########################################################################
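#
# Illustrative note (not part of the emulation code): the bit-for-bit effect
# of denormalizing by one place, with guard/round/sticky kept in the low
# three bits of a separate word, can be sketched in C as follows (names are
# made up; the real code below works on 32-bit chunks with bitfield ops):
#
#	#include <stdint.h>
#
#	/* shift the 64-bit mantissa right 'n' places; g=bit2, r=bit1, s=bit0 */
#	static void denorm_sketch(uint64_t *man, unsigned *grs, int n)
#	{
#		while (n-- > 0) {
#			unsigned out = (unsigned)(*man & 1);	/* bit leaving the mantissa */
#			unsigned g = (*grs >> 2) & 1;
#			unsigned r = (*grs >> 1) & 1;
#			unsigned s = (*grs & 1) | r;		/* old r,s fold into sticky */
#			*grs = (out << 2) | (g << 1) | s;	/* new g = out, new r = old g */
#			*man >>= 1;
#		}
#	}
#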
#
# table of exponent threshold values for each precision
#
tbl_thresh:
short 0x0
short sgl_thresh
short dbl_thresh
global _denorm
_denorm:
#
# Load the exponent threshold for the precision selected and check
# to see if (threshold - exponent) is > 65 in which case we can
# simply calculate the sticky bit and zero the mantissa. otherwise
# we have to call the denormalization routine.
#
lsr.b &0x2, %d0 # shift prec to lo bits
mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
mov.w %d1, %d0 # copy d1 into d0
sub.w FTEMP_EX(%a0), %d0 # diff = threshold - exp
cmpi.w %d0, &66 # is diff > 65? (mant + g,r bits)
bpl.b denorm_set_stky # yes; just calc sticky
clr.l %d0 # clear g,r,s
btst &inex2_bit, FPSR_EXCEPT(%a6) # yes; was INEX2 set?
beq.b denorm_call # no; don't change anything
bset &29, %d0 # yes; set sticky bit
denorm_call:
bsr.l dnrm_lp # denormalize the number
rts
#
# all bits would have been shifted off during the denorm, so simply
# calculate if the sticky should be set and clear the entire mantissa.
#
denorm_set_stky:
mov.l &0x20000000, %d0 # set sticky bit in return value
mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
clr.l FTEMP_HI(%a0) # zero the ms mantissa
clr.l FTEMP_LO(%a0) # zero the ls mantissa
rts
# #
# dnrm_lp(): denormalize mantissa until exponent reaches threshold #
# #
# INPUT: #
# %a0 : points to the operand to be denormalized #
# %d0{31:29} : initial guard,round,sticky #
# %d1{15:0} : denormalization threshold #
# OUTPUT: #
# %a0 : points to the denormalized operand #
# %d0{31:29} : final guard,round,sticky #
# #
# *** Local Equates *** #
set GRS, L_SCR2 # g,r,s temp storage
set FTEMP_LO2, L_SCR1 # FTEMP_LO copy
global dnrm_lp
dnrm_lp:
#
# make a copy of FTEMP_LO and place the g,r,s bits directly after it
# in memory so as to make the bitfield extraction for denormalization easier.
#
mov.l FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
mov.l %d0, GRS(%a6) # place g,r,s after it
#
# check to see how much less than the underflow threshold the operand
# exponent is.
#
mov.l %d1, %d0 # copy the denorm threshold
sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
ble.b dnrm_no_lp # d1 <= 0
cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
blt.b case_1 # yes
cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
blt.b case_2 # yes
bra.w case_3 # (d1 >= 64)
#
# No denormalization necessary
#
dnrm_no_lp:
mov.l GRS(%a6), %d0 # restore original g,r,s
rts
#
# case (0<d1<32)
#
# %d0 = denorm threshold
# %d1 = "n" = amt to shift
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# \ \ \ \
# <-(n)-><-(32 - n)-><------(32)-------><------(32)------->
# ---------------------------------------------------------
# |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
# ---------------------------------------------------------
#
case_1:
mov.l %d2, -(%sp) # create temp storage
mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
mov.l &32, %d0
sub.w %d1, %d0 # %d0 = 32 - %d1
cmpi.w %d1, &29 # is shft amt >= 29
blt.b case1_extract # no; no fix needed
mov.b GRS(%a6), %d2
or.b %d2, 3+FTEMP_LO2(%a6)
case1_extract:
bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
bfextu FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
mov.l %d2, FTEMP_HI(%a0) # store new FTEMP_HI
mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
bftst %d0{&2:&30} # were bits shifted off?
beq.b case1_sticky_clear # no; go finish
bset &rnd_stky_bit, %d0 # yes; set sticky bit
case1_sticky_clear:
and.l &0xe0000000, %d0 # clear all but G,R,S
mov.l (%sp)+, %d2 # restore temp register
rts
#
# case (32<=d1<64)
#
# %d0 = denorm threshold
# %d1 = "n" = amt to shift
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
# \ \ \
# \ \ \
# \ \ -------------------
# \ -------------------- \
# ------------------- \ \
# \ \ \
# \ \ \
# \ \ \
# <-------(32)------><-(n)-><-(32 - n)-><------(32)------->
# ---------------------------------------------------------
# |0...............0|0....0| NEW_LO |grs |
# ---------------------------------------------------------
#
case_2:
mov.l %d2, -(%sp) # create temp storage
mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
subi.w &0x20, %d1 # %d1 now between 0 and 32
mov.l &0x20, %d0
sub.w %d1, %d0 # %d0 = 32 - %d1
# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
# the number of bits to check for the sticky detect.
# it only plays a role in shift amounts of 61-63.
mov.b GRS(%a6), %d2
or.b %d2, 3+FTEMP_LO2(%a6)
bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
bftst %d1{&2:&30} # were any bits shifted off?
bne.b case2_set_sticky # yes; set sticky bit
bftst FTEMP_LO2(%a6){%d0:&31} # were any bits shifted off?
bne.b case2_set_sticky # yes; set sticky bit
mov.l %d1, %d0 # move new G,R,S to %d0
bra.b case2_end
case2_set_sticky:
mov.l %d1, %d0 # move new G,R,S to %d0
bset &rnd_stky_bit, %d0 # set sticky bit
case2_end:
clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
mov.l %d2, FTEMP_LO(%a0) # store FTEMP_LO
and.l &0xe0000000, %d0 # clear all but G,R,S
mov.l (%sp)+,%d2 # restore temp register
rts
#
# case (d1>=64)
#
# %d0 = denorm threshold
# %d1 = amt to shift
#
case_3:
mov.w %d0, FTEMP_EX(%a0) # insert denorm threshold
cmpi.w %d1, &65 # is shift amt > 65?
blt.b case3_64 # no; it's == 64
beq.b case3_65 # no; it's == 65
#
# case (d1>65)
#
# Shift value is > 65 and out of range. All bits are shifted off.
# Return a zero mantissa with the sticky bit set
#
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
mov.l &0x20000000, %d0 # set sticky bit
rts
#
# case (d1 == 64)
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-------(32)------>
# \ \
# \ \
# \ \
# \ ------------------------------
# ------------------------------- \
# \ \
# \ \
# \ \
# <-------(32)------>
# ---------------------------------------------------------
# |0...............0|0................0|grs |
# ---------------------------------------------------------
#
case3_64:
mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
mov.l %d0, %d1 # make a copy
and.l &0xc0000000, %d0 # extract G,R
and.l &0x3fffffff, %d1 # extract other bits
bra.b case3_complete
#
# case (d1 == 65)
#
# ---------------------------------------------------------
# | FTEMP_HI | FTEMP_LO |grs000.........000|
# ---------------------------------------------------------
# <-------(32)------>
# \ \
# \ \
# \ \
# \ ------------------------------
# -------------------------------- \
# \ \
# \ \
# \ \
# <-------(31)----->
# ---------------------------------------------------------
# |0...............0|0................0|0rs |
# ---------------------------------------------------------
#
case3_65:
mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
and.l &0x80000000, %d0 # extract R bit
lsr.l &0x1, %d0 # shift high bit into R bit
and.l &0x7fffffff, %d1 # extract other bits
case3_complete:
# last operation done was an "and" of the bits shifted off so the condition
# codes are already set so branch accordingly.
bne.b case3_set_sticky # yes; go set new sticky
tst.l FTEMP_LO(%a0) # were any bits shifted off?
bne.b case3_set_sticky # yes; go set new sticky
tst.b GRS(%a6) # were any bits shifted off?
bne.b case3_set_sticky # yes; go set new sticky
#
# no bits were shifted off, so don't set the sticky bit. the guard and
# round bits are already in %d0; the entire mantissa is zero.
#
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
#
# some bits were shifted off so set the sticky bit.
# the entire mantissa is zero.
#
case3_set_sticky:
bset &rnd_stky_bit,%d0 # set new sticky bit
clr.l FTEMP_HI(%a0) # clear hi(mantissa)
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
#########################################################################
# XDEF **************************************************************** #
# _round(): round result according to precision/mode #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = ptr to input operand in internal extended format #
# d1(hi) = contains rounding precision: #
# ext = $0000xxxx #
# sgl = $0004xxxx #
# dbl = $0008xxxx #
# d1(lo) = contains rounding mode: #
# RN = $xxxx0000 #
# RZ = $xxxx0001 #
# RM = $xxxx0002 #
# RP = $xxxx0003 #
# d0{31:29} = contains the g,r,s bits (extended) #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to rounded result #
# #
# ALGORITHM *********************************************************** #
# On return the value pointed to by a0 is correctly rounded, #
# a0 is preserved and the g-r-s bits in d0 are cleared. #
# The result is not typed - the tag field is invalid. The #
# result is still in the internal extended format. #
# #
# The INEX bit of USER_FPSR will be set if the rounded result was #
# inexact (i.e. if any of the g-r-s bits were set). #
# #
#########################################################################
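#
# Illustrative note (not part of the emulation code): the round-to-nearest
# decision described above (add 1 to the l-bit when g=1; clear the l-bit
# when r=s=0) is equivalent to the usual round-to-nearest-even rule, which
# can be sketched in C as (names are made up):
#
#	/* return 1 if one unit must be added to the kept mantissa */
#	static int rnd_near_sketch(unsigned l, unsigned g, unsigned r, unsigned s)
#	{
#		if (!g)
#			return 0;	/* less than half an ulp: truncate */
#		if (r || s)
#			return 1;	/* more than half an ulp: round up */
#		return l;		/* exact tie: round to even */
#	}
#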
global _round
_round:
#
# ext_grs() looks at the rounding precision and sets the appropriate
# G,R,S bits.
# If (G,R,S == 0) then result is exact and round is done, else set
# the inex flag in status reg and continue.
#
bsr.l ext_grs # extract G,R,S
tst.l %d0 # are G,R,S zero?
beq.w truncate # yes; round is complete
or.w &inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
#
# Use rounding mode as an index into a jump table for these modes.
# All of the following assumes grs != 0.
#
mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
jmp (tbl_mode.b,%pc,%a1) # jmp to rnd mode handler
tbl_mode:
short rnd_near - tbl_mode
short truncate - tbl_mode # RZ always truncates
short rnd_mnus - tbl_mode
short rnd_plus - tbl_mode
#################################################################
# ROUND PLUS INFINITY #
# #
# If sign of fp number = 0 (positive), then add 1 to l. #
#################################################################
rnd_plus:
tst.b FTEMP_SGN(%a0) # check for sign
bmi.w truncate # if negative then truncate
mov.l &0xffffffff, %d0 # force g,r,s to be all f's
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
#################################################################
# ROUND MINUS INFINITY #
# #
# If sign of fp number = 1 (negative), then add 1 to l. #
#################################################################
rnd_mnus:
tst.b FTEMP_SGN(%a0) # check for sign
bpl.w truncate # if positive then truncate
mov.l &0xffffffff, %d0 # force g,r,s to be all f's
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
#################################################################
# ROUND NEAREST #
# #
# If (g=1), then add 1 to l and if (r=s=0), then clear l #
# Note that this will round to even in case of a tie. #
#################################################################
rnd_near:
asl.l &0x1, %d0 # shift g-bit to c-bit
bcc.w truncate # if (g=0) then truncate
swap %d1 # set up d1 for round prec.
cmpi.b %d1, &s_mode # is prec = sgl?
beq.w add_sgl # yes
bgt.w add_dbl # no; it's dbl
bra.w add_ext # no; it's ext
# *** LOCAL EQUATES ***
set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
#########################
# ADD SINGLE #
#########################
add_sgl:
add.l &ad_1_sgl, FTEMP_HI(%a0)
bcc.b scc_clr # no mantissa overflow
roxr.w FTEMP_HI(%a0) # shift v-bit back in
roxr.w FTEMP_HI+2(%a0) # shift v-bit back in
add.w &0x1, FTEMP_EX(%a0) # and incr exponent
scc_clr:
tst.l %d0 # test for rs = 0
bne.b sgl_done
and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
sgl_done:
and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
clr.l FTEMP_LO(%a0) # clear lo(mantissa)
rts
#########################
# ADD EXTENDED #
#########################
add_ext:
addq.l &1,FTEMP_LO(%a0) # add 1 to l-bit
bcc.b xcc_clr # test for carry out
addq.l &1,FTEMP_HI(%a0) # propagate carry
bcc.b xcc_clr
roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_LO(%a0)
roxr.w FTEMP_LO+2(%a0)
add.w &0x1,FTEMP_EX(%a0) # and inc exp
xcc_clr:
tst.l %d0 # test rs = 0
bne.b add_ext_done
and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
add_ext_done:
rts
#########################
# ADD DOUBLE #
#########################
add_dbl:
add.l &ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
bcc.b dcc_clr # no carry
addq.l &0x1, FTEMP_HI(%a0) # propagate carry
bcc.b dcc_clr # no carry
roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
roxr.w FTEMP_LO(%a0)
roxr.w FTEMP_LO+2(%a0)
addq.w &0x1, FTEMP_EX(%a0) # incr exponent
dcc_clr:
tst.l %d0 # test for rs = 0
bne.b dbl_done
and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
dbl_done:
and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
rts
###########################
# Truncate all other bits #
###########################
truncate:
swap %d1 # select rnd prec
cmpi.b %d1, &s_mode # is prec sgl?
beq.w sgl_done # yes
bgt.b dbl_done # no; it's dbl
rts # no; it's ext
#
# ext_grs(): extract guard, round and sticky bits according to
# rounding precision.
#
# INPUT
# d0 = extended precision g,r,s (in d0{31:29})
# d1 = {PREC,ROUND}
# OUTPUT
# d0{31:29} = guard, round, sticky
#
# The ext_grs routine extracts the guard/round/sticky bits according to the
# selected rounding precision. It is called by the round subroutine
# only. All registers except d0 are kept intact. d0 becomes an
# updated guard,round,sticky in d0{31:29}
#
# Notes: ext_grs uses the rounding PREC, and therefore has to swap d1
# prior to use and must restore d1 to its original value. this
# routine is tightly tied to the round routine and not meant to
# uphold standard subroutine calling practices.
#
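#
# Illustrative note (not part of the emulation code): for single precision
# the kept mantissa is bits 63:40 of the 64-bit mantissa (including the
# j-bit), so the new guard/round are bits 39:38 and everything below them,
# together with the old extended g,r,s, folds into the new sticky. A C
# sketch (names are made up; here g,r,s land in the low 3 bits):
#
#	#include <stdint.h>
#
#	static unsigned ext_grs_sgl_sketch(uint64_t man, unsigned old_grs)
#	{
#		unsigned g = (man >> 39) & 1;
#		unsigned r = (man >> 38) & 1;
#		unsigned s = ((man & 0x3fffffffffULL) != 0) || (old_grs != 0);
#		return (g << 2) | (r << 1) | s;
#	}
#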
ext_grs:
swap %d1 # have d1.w point to round precision
tst.b %d1 # is rnd prec = extended?
bne.b ext_grs_not_ext # no; go handle sgl or dbl
#
# %d0 actually already hold g,r,s since _round() had it before calling
# this function. so, as long as we don't disturb it, we are "returning" it.
#
ext_grs_ext:
swap %d1 # yes; return to correct positions
rts
ext_grs_not_ext:
movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
cmpi.b %d1, &s_mode # is rnd prec = sgl?
bne.b ext_grs_dbl # no; go handle dbl
#
# sgl:
# 96 64 40 32 0
# -----------------------------------------------------
# | EXP |XXXXXXX| |xx | |grs|
# -----------------------------------------------------
#        <--(24)--->  the two bits just right of the 24-bit sgl mantissa
#                     become the new g,r; all bits below them (and the
#                     old g,r,s) OR into the new sticky
#
ext_grs_sgl:
bfextu FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
mov.l &30, %d2 # of the sgl prec. limits
lsl.l %d2, %d3 # shift g-r bits to MSB of d3
mov.l FTEMP_HI(%a0), %d2 # get word 2 for s-bit test
and.l &0x0000003f, %d2 # s bit is the or of all other
bne.b ext_grs_st_stky # bits to the right of g-r
tst.l FTEMP_LO(%a0) # test lower mantissa
bne.b ext_grs_st_stky # if any are set, set sticky
tst.l %d0 # test original g,r,s
bne.b ext_grs_st_stky # if any are set, set sticky
bra.b ext_grs_end_sd # if words 3 and 4 are clr, exit
#
# dbl:
# 96 64 32 11 0
# -----------------------------------------------------
# | EXP |XXXXXXX| | |xx |grs|
# -----------------------------------------------------
#        the two bits just right of the 53-bit dbl mantissa become the
#        new g,r; all bits below them (and the old g,r,s) OR into the
#        new sticky
#
ext_grs_dbl:
bfextu FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
mov.l &30, %d2 # of the dbl prec. limits
lsl.l %d2, %d3 # shift g-r bits to the MSB of d3
mov.l FTEMP_LO(%a0), %d2 # get lower mantissa for s-bit test
and.l &0x000001ff, %d2 # s bit is the or-ing of all
bne.b ext_grs_st_stky # other bits to the right of g-r
tst.l %d0 # test original g,r,s
bne.b ext_grs_st_stky # if any are set, set sticky
bra.b ext_grs_end_sd # if clear, exit
ext_grs_st_stky:
bset &rnd_stky_bit, %d3 # set sticky bit
ext_grs_end_sd:
mov.l %d3, %d0 # return grs to d0
movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
swap %d1 # restore d1 to original
rts
#########################################################################
# norm(): normalize the mantissa of an extended precision input. the #
# input operand should not be normalized already. #
# #
# XDEF **************************************************************** #
# norm() #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# a0 = pointer fp extended precision operand to normalize #
# #
# OUTPUT ************************************************************** #
# d0 = number of bit positions the mantissa was shifted #
# a0 = the input operand's mantissa is normalized; the exponent #
# is unchanged. #
# #
#########################################################################
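#
# Illustrative note (not part of the emulation code): the normalization
# below is simply "shift the 64-bit mantissa left until the j-bit is set
# and report the shift count"; a C sketch (assumes man != 0):
#
#	#include <stdint.h>
#
#	static int norm_sketch(uint64_t *man)
#	{
#		int shift = 0;
#		while (!(*man & 0x8000000000000000ULL)) {
#			*man <<= 1;
#			shift++;
#		}
#		return shift;	/* the caller adjusts the exponent itself */
#	}
#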
global norm
norm:
mov.l %d2, -(%sp) # create some temp regs
mov.l %d3, -(%sp)
mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
bfffo %d0{&0:&32}, %d2 # how many places to shift?
beq.b norm_lo # hi(man) is all zeroes!
norm_hi:
lsl.l %d2, %d0 # left shift hi(man)
bfextu %d1{&0:%d2}, %d3 # extract lo bits
or.l %d3, %d0 # create hi(man)
lsl.l %d2, %d1 # create lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
norm_lo:
bfffo %d1{&0:&32}, %d2 # how many places to shift?
lsl.l %d2, %d1 # shift lo(man)
add.l &32, %d2 # add 32 to shft amount
mov.l %d1, FTEMP_HI(%a0) # store hi(man)
clr.l FTEMP_LO(%a0) # lo(man) is now zero
mov.l %d2, %d0 # return shift amount
mov.l (%sp)+, %d3 # restore temp regs
mov.l (%sp)+, %d2
rts
#########################################################################
# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
# - returns corresponding optype tag #
# #
# XDEF **************************************************************** #
# unnorm_fix() #
# #
# XREF **************************************************************** #
# norm() - normalize the mantissa #
# #
# INPUT *************************************************************** #
# a0 = pointer to unnormalized extended precision number #
# #
# OUTPUT ************************************************************** #
# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
# a0 = input operand has been converted to a norm, denorm, or #
# zero; both the exponent and mantissa are changed. #
# #
#########################################################################
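#
# Illustrative note (not part of the emulation code): the decision made
# below can be sketched in C, using the package's NORM/DENORM/ZERO tags
# as small integer constants (other names are made up):
#
#	#include <stdint.h>
#
#	static int unnorm_fix_sketch(uint16_t *se, uint64_t *man)
#	{
#		unsigned exp = *se & 0x7fff;
#		int shift = 0;
#		uint64_t m = *man;
#
#		if (m == 0) {			/* mantissa all zero: real zero */
#			*se &= 0x8000;
#			return ZERO;
#		}
#		while (!(m & 0x8000000000000000ULL)) {
#			m <<= 1;
#			shift++;		/* shifts for full normalization */
#		}
#		if ((unsigned)shift <= exp) {	/* exponent stays >= 0: NORM */
#			*man = m;
#			*se = (*se & 0x8000) | (exp - shift);
#			return NORM;
#		}
#		*man <<= exp;			/* denorm only until exp = 0 */
#		*se &= 0x8000;
#		return DENORM;
#	}
#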
global unnorm_fix
unnorm_fix:
bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
bne.b unnorm_shift # hi(man) is not all zeroes
#
# hi(man) is all zeroes so see if any bits in lo(man) are set
#
unnorm_chk_lo:
bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
beq.w unnorm_zero # yes
add.w &32, %d0 # no; fix shift distance
#
# d0 = # shifts needed for complete normalization
#
unnorm_shift:
clr.l %d1 # clear top word
mov.w FTEMP_EX(%a0), %d1 # extract exponent
and.w &0x7fff, %d1 # strip off sgn
cmp.w %d0, %d1 # will denorm push exp < 0?
bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
#
# exponent would not go < 0. Therefore, number stays normalized
#
sub.w %d0, %d1 # shift exponent value
mov.w FTEMP_EX(%a0), %d0 # load old exponent
and.w &0x8000, %d0 # save old sign
or.w %d0, %d1 # {sgn,new exp}
mov.w %d1, FTEMP_EX(%a0) # insert new exponent
bsr.l norm # normalize UNNORM
mov.b &NORM, %d0 # return new optype tag
rts
#
# exponent would go < 0, so only denormalize until exp = 0
#
unnorm_nrm_zero:
cmp.b %d1, &32 # is exp <= 32?
bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # extract new lo(man)
mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# only mantissa bits set are in lo(man)
#
unnorm_nrm_zero_lrg:
sub.w &32, %d1 # adjust shft amt by 32
mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
lsl.l %d1, %d0 # left shift lo(man)
mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
clr.l FTEMP_LO(%a0) # lo(man) = 0
and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
mov.b &DENORM, %d0 # return new optype tag
rts
#
# whole mantissa is zero so this UNNORM is actually a zero
#
unnorm_zero:
and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
mov.b &ZERO, %d0 # fix optype tag
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_x(): return the optype of the input ext fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# If it's an unnormalized zero, alter the operand and force it #
# to be a normal zero. #
# #
#########################################################################
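#
# Illustrative note (not part of the emulation code): the classification
# below, sketched in C with the package's tag names as small integer
# constants (the 64-bit mantissa includes the j-bit in its msb):
#
#	#include <stdint.h>
#
#	static int set_tag_x_sketch(uint16_t se, uint64_t man)
#	{
#		unsigned exp = se & 0x7fff;
#
#		if (exp == 0x7fff) {			/* INF or NAN */
#			if ((man << 1) == 0)		/* j-bit is a don't care */
#				return INF;
#			return (man & 0x4000000000000000ULL) ? QNAN : SNAN;
#		}
#		if (man & 0x8000000000000000ULL)	/* j-bit set */
#			return NORM;
#		if (exp != 0)
#			return man ? UNNORM : ZERO;	/* unnormalized zero -> ZERO */
#		return man ? DENORM : ZERO;
#	}
#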
global set_tag_x
set_tag_x:
mov.w FTEMP_EX(%a0), %d0 # extract exponent
andi.w &0x7fff, %d0 # strip off sign
cmpi.w %d0, &0x7fff # is (EXP == MAX)?
beq.b inf_or_nan_x
not_inf_or_nan_x:
btst &0x7,FTEMP_HI(%a0)
beq.b not_norm_x
is_norm_x:
mov.b &NORM, %d0
rts
not_norm_x:
tst.w %d0 # is exponent = 0?
bne.b is_unnorm_x
not_unnorm_x:
tst.l FTEMP_HI(%a0)
bne.b is_denorm_x
tst.l FTEMP_LO(%a0)
bne.b is_denorm_x
is_zero_x:
mov.b &ZERO, %d0
rts
is_denorm_x:
mov.b &DENORM, %d0
rts
# we must now distinguish "unnormalized zeroes", which we
# must convert to zero.
is_unnorm_x:
tst.l FTEMP_HI(%a0)
bne.b is_unnorm_reg_x
tst.l FTEMP_LO(%a0)
bne.b is_unnorm_reg_x
# it's an "unnormalized zero". let's convert it to an actual zero...
andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
mov.b &ZERO, %d0
rts
is_unnorm_reg_x:
mov.b &UNNORM, %d0
rts
inf_or_nan_x:
tst.l FTEMP_LO(%a0)
bne.b is_nan_x
mov.l FTEMP_HI(%a0), %d0
and.l &0x7fffffff, %d0 # msb is a don't care!
bne.b is_nan_x
is_inf_x:
mov.b &INF, %d0
rts
is_nan_x:
btst &0x6, FTEMP_HI(%a0)
beq.b is_snan_x
mov.b &QNAN, %d0
rts
is_snan_x:
mov.b &SNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_d(): return the optype of the input dbl fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = points to double precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# #
#########################################################################
global set_tag_d
set_tag_d:
mov.l FTEMP(%a0), %d0
mov.l %d0, %d1
andi.l &0x7ff00000, %d0
beq.b zero_or_denorm_d
cmpi.l %d0, &0x7ff00000
beq.b inf_or_nan_d
is_norm_d:
mov.b &NORM, %d0
rts
zero_or_denorm_d:
and.l &0x000fffff, %d1
bne is_denorm_d
tst.l 4+FTEMP(%a0)
bne is_denorm_d
is_zero_d:
mov.b &ZERO, %d0
rts
is_denorm_d:
mov.b &DENORM, %d0
rts
inf_or_nan_d:
and.l &0x000fffff, %d1
bne is_nan_d
tst.l 4+FTEMP(%a0)
bne is_nan_d
is_inf_d:
mov.b &INF, %d0
rts
is_nan_d:
btst &19, %d1
bne is_qnan_d
is_snan_d:
mov.b &SNAN, %d0
rts
is_qnan_d:
mov.b &QNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# set_tag_s(): return the optype of the input sgl fp number #
# #
# XREF **************************************************************** #
# None #
# #
# INPUT *************************************************************** #
# a0 = pointer to single precision operand #
# #
# OUTPUT ************************************************************** #
# d0 = value of type tag #
# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
# #
# ALGORITHM *********************************************************** #
# Simply test the exponent, j-bit, and mantissa values to #
# determine the type of operand. #
# #
#########################################################################
global set_tag_s
set_tag_s:
mov.l FTEMP(%a0), %d0
mov.l %d0, %d1
andi.l &0x7f800000, %d0
beq.b zero_or_denorm_s
cmpi.l %d0, &0x7f800000
beq.b inf_or_nan_s
is_norm_s:
mov.b &NORM, %d0
rts
zero_or_denorm_s:
and.l &0x007fffff, %d1
bne is_denorm_s
is_zero_s:
mov.b &ZERO, %d0
rts
is_denorm_s:
mov.b &DENORM, %d0
rts
inf_or_nan_s:
and.l &0x007fffff, %d1
bne is_nan_s
is_inf_s:
mov.b &INF, %d0
rts
is_nan_s:
btst &22, %d1
bne is_qnan_s
is_snan_s:
mov.b &SNAN, %d0
rts
is_qnan_s:
mov.b &QNAN, %d0
rts
#########################################################################
# XDEF **************************************************************** #
# unf_res(): routine to produce default underflow result of a #
# scaled extended precision number; this is used by #
# fadd/fdiv/fmul/etc. emulation routines. #
# unf_res4(): same as above but for fsglmul/fsgldiv which use #
# single round prec and extended prec mode. #
# #
# XREF **************************************************************** #
# _denorm() - denormalize according to scale factor #
# _round() - round denormalized number according to rnd prec #
# #
# INPUT *************************************************************** #
# a0 = pointer to extended precision operand #
# d0 = scale factor #
# d1 = rounding precision/mode #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to default underflow result in extended precision #
# d0.b = result FPSR_cc which caller may or may not want to save #
# #
# ALGORITHM *********************************************************** #
# Convert the input operand to "internal format" which means the #
# exponent is extended to 16 bits and the sign is stored in the unused #
# portion of the extended precision operand. Denormalize the number #
# according to the scale factor passed in d0. Then, round the #
# denormalized result. #
# Set the FPSR_exc bits as appropriate but return the cc bits in #
# d0 in case the caller doesn't want to save them (as is the case for #
# fmove out). #
# unf_res4() for fsglmul/fsgldiv forces the denorm to extended #
# precision and the rounding mode to single. #
# #
#########################################################################
global unf_res
unf_res:
mov.l %d1, -(%sp) # save rnd prec,mode on stack
btst &0x7, FTEMP_EX(%a0) # make "internal" format
sne FTEMP_SGN(%a0)
mov.w FTEMP_EX(%a0), %d1 # extract exponent
and.w &0x7fff, %d1
sub.w %d0, %d1
mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent
mov.l %a0, -(%sp) # save operand ptr during calls
mov.l 0x4(%sp),%d0 # pass rnd prec.
andi.w &0x00c0,%d0
lsr.w &0x4,%d0
bsr.l _denorm # denorm result
mov.l (%sp),%a0
mov.w 0x6(%sp),%d1 # load prec:mode into %d1
andi.w &0xc0,%d1 # extract rnd prec
lsr.w &0x4,%d1
swap %d1
mov.w 0x6(%sp),%d1
andi.w &0x30,%d1
lsr.w &0x4,%d1
bsr.l _round # round the denorm
mov.l (%sp)+, %a0
# result is now rounded properly. convert back to normal format
bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
beq.b unf_res_chkifzero # no; result is positive
bset &0x7, FTEMP_EX(%a0) # set result sgn
clr.b FTEMP_SGN(%a0) # clear temp sign
# the number may have become zero after rounding. set ccodes accordingly.
unf_res_chkifzero:
clr.l %d0
tst.l FTEMP_HI(%a0) # is value now a zero?
bne.b unf_res_cont # no
tst.l FTEMP_LO(%a0)
bne.b unf_res_cont # no
# bset &z_bit, FPSR_CC(%a6) # yes; set zero ccode bit
bset &z_bit, %d0 # yes; set zero ccode bit
unf_res_cont:
#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
btst &inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
beq.b unf_res_end # no
bset &aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
unf_res_end:
add.l &0x4, %sp # clear stack
rts
# unf_res() for fsglmul() and fsgldiv().
global unf_res4
unf_res4:
mov.l %d1,-(%sp) # save rnd prec,mode on stack
btst &0x7,FTEMP_EX(%a0) # make "internal" format
sne FTEMP_SGN(%a0)
mov.w FTEMP_EX(%a0),%d1 # extract exponent
and.w &0x7fff,%d1
sub.w %d0,%d1
mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent
mov.l %a0,-(%sp) # save operand ptr during calls
clr.l %d0 # force rnd prec = ext
bsr.l _denorm # denorm result
mov.l (%sp),%a0
mov.w &s_mode,%d1 # force rnd prec = sgl
swap %d1
mov.w 0x6(%sp),%d1 # load rnd mode
andi.w &0x30,%d1 # extract rnd prec
lsr.w &0x4,%d1
bsr.l _round # round the denorm
mov.l (%sp)+,%a0
# result is now rounded properly. convert back to normal format
bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
beq.b unf_res4_chkifzero # no; result is positive
bset &0x7,FTEMP_EX(%a0) # set result sgn
clr.b FTEMP_SGN(%a0) # clear temp sign
# the number may have become zero after rounding. set ccodes accordingly.
unf_res4_chkifzero:
clr.l %d0
tst.l FTEMP_HI(%a0) # is value now a zero?
bne.b unf_res4_cont # no
tst.l FTEMP_LO(%a0)
bne.b unf_res4_cont # no
# bset &z_bit,FPSR_CC(%a6) # yes; set zero ccode bit
bset &z_bit,%d0 # yes; set zero ccode bit
unf_res4_cont:
#
# can inex1 also be set along with unfl and inex2???
#
# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
#
btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
beq.b unf_res4_end # no
bset &aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
unf_res4_end:
add.l &0x4,%sp # clear stack
rts
#########################################################################
# XDEF **************************************************************** #
# ovf_res(): routine to produce the default overflow result of #
# an overflowing number. #
# ovf_res2(): same as above but the rnd mode/prec are passed #
# differently. #
# #
# XREF **************************************************************** #
# none #
# #
# INPUT *************************************************************** #
# d1.b = '-1' => (-); '0' => (+) #
# ovf_res(): #
# d0 = rnd mode/prec #
# ovf_res2(): #
# hi(d0) = rnd prec #
# lo(d0) = rnd mode #
# #
# OUTPUT ************************************************************** #
# a0 = points to extended precision result #
# d0.b = condition code bits #
# #
# ALGORITHM *********************************************************** #
# The default overflow result can be determined by the sign of #
# the result and the rounding mode/prec in effect. These bits are #
# concatenated together to create an index into the default result #
# table. A pointer to the correct result is returned in a0. The #
# resulting condition codes are returned in d0 in case the caller #
# doesn't want FPSR_cc altered (as is the case for fmove out). #
# #
#########################################################################
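#
# Illustrative note (not part of the emulation code): the tables below
# implement the usual IEEE default-overflow rule, which in C reads roughly
# (names are made up; 1 means "return infinity", 0 means "return the
# largest finite number for the rounding precision"):
#
#	static int ovf_returns_inf_sketch(int sign_is_neg, int rnd_mode)
#	{
#		switch (rnd_mode) {
#		case 0: return 1;		/* RN: always infinity */
#		case 1: return 0;		/* RZ: always largest finite */
#		case 2: return sign_is_neg;	/* RM: infinity only if negative */
#		case 3: return !sign_is_neg;	/* RP: infinity only if positive */
#		}
#		return 1;
#	}
#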
global ovf_res
ovf_res:
andi.w &0x10,%d1 # keep result sign
lsr.b &0x4,%d0 # shift prec/mode
or.b %d0,%d1 # concat the two
mov.w %d1,%d0 # make a copy
lsl.b &0x1,%d1 # multiply d1 by 2
bra.b ovf_res_load
global ovf_res2
ovf_res2:
and.w &0x10, %d1 # keep result sign
or.b %d0, %d1 # insert rnd mode
swap %d0
or.b %d0, %d1 # insert rnd prec
mov.w %d1, %d0 # make a copy
lsl.b &0x1, %d1 # shift left by 1
#
# use the rounding mode, precision, and result sign as an index into the
# two tables below to fetch the default result and the result ccodes.
#
ovf_res_load:
mov.b (tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
rts
tbl_ovfl_cc:
byte 0x2, 0x0, 0x0, 0x2
byte 0x2, 0x0, 0x0, 0x2
byte 0x2, 0x0, 0x0, 0x2
byte 0x0, 0x0, 0x0, 0x0
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
tbl_ovfl_result:
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0x00000000,0x00000000,0x00000000,0x00000000
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
#########################################################################
# XDEF **************************************************************** #
# get_packed(): fetch a packed operand from memory and then #
# convert it to a floating-point binary number. #
# #
# XREF **************************************************************** #
# _dcalc_ea() - calculate the correct <ea> #
# _mem_read() - fetch the packed operand from memory #
# facc_in_x() - the fetch failed so jump to special exit code #
# decbin() - convert packed to binary extended precision #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# If no failure on _mem_read(): #
# FP_SRC(a6) = packed operand now as a binary FP number #
# #
# ALGORITHM *********************************************************** #
# Get the correct <ea> which is the value on the exception stack #
# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+. #
# Then, fetch the operand from memory. If the fetch fails, exit #
# through facc_in_x(). #
# If the packed operand is a ZERO,NAN, or INF, convert it to #
# its binary representation here. Else, call decbin() which will #
# convert the packed value to an extended precision binary value. #
# #
#########################################################################
# the stacked <ea> for packed is correct except for -(An).
# the base reg must be updated for both -(An) and (An)+.
global get_packed
get_packed:
mov.l &0xc,%d0 # packed is 12 bytes
bsr.l _dcalc_ea # fetch <ea>; correct An
lea FP_SRC(%a6),%a1 # pass: ptr to super dst
mov.l &0xc,%d0 # pass: 12 bytes
bsr.l _dmem_read # read packed operand
tst.l %d1 # did dfetch fail?
bne.l facc_in_x # yes
# The packed operand is an INF or a NAN if the exponent field is all ones.
bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
cmpi.w %d0,&0x7fff # INF or NAN?
bne.b gp_try_zero # no
rts # operand is an INF or NAN
# The packed operand is a zero if the mantissa is all zero, else it's
# a normal packed op.
gp_try_zero:
mov.b 3+FP_SRC(%a6),%d0 # get byte 4
andi.b &0x0f,%d0 # clear all but last nybble
bne.b gp_not_spec # not a zero
tst.l FP_SRC_HI(%a6) # is lw 2 zero?
bne.b gp_not_spec # not a zero
tst.l FP_SRC_LO(%a6) # is lw 3 zero?
bne.b gp_not_spec # not a zero
rts # operand is a ZERO
gp_not_spec:
lea FP_SRC(%a6),%a0 # pass: ptr to packed op
bsr.l decbin # convert to extended
fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
rts
#########################################################################
# decbin(): Converts normalized packed bcd value pointed to by register #
# a0 to extended-precision value in fp0. #
# #
# INPUT *************************************************************** #
# a0 = pointer to normalized packed bcd value #
# #
# OUTPUT ************************************************************** #
# fp0 = exact fp representation of the packed bcd value. #
# #
# ALGORITHM *********************************************************** #
# Expected is a normal bcd (i.e. non-exceptional; all inf, zero, #
# and NaN operands are dispatched without entering this routine) #
# value in 68881/882 format at location (a0). #
# #
# A1. Convert the bcd exponent to binary by successive adds and #
# muls. Set the sign according to SE. Subtract 16 to compensate #
# for the mantissa which is to be interpreted as 17 integer #
# digits, rather than 1 integer and 16 fraction digits. #
# Note: this operation can never overflow. #
# #
# A2. Convert the bcd mantissa to binary by successive #
# adds and muls in FP0. Set the sign according to SM. #
# The mantissa digits will be converted with the decimal point #
# assumed following the least-significant digit. #
# Note: this operation can never overflow. #
# #
# A3. Count the number of leading/trailing zeros in the #
# bcd string. If SE is positive, count the leading zeros; #
# if negative, count the trailing zeros. Set the adjusted #
# exponent equal to the exponent from A1 and the zero count #
# added if SE = 1 and subtracted if SE = 0. Scale the #
# mantissa the equivalent of forcing in the bcd value: #
# #
# SE = 0 a non-zero digit in the integer position #
# SE = 1 a non-zero digit in Mant0, lsd of the fraction #
# #
# this will insure that any value, regardless of its #
# representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted #
# consistently. #
# #
# A4. Calculate the factor 10^exp in FP1 using a table of #
# 10^(2^n) values. To reduce the error in forming factors #
# greater than 10^27, a directed rounding scheme is used with #
# tables rounded to RN, RM, and RP, according to the table #
# in the comments of the pwrten section. #
# #
# A5. Form the final binary number by scaling the mantissa by #
# the exponent factor. This is done by multiplying the #
# mantissa in FP0 by the factor in FP1 if the adjusted #
# exponent sign is positive, and dividing FP0 by FP1 if #
# it is negative. #
# #
# Clean up and return. Check if the final mul or div was inexact. #
# If so, set INEX1 in USER_FPSR. #
# #
#########################################################################
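#
# For orientation only, the conversion can be sketched in C roughly as
# follows (the name decbin_sketch is hypothetical; the sketch keeps
# everything in double precision and skips the zero append/strip step A3
# and the directed-rounding tables, so it is not bit-exact):
#
#	#include <math.h>
#
#	/* digits are passed most-significant first */
#	double decbin_sketch(int se, int sm, const int edig[3], const int mdig[17])
#	{
#		long e = 0;
#		double m = 0.0;
#		int i;
#
#		for (i = 0; i < 3; i++)		/* A1: exponent by mul/add */
#			e = e * 10 + edig[i];
#		if (se)
#			e = -e;
#		e -= 16;	/* mantissa is treated as 17 integer digits */
#
#		for (i = 0; i < 17; i++)	/* A2: mantissa by mul/add */
#			m = m * 10.0 + mdig[i];
#		if (sm)
#			m = -m;
#
#		return m * pow(10.0, (double)e);	/* A4/A5: scale by 10^exp */
#	}
#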
#
# PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
# to nearest, minus, and plus, respectively. The tables include
# 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
# is required until the power is greater than 27, however, all
# tables include the first 5 for ease of indexing.
#
RTABLE:
byte 0,0,0,0
byte 2,3,2,3
byte 2,3,3,2
byte 3,2,2,3
set FNIBS,7
set FSTRT,0
set ESTRT,4
set EDIGITS,2
global decbin
decbin:
mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
mov.l 0x8(%a0),FP_SCR0_LO(%a6)
lea FP_SCR0(%a6),%a0
movm.l &0x3c00,-(%sp) # save d2-d5
fmovm.x &0x1,-(%sp) # save fp1
#
# Calculate exponent:
# 1. Copy bcd value in memory for use as a working copy.
# 2. Calculate absolute value of exponent in d1 by mul and add.
# 3. Correct for exponent sign.
# 4. Subtract 16 to compensate for interpreting the mant as all integer digits.
# (i.e., all digits assumed left of the decimal point.)
#
# Register usage:
#
# calc_e:
# (*) d0: temp digit storage
# (*) d1: accumulator for binary exponent
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: first word of bcd
# ( ) a0: pointer to working bcd value
# ( ) a6: pointer to original bcd value
# (*) FP_SCR1: working copy of original bcd value
# (*) L_SCR1: copy of original exponent word
#
calc_e:
mov.l &EDIGITS,%d2 # loop count for the 3 exponent digits (dbf)
mov.l &ESTRT,%d3 # counter to pick up digits
mov.l (%a0),%d4 # get first word of bcd
clr.l %d1 # zero d1 for accumulator
e_gd:
mulu.l &0xa,%d1 # mul partial product by one digit place
bfextu %d4{%d3:&4},%d0 # get the digit and zero extend into d0
add.l %d0,%d1 # d1 = d1 + d0
addq.b &4,%d3 # advance d3 to the next digit
dbf.w %d2,e_gd # if we have used all 3 digits, exit loop
btst &30,%d4 # get SE
beq.b e_pos # don't negate if pos
neg.l %d1 # negate before subtracting
e_pos:
sub.l &16,%d1 # sub to compensate for shift of mant
bge.b e_save # if still pos, do not neg
neg.l %d1 # now negative, make pos and set SE
or.l &0x40000000,%d4 # set SE in d4,
or.l &0x40000000,(%a0) # and in working bcd
e_save:
mov.l %d1,-(%sp) # save exp on stack
#
#
# Calculate mantissa:
# 1. Calculate absolute value of mantissa in fp0 by mul and add.
# 2. Correct for mantissa sign.
# (i.e., all digits assumed left of the decimal point.)
#
# Register usage:
#
# calc_m:
# (*) d0: temp digit storage
# (*) d1: lword counter
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: words 2 and 3 of bcd
# ( ) a0: pointer to working bcd value
# ( ) a6: pointer to original bcd value
# (*) fp0: mantissa accumulator
# ( ) FP_SCR1: working copy of original bcd value
# ( ) L_SCR1: copy of original exponent word
#
calc_m:
mov.l &1,%d1 # word counter, init to 1
fmov.s &0x00000000,%fp0 # accumulator
#
#
# Since the packed number has a long word between the first & second parts,
# get the integer digit then skip down & get the rest of the
# mantissa. We will unroll the loop once.
#
bfextu (%a0){&28:&4},%d0 # integer part is ls digit in long word
fadd.b %d0,%fp0 # add digit to sum in fp0
#
#
# Get the rest of the mantissa.
#
loadlw:
mov.l (%a0,%d1.L*4),%d4 # load mantissa longword into d4
mov.l &FSTRT,%d3 # counter to pick up digits
mov.l &FNIBS,%d2 # reset number of digits per a0 ptr
md2b:
fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
bfextu %d4{%d3:&4},%d0 # get the digit and zero extend
fadd.b %d0,%fp0 # fp0 = fp0 + digit
#
#
# If all the digits (8) in that long word have been converted (d2=0),
# then inc d1 (=2) to point to the next long word and reset d3 to 0
# to initialize the digit offset, and set d2 to 7 for the digit count;
# else continue with this long word.
#
addq.b &4,%d3 # advance d3 to the next digit
dbf.w %d2,md2b # check for last digit in this lw
nextlw:
addq.l &1,%d1 # inc lw pointer in mantissa
cmp.l %d1,&2 # test for last lw
ble.b loadlw # if not, get last one
#
# Check the sign of the mant and make the value in fp0 the same sign.
#
m_sign:
btst &31,(%a0) # test sign of the mantissa
beq.b ap_st_z # if clear, go to append/strip zeros
fneg.x %fp0 # if set, negate fp0
#
# Append/strip zeros:
#
# For adjusted exponents which have an absolute value greater than 27*,
# this routine calculates the amount needed to normalize the mantissa
# for the adjusted exponent. That number is subtracted from the exp
# if the exp was positive, and added if it was negative. The purpose
# of this is to reduce the value of the exponent and the possibility
# of error in calculation of pwrten.
#
# 1. Branch on the sign of the adjusted exponent.
# 2p.(positive exp)
# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Subtract the count from the exp.
# 5. Check if the exp has crossed zero in #3 above; make the exp abs
# and set SE.
# 6. Multiply the mantissa by 10**count.
# 2n.(negative exp)
# 2. Check the digits in lwords 3 and 2 in descending order.
# 3. Add one for each zero encountered until a non-zero digit.
# 4. Add the count to the exp.
# 5. Check if the exp has crossed zero in #3 above; clear SE.
# 6. Divide the mantissa by 10**count.
#
# *Why 27? If the adjusted exponent is within -28 < expA < 28, then
# any adjustment due to append/strip zeros will drive the resultant
# exponent towards zero. Since all pwrten constants with a power
# of 27 or less are exact, there is no need to use this routine to
# attempt to lessen the resultant exponent.
#
# Register usage:
#
# ap_st_z:
# (*) d0: temp digit storage
# (*) d1: zero count
# (*) d2: digit count
# (*) d3: offset pointer
# ( ) d4: first word of bcd
# (*) d5: lword counter
# ( ) a0: pointer to working bcd value
# ( ) FP_SCR1: working copy of original bcd value
# ( ) L_SCR1: copy of original exponent word
#
#
# First check the absolute value of the exponent to see if this
# routine is necessary. If so, then check the sign of the exponent
# and do append (+) or strip (-) zeros accordingly.
# This section handles a positive adjusted exponent.
#
ap_st_z:
mov.l (%sp),%d1 # load expA for range test
cmp.l %d1,&27 # test is with 27
ble.w pwrten # if abs(expA) <28, skip ap/st zeros
btst &30,(%a0) # check sign of exp
bne.b ap_st_n # if neg, go to neg side
clr.l %d1 # zero count reg
mov.l (%a0),%d4 # load lword 1 to d4
bfextu %d4{&28:&4},%d0 # get M16 in d0
bne.b ap_p_fx # if M16 is non-zero, go fix exp
addq.l &1,%d1 # inc zero count
mov.l &1,%d5 # init lword counter
mov.l (%a0,%d5.L*4),%d4 # get lword 2 to d4
bne.b ap_p_cl # if lw 2 is non-zero, go check its digits
addq.l &8,%d1 # and inc count by 8
addq.l &1,%d5 # inc lword counter
mov.l (%a0,%d5.L*4),%d4 # get lword 3 to d4
ap_p_cl:
clr.l %d3 # init offset reg
mov.l &7,%d2 # init digit counter
ap_p_gd:
bfextu %d4{%d3:&4},%d0 # get digit
bne.b ap_p_fx # if non-zero, go to fix exp
addq.l &4,%d3 # point to next digit
addq.l &1,%d1 # inc digit counter
dbf.w %d2,ap_p_gd # get next digit
ap_p_fx:
mov.l %d1,%d0 # copy counter to d0
mov.l (%sp),%d1 # get adjusted exp from memory
sub.l %d0,%d1 # subtract count from exp
bge.b ap_p_fm # if still pos, skip setting SE
neg.l %d1 # now its neg; get abs
mov.l (%a0),%d4 # load lword 1 to d4
or.l &0x40000000,%d4 # and set SE in d4
or.l &0x40000000,(%a0) # and in memory
#
# Calculate the mantissa multiplier to compensate for the stripping of
# zeros from the mantissa.
#
ap_p_fm:
lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
clr.l %d3 # init table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
mov.l &3,%d2 # init d2 to count bits in counter
ap_p_el:
asr.l &1,%d0 # shift lsb into carry
bcc.b ap_p_en # if the bit is zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
ap_p_en:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b ap_p_el # if not, get next bit
fmul.x %fp1,%fp0 # mul mantissa by 10**(no_bits_shifted)
bra.b pwrten # go calc pwrten
#
# This section handles a negative adjusted exponent.
#
ap_st_n:
clr.l %d1 # clr counter
mov.l &2,%d5 # set up d5 to point to lword 3
mov.l (%a0,%d5.L*4),%d4 # get lword 3
bne.b ap_n_cl # if not zero, check digits
sub.l &1,%d5 # dec d5 to point to lword 2
addq.l &8,%d1 # inc counter by 8
mov.l (%a0,%d5.L*4),%d4 # get lword 2
ap_n_cl:
mov.l &28,%d3 # point to last digit
mov.l &7,%d2 # init digit counter
ap_n_gd:
bfextu %d4{%d3:&4},%d0 # get digit
bne.b ap_n_fx # if non-zero, go to exp fix
subq.l &4,%d3 # point to previous digit
addq.l &1,%d1 # inc digit counter
dbf.w %d2,ap_n_gd # get next digit
ap_n_fx:
mov.l %d1,%d0 # copy counter to d0
mov.l (%sp),%d1 # get adjusted exp from memory
sub.l %d0,%d1 # subtract count from exp
bgt.b ap_n_fm # if still pos, go fix mantissa
neg.l %d1 # take abs of exp and clr SE
mov.l (%a0),%d4 # load lword 1 to d4
and.l &0xbfffffff,%d4 # and clr SE in d4
and.l &0xbfffffff,(%a0) # and in memory
#
# Calculate the mantissa multiplier to compensate for the appending of
# zeros to the mantissa.
#
ap_n_fm:
lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
clr.l %d3 # init table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
mov.l &3,%d2 # init d2 to count bits in counter
ap_n_el:
asr.l &1,%d0 # shift lsb into carry
bcc.b ap_n_en # if the bit is zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
ap_n_en:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b ap_n_el # if not, get next bit
fdiv.x %fp1,%fp0 # div mantissa by 10**(no_bits_shifted)
#
#
# Calculate power-of-ten factor from adjusted and shifted exponent.
#
# Register usage:
#
# pwrten:
# (*) d0: temp
# ( ) d1: exponent
# (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
# (*) d3: FPCR work copy
# ( ) d4: first word of bcd
# (*) a1: RTABLE pointer
# calc_p:
# (*) d0: temp
# ( ) d1: exponent
# (*) d3: PWRTxx table index
# ( ) a0: pointer to working copy of bcd
# (*) a1: PWRTxx pointer
# (*) fp1: power-of-ten accumulator
#
# Pwrten calculates the exponent factor in the selected rounding mode
# according to the following table:
#
# Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
#
# ANY ANY RN RN
#
# + + RP RP
# - + RP RM
# + - RP RM
# - - RP RP
#
# + + RM RM
# - + RM RP
# + - RM RP
# - - RM RM
#
# + + RZ RM
# - + RZ RM
# + - RZ RP
# - - RZ RP
#
#
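#
# The table lookup below amounts to the following (illustrative C only;
# pwrten_rmode and rtable are hypothetical names, rtable mirrors RTABLE
# above, rm is the FPCR rounding-mode field with 0=RN,1=RZ,2=RM,3=RP, and
# sm/se are the mantissa/exponent sign bits):
#
#	static const unsigned char rtable[16] = {
#		0,0,0,0, 2,3,2,3, 2,3,3,2, 3,2,2,3
#	};
#	unsigned int pwrten_rmode(unsigned int rm, unsigned int sm, unsigned int se)
#	{
#		return rtable[(rm << 2) | (sm << 1) | se];
#	}
#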
pwrten:
mov.l USER_FPCR(%a6),%d3 # get user's FPCR
bfextu %d3{&26:&2},%d2 # isolate rounding mode bits
mov.l (%a0),%d4 # reload 1st bcd word to d4
asl.l &2,%d2 # format d2 to be
bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
add.l %d0,%d2 # in d2 as index into RTABLE
lea.l RTABLE(%pc),%a1 # load rtable base
mov.b (%a1,%d2),%d0 # load new rounding bits from table
clr.l %d3 # clear d3 to force no exc and extended
bfins %d0,%d3{&26:&2} # stuff new rounding bits in FPCR
fmov.l %d3,%fpcr # write new FPCR
asr.l &1,%d0 # write correct PTENxx table
bcc.b not_rp # to a1
lea.l PTENRP(%pc),%a1 # it is RP
bra.b calc_p # go to init section
not_rp:
asr.l &1,%d0 # keep checking
bcc.b not_rm
lea.l PTENRM(%pc),%a1 # it is RM
bra.b calc_p # go to init section
not_rm:
lea.l PTENRN(%pc),%a1 # it is RN
calc_p:
mov.l %d1,%d0 # copy exp to d0;use d0
bpl.b no_neg # if exp is negative,
neg.l %d0 # invert it
or.l &0x40000000,(%a0) # and set SE bit
no_neg:
clr.l %d3 # table index
fmov.s &0x3f800000,%fp1 # init fp1 to 1
e_loop:
asr.l &1,%d0 # shift next bit into carry
bcc.b e_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
e_next:
add.l &12,%d3 # inc d3 to next rtable entry
tst.l %d0 # check if d0 is zero
bne.b e_loop # not zero, continue shifting
#
#
# Check the sign of the adjusted exp and make the value in fp0 the
# same sign. If the exp was pos then multiply fp1*fp0;
# else divide fp0/fp1.
#
# Register Usage:
# norm:
# ( ) a0: pointer to working bcd value
# (*) fp0: mantissa accumulator
# ( ) fp1: scaling factor - 10**(abs(exp))
#
pnorm:
btst &30,(%a0) # test the sign of the exponent
beq.b mul # if clear, go to multiply
div:
fdiv.x %fp1,%fp0 # exp is negative, so divide mant by exp
bra.b end_dec
mul:
fmul.x %fp1,%fp0 # exp is positive, so multiply by exp
#
#
# Clean up and return with result in fp0.
#
# If the final mul/div in decbin incurred an inex exception,
# it will be inex2, but will be reported as inex1 by get_op.
#
end_dec:
fmov.l %fpsr,%d0 # get status register
bclr &inex2_bit+8,%d0 # test for inex2 and clear it
beq.b no_exc # skip this if no exc
ori.w &inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
no_exc:
add.l &0x4,%sp # clear 1 lw param
fmovm.x (%sp)+,&0x40 # restore fp1
movm.l (%sp)+,&0x3c # restore d2-d5
fmov.l &0x0,%fpcr
fmov.l &0x0,%fpsr
rts
#########################################################################
# bindec(): Converts an input in extended precision format to bcd format#
# #
# INPUT *************************************************************** #
# a0 = pointer to the input extended precision value in memory. #
# the input may be either normalized, unnormalized, or #
# denormalized. #
# d0 = contains the k-factor sign-extended to 32-bits. #
# #
# OUTPUT ************************************************************** #
# FP_SCR0(a6) = bcd format result on the stack. #
# #
# ALGORITHM *********************************************************** #
# #
# A1. Set RM and size ext; Set SIGMA = sign of input. #
# The k-factor is saved for use in d7. Clear the #
# BINDEC_FLG for separating normalized/denormalized #
# input. If input is unnormalized or denormalized, #
# normalize it. #
# #
# A2. Set X = abs(input). #
# #
# A3. Compute ILOG. #
# ILOG is the log base 10 of the input value. It is #
# approximated by adding e + 0.f when the original #
# value is viewed as 2^^e * 1.f in extended precision. #
# This value is stored in d6. #
# #
# A4. Clr INEX bit. #
# The operation in A3 above may have set INEX2. #
# #
# A5. Set ICTR = 0; #
# ICTR is a flag used in A13. It must be set before the #
# loop entry A6. #
# #
# A6. Calculate LEN. #
# LEN is the number of digits to be displayed. The #
# k-factor can dictate either the total number of digits, #
# if it is a positive number, or the number of digits #
# after the decimal point which are to be included as #
# significant. See the 68882 manual for examples. #
# If LEN is computed to be greater than 17, set OPERR in #
# USER_FPSR. LEN is stored in d4. #
# #
# A7. Calculate SCALE. #
# SCALE is equal to 10^ISCALE, where ISCALE is the number #
# of decimal places needed to insure LEN integer digits #
# in the output before conversion to bcd. LAMBDA is the #
# sign of ISCALE, used in A9. Fp1 contains #
# 10^^(abs(ISCALE)) using a rounding mode which is a #
# function of the original rounding mode and the signs #
# of ISCALE and X. A table is given in the code. #
# #
# A8. Clr INEX; Force RZ. #
# The operation in A3 above may have set INEX2. #
# RZ mode is forced for the scaling operation to insure #
# only one rounding error. The grs bits are collected in #
# the INEX flag for use in A10. #
# #
# A9. Scale X -> Y. #
# The mantissa is scaled to the desired number of #
# significant digits. The excess digits are collected #
# in INEX2. #
# #
# A10. Or in INEX. #
# If INEX is set, round error occurred. This is #
# compensated for by 'or-ing' in the INEX2 flag to #
# the lsb of Y. #
# #
# A11. Restore original FPCR; set size ext. #
# Perform FINT operation in the user's rounding mode. #
# Keep the size to extended. #
# #
# A12. Calculate YINT = FINT(Y) according to user's rounding #
# mode. The FPSP routine sintd0 is used. The output #
# is in fp0. #
# #
# A13. Check for LEN digits. #
# If the int operation results in more than LEN digits, #
# or less than LEN -1 digits, adjust ILOG and repeat from #
# A6. This test occurs only on the first pass. If the #
# result is exactly 10^LEN, decrement ILOG and divide #
# the mantissa by 10. #
# #
# A14. Convert the mantissa to bcd. #
# The binstr routine is used to convert the LEN digit #
# mantissa to bcd in memory. The input to binstr is #
# to be a fraction; i.e. (mantissa)/10^LEN and adjusted #
# such that the decimal point is to the left of bit 63. #
# The bcd digits are stored in the correct position in #
# the final string area in memory. #
# #
# A15. Convert the exponent to bcd. #
# As in A14 above, the exp is converted to bcd and the #
# digits are stored in the final string. #
# Test the length of the final exponent string. If the #
# length is 4, set operr. #
# #
# A16. Write sign bits to final string. #
# #
#########################################################################
set BINDEC_FLG, EXC_TEMP # DENORM flag
# Constants in extended precision
PLOG2:
long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
PLOG2UP1:
long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
# Constants in single precision
FONE:
long 0x3F800000,0x00000000,0x00000000,0x00000000
FTWO:
long 0x40000000,0x00000000,0x00000000,0x00000000
FTEN:
long 0x41200000,0x00000000,0x00000000,0x00000000
F4933:
long 0x459A2800,0x00000000,0x00000000,0x00000000
RBDTBL:
byte 0,0,0,0
byte 3,3,2,2
byte 3,2,2,3
byte 2,3,3,2
# Implementation Notes:
#
# The registers are used as follows:
#
# d0: scratch; LEN input to binstr
# d1: scratch
# d2: upper 32-bits of mantissa for binstr
# d3: scratch;lower 32-bits of mantissa for binstr
# d4: LEN
# d5: LAMBDA/ICTR
# d6: ILOG
# d7: k-factor
# a0: ptr for original operand/final result
# a1: scratch pointer
# a2: pointer to FP_X; abs(original value) in ext
# fp0: scratch
# fp1: scratch
# fp2: scratch
# F_SCR1:
# F_SCR2:
# L_SCR1:
# L_SCR2:
global bindec
bindec:
movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
# A1. Set RM and size ext. Set SIGMA = sign input;
# The k-factor is saved for use in d7. Clear BINDEC_FLG for
# separating normalized/denormalized input. If the input
# is a denormalized number, set the BINDEC_FLG memory word
# to signal denorm. If the input is unnormalized, normalize
# the input and test for denormalized result.
#
fmov.l &rm_mode*0x10,%fpcr # set RM and ext
mov.l (%a0),L_SCR2(%a6) # save exponent for sign check
mov.l %d0,%d7 # move k-factor to d7
clr.b BINDEC_FLG(%a6) # clr norm/denorm flag
cmpi.b STAG(%a6),&DENORM # is input a DENORM?
bne.w A2_str # no; input is a NORM
#
# Normalize the denorm
#
un_de_norm:
mov.w (%a0),%d0
and.w &0x7fff,%d0 # strip sign of normalized exp
mov.l 4(%a0),%d1
mov.l 8(%a0),%d2
norm_loop:
sub.w &1,%d0
lsl.l &1,%d2
roxl.l &1,%d1
tst.l %d1
bge.b norm_loop
#
# Test if the normalized input is denormalized
#
tst.w %d0
bgt.b pos_exp # if greater than zero, it is a norm
st BINDEC_FLG(%a6) # set flag for denorm
pos_exp:
and.w &0x7fff,%d0 # strip sign of normalized exp
mov.w %d0,(%a0)
mov.l %d1,4(%a0)
mov.l %d2,8(%a0)
# A2. Set X = abs(input).
#
A2_str:
mov.l (%a0),FP_SCR1(%a6) # move input to work space
mov.l 4(%a0),FP_SCR1+4(%a6) # move input to work space
mov.l 8(%a0),FP_SCR1+8(%a6) # move input to work space
and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
# A3. Compute ILOG.
# ILOG is the log base 10 of the input value. It is approx-
# imated by adding e + 0.f when the original value is viewed
# as 2^^e * 1.f in extended precision. This value is stored
# in d6.
#
# Register usage:
# Input/Output
# d0: k-factor/exponent
# d2: x/x
# d3: x/x
# d4: x/x
# d5: x/x
# d6: x/ILOG
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/x
# a2: x/x
# fp0: x/float(ILOG)
# fp1: x/x
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X)/Abs(X) with $3fff exponent
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
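#
# The approximation below, restated as an illustrative C sketch (the name
# ilog_sketch is hypothetical; the real code selects between the
# PLOG2/PLOG2UP1 constants by sign and converts with the RM rounding
# mode set in A1):
#
#	#include <math.h>
#
#	/* biased_exp = 15-bit biased exponent, frac = 0.f in [0.0, 1.0) */
#	int ilog_sketch(int biased_exp, double frac)
#	{
#		double x = (double)(biased_exp - 0x3fff) + frac; /* e + 0.f */
#		return (int)floor(x * log10(2.0));	/* approx log10 of |input| */
#	}
#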
tst.b BINDEC_FLG(%a6) # check for denorm
beq.b A3_cont # if clr, continue with norm
mov.l &-4933,%d6 # force ILOG = -4933
bra.b A4_str
A3_cont:
mov.w FP_SCR1(%a6),%d0 # move exp to d0
mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
fmov.x FP_SCR1(%a6),%fp0 # now fp0 has 1.f
sub.w &0x3fff,%d0 # strip off bias
fadd.w %d0,%fp0 # add in exp
fsub.s FONE(%pc),%fp0 # subtract off 1.0
fbge.w pos_res # if pos, branch
fmul.x PLOG2UP1(%pc),%fp0 # if neg, mul by LOG2UP1
fmov.l %fp0,%d6 # put ILOG in d6 as a lword
bra.b A4_str # go move out ILOG
pos_res:
fmul.x PLOG2(%pc),%fp0 # if pos, mul by LOG2
fmov.l %fp0,%d6 # put ILOG in d6 as a lword
# A4. Clr INEX bit.
# The operation in A3 above may have set INEX2.
A4_str:
fmov.l &0,%fpsr # zero all of fpsr - nothing needed
# A5. Set ICTR = 0;
# ICTR is a flag used in A13. It must be set before the
# loop entry A6. The lower word of d5 is used for ICTR.
clr.w %d5 # clear ICTR
# A6. Calculate LEN.
# LEN is the number of digits to be displayed. The k-factor
# can dictate either the total number of digits, if it is
# a positive number, or the number of digits after the
# original decimal point which are to be included as
# significant. See the 68882 manual for examples.
# If LEN is computed to be greater than 17, set OPERR in
# USER_FPSR. LEN is stored in d4.
#
# Register usage:
# Input/Output
# d0: exponent/Unchanged
# d2: x/x/scratch
# d3: x/x
# d4: exc picture/LEN
# d5: ICTR/Unchanged
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/x
# a2: x/x
# fp0: float(ILOG)/Unchanged
# fp1: x/x
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
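#
# The LEN calculation below, as an illustrative C sketch (the name
# len_sketch is hypothetical; the OPERR/AIOP mask value used here is an
# assumption, not taken from this source):
#
#	int len_sketch(int k, int ilog, unsigned int *fpsr)
#	{
#		int len = (k > 0) ? k : (ilog + 1 - k);
#		if (len < 1)
#			len = 1;
#		if (len > 17) {
#			len = 17;
#			if (k > 0)
#				*fpsr |= 0x2080;	/* OPERR + AIOP, assumed encoding */
#		}
#		return len;
#	}
#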
A6_str:
tst.l %d7 # branch on sign of k
ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
mov.l %d7,%d4 # if k > 0, LEN = k
bra.b len_ck # skip to LEN check
k_neg:
mov.l %d6,%d4 # first load ILOG to d4
sub.l %d7,%d4 # subtract off k
addq.l &1,%d4 # add in the 1
len_ck:
tst.l %d4 # LEN check: branch on sign of LEN
ble.b LEN_ng # if neg, set LEN = 1
cmp.l %d4,&17 # test if LEN > 17
ble.b A7_str # if not, forget it
mov.l &17,%d4 # set max LEN = 17
tst.l %d7 # if negative, never set OPERR
ble.b A7_str # if positive, continue
or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
bra.b A7_str # finished here
LEN_ng:
mov.l &1,%d4 # min LEN is 1
# A7. Calculate SCALE.
# SCALE is equal to 10^ISCALE, where ISCALE is the number
# of decimal places needed to insure LEN integer digits
# in the output before conversion to bcd. LAMBDA is the sign
# of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
# the rounding mode as given in the following table (see
# Coonen, p. 7.23 as ref.; however, the SCALE variable is
# of opposite sign in bindec.sa from Coonen).
#
# Initial USE
# FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
# ----------------------------------------------
# RN 00 0 0 00/0 RN
# RN 00 0 1 00/0 RN
# RN 00 1 0 00/0 RN
# RN 00 1 1 00/0 RN
# RZ 01 0 0 11/3 RP
# RZ 01 0 1 11/3 RP
# RZ 01 1 0 10/2 RM
# RZ 01 1 1 10/2 RM
# RM 10 0 0 11/3 RP
# RM 10 0 1 10/2 RM
# RM 10 1 0 10/2 RM
# RM 10 1 1 11/3 RP
# RP 11 0 0 10/2 RM
# RP 11 0 1 11/3 RP
# RP 11 1 0 11/3 RP
# RP 11 1 1 10/2 RM
#
# Register usage:
# Input/Output
# d0: exponent/scratch - final is 0
# d2: x/0 or 24 for A9
# d3: x/scratch - offset ptr into PTENRM array
# d4: LEN/Unchanged
# d5: 0/ICTR:LAMBDA
# d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: x/ptr to PTENRM array
# a2: x/x
# fp0: float(ILOG)/Unchanged
# fp1: x/10^ISCALE
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
A7_str:
tst.l %d7 # test sign of k
bgt.b k_pos # if pos and > 0, skip this
cmp.l %d7,%d6 # test k - ILOG
blt.b k_pos # if ILOG >= k, skip this
mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
k_pos:
mov.l %d6,%d0 # calc ILOG + 1 - LEN in d0
addq.l &1,%d0 # add the 1
sub.l %d4,%d0 # sub off LEN
swap %d5 # use upper word of d5 for LAMBDA
clr.w %d5 # set it zero initially
clr.w %d2 # set up d2 for very small case
tst.l %d0 # test sign of ISCALE
bge.b iscale # if pos, skip next inst
addq.w &1,%d5 # if neg, set LAMBDA true
cmp.l %d0,&0xffffecd4 # test iscale <= -4908
bgt.b no_inf # if false, skip rest
add.l &24,%d0 # add in 24 to iscale
mov.l &24,%d2 # put 24 in d2 for A9
no_inf:
neg.l %d0 # and take abs of ISCALE
iscale:
fmov.s FONE(%pc),%fp1 # init fp1 to 1
bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits
lsl.w &1,%d1 # put them in bits 2:1
add.w %d5,%d1 # add in LAMBDA
lsl.w &1,%d1 # put them in bits 3:1
tst.l L_SCR2(%a6) # test sign of original x
bge.b x_pos # if pos, don't set bit 0
addq.l &1,%d1 # if neg, set bit 0
x_pos:
lea.l RBDTBL(%pc),%a2 # load rbdtbl base
mov.b (%a2,%d1),%d3 # load d3 with new rmode
lsl.l &4,%d3 # put bits in proper position
fmov.l %d3,%fpcr # load bits into fpu
lsr.l &4,%d3 # put bits in proper position
tst.b %d3 # decode new rmode for pten table
bne.b not_rn # if zero, it is RN
lea.l PTENRN(%pc),%a1 # load a1 with RN table base
bra.b rmode # exit decode
not_rn:
lsr.b &1,%d3 # get lsb in carry
bcc.b not_rp2 # if carry clear, it is RM
lea.l PTENRP(%pc),%a1 # load a1 with RP table base
bra.b rmode # exit decode
not_rp2:
lea.l PTENRM(%pc),%a1 # load a1 with RM table base
rmode:
clr.l %d3 # clr table index
e_loop2:
lsr.l &1,%d0 # shift next bit into carry
bcc.b e_next2 # if zero, skip the mul
fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
e_next2:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if ISCALE is zero
bne.b e_loop2 # if not, loop
# A8. Clr INEX; Force RZ.
# The operation in A3 above may have set INEX2.
# RZ mode is forced for the scaling operation to insure
# only one rounding error. The grs bits are collected in
# the INEX flag for use in A10.
#
# Register usage:
# Input/Output
fmov.l &0,%fpsr # clr INEX
fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
# A9. Scale X -> Y.
# The mantissa is scaled to the desired number of significant
# digits. The excess digits are collected in INEX2. If mul,
# Check d2 for excess 10 exponential value. If not zero,
# the iscale value would have caused the pwrten calculation
# to overflow. Only a negative iscale can cause this, so
# multiply by 10^(d2), which is now only allowed to be 24,
# with a multiply by 10^8 and 10^16, which is exact since
# 10^24 is exact. If the input was denormalized, we must
# create a busy stack frame with the mul command and the
# two operands, and allow the fpu to complete the multiply.
#
# Register usage:
# Input/Output
# d0: FPCR with RZ mode/Unchanged
# d2: 0 or 24/unchanged
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: ptr to PTENRM array/Unchanged
# a2: x/x
# fp0: float(ILOG)/X adjusted for SCALE (Y)
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Abs(X) with $3fff exponent/Unchanged
# L_SCR1:x/x
# L_SCR2:first word of X packed/Unchanged
A9_str:
fmov.x (%a0),%fp0 # load X from memory
fabs.x %fp0 # use abs(X)
tst.w %d5 # LAMBDA is in lower word of d5
bne.b sc_mul # if neg (LAMBDA = 1), scale by mul
fdiv.x %fp1,%fp0 # calculate X / SCALE -> Y to fp0
bra.w A10_st # branch to A10
sc_mul:
tst.b BINDEC_FLG(%a6) # check for denorm
beq.w A9_norm # if norm, continue with mul
# for DENORM, we must calculate:
# fp0 = input_op * 10^ISCALE * 10^24
# since the input operand is a DENORM, we can't multiply it directly.
# so, we do the multiplication of the exponents and mantissas separately.
# in this way, we avoid underflow on intermediate stages of the
# multiplication and guarantee a result without exception.
fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
mov.w (%sp),%d3 # grab exponent
andi.w &0x7fff,%d3 # clear sign
ori.w &0x8000,(%a0) # make DENORM exp negative
add.w (%a0),%d3 # add DENORM exp to 10^ISCALE exp
subi.w &0x3fff,%d3 # subtract BIAS
add.w 36(%a1),%d3
subi.w &0x3fff,%d3 # subtract BIAS
add.w 48(%a1),%d3
subi.w &0x3fff,%d3 # subtract BIAS
bmi.w sc_mul_err # if result is DENORM, punt!!!
andi.w &0x8000,(%sp) # keep sign
or.w %d3,(%sp) # insert new exponent
andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
mov.l 0x4(%a0),-(%sp)
mov.l &0x3fff0000,-(%sp) # force exp to zero
fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
fmul.x (%sp)+,%fp0
# fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
# fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
mov.l 36+8(%a1),-(%sp) # get 10^8 mantissa
mov.l 36+4(%a1),-(%sp)
mov.l &0x3fff0000,-(%sp) # force exp to zero
mov.l 48+8(%a1),-(%sp) # get 10^16 mantissa
mov.l 48+4(%a1),-(%sp)
mov.l &0x3fff0000,-(%sp)# force exp to zero
fmul.x (%sp)+,%fp0 # multiply fp0 by 10^8
fmul.x (%sp)+,%fp0 # multiply fp0 by 10^16
bra.b A10_st
sc_mul_err:
bra.b sc_mul_err
A9_norm:
tst.w %d2 # test for small exp case
beq.b A9_con # if zero, continue as normal
fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
A9_con:
fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
# A10. Or in INEX.
# If INEX is set, round error occurred. This is compensated
# for by 'or-ing' in the INEX2 flag to the lsb of Y.
#
# Register usage:
# Input/Output
# d0: FPCR with RZ mode/FPSR with INEX2 isolated
# d2: x/x
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/final result
# a1: ptr to PTENxx array/Unchanged
# a2: x/ptr to FP_SCR1(a6)
# fp0: Y/Y with lsb adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
A10_st:
fmov.l %fpsr,%d0 # get FPSR
fmov.x %fp0,FP_SCR1(%a6) # move Y to memory
lea.l FP_SCR1(%a6),%a2 # load a2 with ptr to FP_SCR1
btst &9,%d0 # check if INEX2 set
beq.b A11_st # if clear, skip rest
or.l &1,8(%a2) # or in 1 to lsb of mantissa
fmov.x FP_SCR1(%a6),%fp0 # write adjusted Y back to fpu
# A11. Restore original FPCR; set size ext.
# Perform FINT operation in the user's rounding mode. Keep
# the size to extended. The sintdo entry point in the sint
# routine expects the FPCR value to be in USER_FPCR for
# mode and precision. The original FPCR is saved in L_SCR1.
A11_st:
mov.l USER_FPCR(%a6),L_SCR1(%a6) # save it for later
and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
# ;block exceptions
# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
# The FPSP routine sintd0 is used. The output is in fp0.
#
# Register usage:
# Input/Output
# d0: FPSR with AINEX cleared/FPCR with size set to ext
# d2: x/x/scratch
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/Unchanged
# d6: ILOG/Unchanged
# d7: k-factor/Unchanged
# a0: ptr for original operand/src ptr for sintdo
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
# fp0: Y/YINT
# fp1: 10^ISCALE/Unchanged
# fp2: x/x
# F_SCR1:x/x
# F_SCR2:Y adjusted for inex/Y with original exponent
# L_SCR1:x/original USER_FPCR
# L_SCR2:first word of X packed/Unchanged
A12_st:
movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
mov.l L_SCR1(%a6),-(%sp)
mov.l L_SCR2(%a6),-(%sp)
lea.l FP_SCR1(%a6),%a0 # a0 is ptr to FP_SCR1(a6)
fmov.x %fp0,(%a0) # move Y to memory at FP_SCR1(a6)
tst.l L_SCR2(%a6) # test sign of original operand
bge.b do_fint12 # if pos, use Y
or.l &0x80000000,(%a0) # if neg, use -Y
do_fint12:
mov.l USER_FPSR(%a6),-(%sp)
# bsr sintdo # sint routine returns int in fp0
fmov.l USER_FPCR(%a6),%fpcr
fmov.l &0x0,%fpsr # clear the AEXC bits!!!
## mov.l USER_FPCR(%a6),%d0 # ext prec/keep rnd mode
## andi.l &0x00000030,%d0
## fmov.l %d0,%fpcr
fint.x FP_SCR1(%a6),%fp0 # do fint()
fmov.l %fpsr,%d0
or.w %d0,FPSR_EXCEPT(%a6)
## fmov.l &0x0,%fpcr
## fmov.l %fpsr,%d0 # don't keep ccodes
## or.w %d0,FPSR_EXCEPT(%a6)
mov.b (%sp),USER_FPSR(%a6)
add.l &4,%sp
mov.l (%sp)+,L_SCR2(%a6)
mov.l (%sp)+,L_SCR1(%a6)
movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
mov.l L_SCR2(%a6),FP_SCR1(%a6) # restore original exponent
mov.l L_SCR1(%a6),USER_FPCR(%a6) # restore user's FPCR
# A13. Check for LEN digits.
# If the int operation results in more than LEN digits,
# or less than LEN -1 digits, adjust ILOG and repeat from
# A6. This test occurs only on the first pass. If the
# result is exactly 10^LEN, decrement ILOG and divide
# the mantissa by 10. The calculation of 10^LEN cannot
# be inexact, since all powers of ten up to 10^27 are exact
# in extended precision, so the use of a previous power-of-ten
# table will introduce no error.
#
#
# Register usage:
# Input/Output
# d0: FPCR with size set to ext/scratch final = 0
# d2: x/x
# d3: x/scratch final = x
# d4: LEN/LEN adjusted
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG/ILOG adjusted
# d7: k-factor/Unchanged
# a0: pointer into memory for packed bcd string formation
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: int portion of Y/abs(YINT) adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: x/10^LEN
# F_SCR1:x/x
# F_SCR2:Y with original exponent/Unchanged
# L_SCR1:original USER_FPCR/Unchanged
# L_SCR2:first word of X packed/Unchanged
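#
# The first-pass check below can be sketched in C (illustrative only, the
# name check_len is hypothetical; the exact-equality case is safe in the
# real code because powers of ten up to 10^27 are exact in extended
# precision):
#
#	#include <math.h>
#
#	/* returns 1 if ILOG changed and YINT must be recomputed from A6 */
#	int check_len(double *yint, int len, int *ilog)
#	{
#		double p = pow(10.0, (double)(len - 1));
#		if (fabs(*yint) < p)         { (*ilog)--; return 1; } /* too few digits  */
#		if (fabs(*yint) > 10.0 * p)  { (*ilog)++; return 1; } /* too many digits */
#		if (fabs(*yint) == 10.0 * p) { *yint /= 10.0; (*ilog)++; } /* == 10^LEN */
#		return 0;
#	}
#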
A13_st:
swap %d5 # put ICTR in lower word of d5
tst.w %d5 # check if ICTR = 0
bne not_zr # if non-zero, go to second test
#
# Compute 10^(LEN-1)
#
fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
mov.l %d4,%d0 # put LEN in d0
subq.l &1,%d0 # d0 = LEN -1
clr.l %d3 # clr table index
l_loop:
lsr.l &1,%d0 # shift next bit into carry
bcc.b l_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
l_next:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if LEN is zero
bne.b l_loop # if not, loop
#
# 10^LEN-1 is computed for this test and A14. If the input was
# denormalized, check only the case in which YINT > 10^LEN.
#
tst.b BINDEC_FLG(%a6) # check if input was norm
beq.b A13_con # if norm, continue with checking
fabs.x %fp0 # take abs of YINT
bra test_2
#
# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
#
A13_con:
fabs.x %fp0 # take abs of YINT
fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^(LEN-1)
fbge.w test_2 # if greater, do next test
subq.l &1,%d6 # subtract 1 from ILOG
mov.w &1,%d5 # set ICTR
fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
fmul.s FTEN(%pc),%fp2 # compute 10^LEN
bra.w A6_str # return to A6 and recompute YINT
test_2:
fmul.s FTEN(%pc),%fp2 # compute 10^LEN
fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^LEN
fblt.w A14_st # if less, all is ok, go to A14
fbgt.w fix_ex # if greater, fix and redo
fdiv.s FTEN(%pc),%fp0 # if equal, divide by 10
addq.l &1,%d6 # and inc ILOG
bra.b A14_st # and continue elsewhere
fix_ex:
addq.l &1,%d6 # increment ILOG by 1
mov.w &1,%d5 # set ICTR
fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
bra.w A6_str # return to A6 and recompute YINT
#
# Since ICTR <> 0, we have already been through one adjustment,
# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
# 10^LEN is again computed using whatever table is in a1 since the
# value calculated cannot be inexact.
#
not_zr:
fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
mov.l %d4,%d0 # put LEN in d0
clr.l %d3 # clr table index
z_loop:
lsr.l &1,%d0 # shift next bit into carry
bcc.b z_next # if zero, skip the mul
fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
z_next:
add.l &12,%d3 # inc d3 to next pwrten table entry
tst.l %d0 # test if LEN is zero
bne.b z_loop # if not, loop
fabs.x %fp0 # get abs(YINT)
fcmp.x %fp0,%fp2 # check if abs(YINT) = 10^LEN
fbneq.w A14_st # if not, skip this
fdiv.s FTEN(%pc),%fp0 # divide abs(YINT) by 10
addq.l &1,%d6 # and inc ILOG by 1
addq.l &1,%d4 # and inc LEN
fmul.s FTEN(%pc),%fp2 # since LEN was incremented, get 10^LEN
# A14. Convert the mantissa to bcd.
# The binstr routine is used to convert the LEN digit
# mantissa to bcd in memory. The input to binstr is
# to be a fraction; i.e. (mantissa)/10^LEN and adjusted
# such that the decimal point is to the left of bit 63.
# The bcd digits are stored in the correct position in
# the final string area in memory.
#
#
# Register usage:
# Input/Output
# d0: x/LEN call to binstr - final is 0
# d1: x/0
# d2: x/ms 32-bits of mant of abs(YINT)
# d3: x/ls 32-bits of mant of abs(YINT)
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG
# d7: k-factor/Unchanged
# a0: pointer into memory for packed bcd string formation
# /ptr to first mantissa byte in result string
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: int portion of Y/abs(YINT) adjusted
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:x/Work area for final result
# F_SCR2:Y with original exponent/Unchanged
# L_SCR1:original USER_FPCR/Unchanged
# L_SCR2:first word of X packed/Unchanged
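#
# What the shifting and rounding below accomplish, as an illustrative C
# sketch (the name align_for_binstr is hypothetical; m is the explicit
# 64-bit mantissa of abs(YINT)/10^LEN and e its biased exponent; the
# quotient lies in [0.1, 1.0), so the shift count stays small):
#
#	#include <stdint.h>
#
#	/* returns the value as a fraction scaled by 2^64, ready for binstr() */
#	uint64_t align_for_binstr(uint64_t m, int e)
#	{
#		if (e != 0 && e < 0x3ffe)
#			m >>= (0x3ffe - e);	/* line the binary point up above bit 63 */
#		if (m == 0)
#			return 0;
#		m += 0x80;			/* round at bit 7 ...			*/
#		m &= ~(uint64_t)0x7f;		/* ... and drop bits the 882 ignores	*/
#		return m;
#	}
#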
A14_st:
fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
fdiv.x %fp2,%fp0 # divide abs(YINT) by 10^LEN
lea.l FP_SCR0(%a6),%a0
fmov.x %fp0,(%a0) # move abs(YINT)/10^LEN to memory
mov.l 4(%a0),%d2 # move 2nd word of FP_RES to d2
mov.l 8(%a0),%d3 # move 3rd word of FP_RES to d3
clr.l 4(%a0) # zero word 2 of FP_RES
clr.l 8(%a0) # zero word 3 of FP_RES
mov.l (%a0),%d0 # move exponent to d0
swap %d0 # put exponent in lower word
beq.b no_sft # if zero, don't shift
sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
tst.l %d0 # check if > 1
bgt.b no_sft # if so, don't shift
neg.l %d0 # make exp positive
m_loop:
lsr.l &1,%d2 # shift d2:d3 right, add 0s
roxr.l &1,%d3 # the number of places
dbf.w %d0,m_loop # given in d0
no_sft:
tst.l %d2 # check for mantissa of zero
bne.b no_zr # if not, go on
tst.l %d3 # continue zero check
beq.b zer_m # if zero, go directly to binstr
no_zr:
clr.l %d1 # put zero in d1 for addx
add.l &0x00000080,%d3 # inc at bit 7
addx.l %d1,%d2 # continue inc
and.l &0xffffff80,%d3 # strip off lsb not used by 882
zer_m:
mov.l %d4,%d0 # put LEN in d0 for binstr call
addq.l &3,%a0 # a0 points to M16 byte in result
bsr binstr # call binstr to convert mant
# A15. Convert the exponent to bcd.
# As in A14 above, the exp is converted to bcd and the
# digits are stored in the final string.
#
# Digits are stored in L_SCR1(a6) on return from BINDEC as:
#
# 32 16 15 0
# -----------------------------------------
# | 0 | e4 | e3 | e2 | e1 | X | X | X |
# -----------------------------------------
#
# And are moved into their proper places in FP_SCR0. If digit e4
# is non-zero, OPERR is signaled. In all cases, all 4 digits are
# written as specified in the 881/882 manual for packed decimal.
#
# Register usage:
# Input/Output
# d0: x/LEN call to binstr - final is 0
# d1: x/scratch (0);shift count for final exponent packing
# d2: x/ms 32-bits of exp fraction/scratch
# d3: x/ls 32-bits of exp fraction
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG
# d7: k-factor/Unchanged
# a0: ptr to result string/ptr to L_SCR1(a6)
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: abs(YINT) adjusted/float(ILOG)
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:Work area for final result/BCD result
# F_SCR2:Y with original exponent/ILOG/10^4
# L_SCR1:original USER_FPCR/Exponent digits on return from binstr
# L_SCR2:first word of X packed/Unchanged
A15_st:
tst.b BINDEC_FLG(%a6) # check for denorm
beq.b not_denorm
ftest.x %fp0 # test for zero
fbeq.w den_zero # if zero, use k-factor or 4933
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
bra.b convrt
den_zero:
tst.l %d7 # check sign of the k-factor
blt.b use_ilog # if negative, use ILOG
fmov.s F4933(%pc),%fp0 # force exponent to 4933
bra.b convrt # do it
use_ilog:
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
bra.b convrt
not_denorm:
ftest.x %fp0 # test for zero
fbneq.w not_zero # if zero, force exponent
fmov.s FONE(%pc),%fp0 # force exponent to 1
bra.b convrt # do it
not_zero:
fmov.l %d6,%fp0 # float ILOG
fabs.x %fp0 # get abs of ILOG
convrt:
fdiv.x 24(%a1),%fp0 # compute ILOG/10^4
fmov.x %fp0,FP_SCR1(%a6) # store fp0 in memory
mov.l 4(%a2),%d2 # move word 2 to d2
mov.l 8(%a2),%d3 # move word 3 to d3
mov.w (%a2),%d0 # move exp to d0
beq.b x_loop_fin # if zero, skip the shift
sub.w &0x3ffd,%d0 # subtract off bias
neg.w %d0 # make exp positive
x_loop:
lsr.l &1,%d2 # shift d2:d3 right
roxr.l &1,%d3 # the number of places
dbf.w %d0,x_loop # given in d0
x_loop_fin:
clr.l %d1 # put zero in d1 for addx
add.l &0x00000080,%d3 # inc at bit 7
addx.l %d1,%d2 # continue inc
and.l &0xffffff80,%d3 # strip off lsb not used by 882
mov.l &4,%d0 # put 4 in d0 for binstr call
lea.l L_SCR1(%a6),%a0 # a0 is ptr to L_SCR1 for exp digits
bsr binstr # call binstr to convert exp
mov.l L_SCR1(%a6),%d0 # load L_SCR1 lword to d0
mov.l &12,%d1 # use d1 for shift count
lsr.l %d1,%d0 # shift d0 right by 12
bfins %d0,FP_SCR0(%a6){&4:&12} # put e3:e2:e1 in FP_SCR0
lsr.l %d1,%d0 # shift d0 right by 12
bfins %d0,FP_SCR0(%a6){&16:&4} # put e4 in FP_SCR0
tst.b %d0 # check if e4 is zero
beq.b A16_st # if zero, skip rest
or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
# A16. Write sign bits to final string.
# Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
#
# Register usage:
# Input/Output
# d0: x/scratch - final is x
# d2: x/x
# d3: x/x
# d4: LEN/Unchanged
# d5: ICTR:LAMBDA/LAMBDA:ICTR
# d6: ILOG/ILOG adjusted
# d7: k-factor/Unchanged
# a0: ptr to L_SCR1(a6)/Unchanged
# a1: ptr to PTENxx array/Unchanged
# a2: ptr to FP_SCR1(a6)/Unchanged
# fp0: float(ILOG)/Unchanged
# fp1: 10^ISCALE/Unchanged
# fp2: 10^LEN/Unchanged
# F_SCR1:BCD result with correct signs
# F_SCR2:ILOG/10^4
# L_SCR1:Exponent digits on return from binstr
# L_SCR2:first word of X packed/Unchanged
A16_st:
clr.l %d0 # clr d0 for collection of signs
and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
tst.l L_SCR2(%a6) # check sign of original mantissa
bge.b mant_p # if pos, don't set SM
mov.l &2,%d0 # move 2 in to d0 for SM
mant_p:
tst.l %d6 # check sign of ILOG
bge.b wr_sgn # if pos, don't set SE
addq.l &1,%d0 # set bit 0 in d0 for SE
wr_sgn:
bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
# Clean up and restore all registers used.
fmov.l &0,%fpsr # clear possible inex2/ainex bits
fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
rts
global PTENRN
PTENRN:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
global PTENRP
PTENRP:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
global PTENRM
PTENRM:
long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
#########################################################################
# binstr(): Converts a 64-bit binary integer to bcd. #
# #
# INPUT *************************************************************** #
# d2:d3 = 64-bit binary integer #
# d0 = desired length (LEN) #
# a0 = pointer to start in memory for bcd characters #
# (This pointer must point to byte 4 of the first #
# lword of the packed decimal memory string.) #
# #
# OUTPUT ************************************************************** #
# a0 = pointer to LEN bcd digits representing the 64-bit integer. #
# #
# ALGORITHM *********************************************************** #
# The 64-bit binary is assumed to have a decimal point before #
# bit 63. The fraction is multiplied by 10 using a mul by 2 #
# shift and a mul by 8 shift. The bits shifted out of the #
# msb form a decimal digit. This process is iterated until #
# LEN digits are formed. #
# #
# A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the #
# digit formed will be assumed the least significant. This is #
# to force the first byte formed to have a 0 in the upper 4 bits. #
# #
# A2. Beginning of the loop: #
# Copy the fraction in d2:d3 to d4:d5. #
# #
# A3. Multiply the fraction in d2:d3 by 8 using bit-field #
# extracts and shifts. The three msbs from d2 will go into d1. #
# #
# A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb #
# will be collected by the carry. #
# #
# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5 #
# into d2:d3. D1 will contain the bcd digit formed. #
# #
# A6. Test d7. If zero, the digit formed is the ms digit. If non- #
# zero, it is the ls digit. Put the digit in its place in the #
# upper word of d0. If it is the ls digit, write the word #
# from d0 to memory. #
# #
# A7. Decrement d6 (LEN counter) and repeat the loop until zero. #
# #
#########################################################################
# Implementation Notes:
#
# The registers are used as follows:
#
# d0: LEN counter
# d1: temp used to form the digit
# d2: upper 32-bits of fraction for mul by 8
# d3: lower 32-bits of fraction for mul by 8
# d4: upper 32-bits of fraction for mul by 2
# d5: lower 32-bits of fraction for mul by 2
# d6: temp for bit-field extracts
# d7: byte digit formation word;digit count {0,1}
# a0: pointer into memory for packed bcd string formation
#
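#
# The digit-extraction step (x10 done as x8 + x2) in illustrative C (the
# name next_bcd_digit is hypothetical; the code below keeps the fraction
# split across the d2:d3 and d4:d5 register pairs instead of a 64-bit
# type):
#
#	#include <stdint.h>
#
#	/* frac is a binary fraction with the point to the left of bit 63;
#	   multiply it by 10 and return the decimal digit that spills out */
#	unsigned int next_bcd_digit(uint64_t *frac)
#	{
#		uint64_t x8 = *frac << 3;	/* times 8, low 64 bits */
#		uint64_t x2 = *frac << 1;	/* times 2, low 64 bits */
#		uint64_t sum = x8 + x2;
#		unsigned int digit = (unsigned int)(*frac >> 61)  /* 3 msbs out of the x8 */
#				   + (unsigned int)(*frac >> 63)  /* 1 msb out of the x2  */
#				   + (sum < x8);		  /* carry from the add   */
#		*frac = sum;
#		return digit;
#	}
#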
global binstr
binstr:
movm.l &0xff00,-(%sp) # {%d0-%d7}
#
# A1: Init d7
#
mov.l &1,%d7 # init d7 for second digit
subq.l &1,%d0 # for dbf d0 would have LEN+1 passes
#
# A2. Copy d2:d3 to d4:d5. Start loop.
#
loop:
mov.l %d2,%d4 # copy the fraction before muls
mov.l %d3,%d5 # to d4:d5
#
# A3. Multiply d2:d3 by 8; extract msbs into d1.
#
bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
asl.l &3,%d2 # shift d2 left by 3 places
bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
asl.l &3,%d3 # shift d3 left by 3 places
or.l %d6,%d2 # or in msbs from d3 into d2
#
# A4. Multiply d4:d5 by 2; add carry out to d1.
#
asl.l &1,%d5 # mul d5 by 2
roxl.l &1,%d4 # mul d4 by 2
swap %d6 # put 0 in d6 lower word
addx.w %d6,%d1 # add in extend from mul by 2
#
# A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
#
add.l %d5,%d3 # add lower 32 bits
nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
addx.l %d4,%d2 # add with extend upper 32 bits
nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
addx.w %d6,%d1 # add in extend from add to d1
swap %d6 # with d6 = 0; put 0 in upper word
#
# A6. Test d7 and branch.
#
tst.w %d7 # is a digit already pending in d7b?
beq.b first_d # if not, just save this digit; else form byte & write
sec_d:
swap %d7 # bring first digit to word d7b
asl.w &4,%d7 # first digit in upper 4 bits d7b
add.w %d1,%d7 # add in ls digit to d7b
mov.b %d7,(%a0)+ # store d7b byte in memory
swap %d7 # put LEN counter in word d7a
clr.w %d7 # set d7a to signal no digits done
dbf.w %d0,loop # do loop some more!
bra.b end_bstr # finished, so exit
first_d:
swap %d7 # put digit word in d7b
mov.w %d1,%d7 # put new digit in d7b
swap %d7 # put LEN counter in word d7a
addq.w &1,%d7 # set d7a to signal first digit done
dbf.w %d0,loop # do loop some more!
swap %d7 # put last digit in string
lsl.w &4,%d7 # move it to upper 4 bits
mov.b %d7,(%a0)+ # store it in memory string
#
# Clean up and return with result in fp0.
#
end_bstr:
movm.l (%sp)+,&0xff # {%d0-%d7}
rts
#########################################################################
# XDEF **************************************************************** #
# facc_in_b(): dmem_read_byte failed #
# facc_in_w(): dmem_read_word failed #
# facc_in_l(): dmem_read_long failed #
# facc_in_d(): dmem_read of dbl prec failed #
# facc_in_x(): dmem_read of ext prec failed #
# #
# facc_out_b(): dmem_write_byte failed #
# facc_out_w(): dmem_write_word failed #
# facc_out_l(): dmem_write_long failed #
# facc_out_d(): dmem_write of dbl prec failed #
# facc_out_x(): dmem_write of ext prec failed #
# #
# XREF **************************************************************** #
# _real_access() - exit through access error handler #
# #
# INPUT *************************************************************** #
# None #
# #
# OUTPUT ************************************************************** #
# None #
# #
# ALGORITHM *********************************************************** #
# Flow jumps here when an FP data fetch call gets an error #
# result. This means the operating system wants an access error frame #
# made out of the current exception stack frame. #
# So, we first call restore() which makes sure that any updated #
# -(an)+ register gets returned to its pre-exception value and then #
# we change the stack to an access error stack frame. #
# #
#########################################################################
facc_in_b:
movq.l &0x1,%d0 # one byte
bsr.w restore # fix An
mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
bra.w facc_finish
facc_in_w:
movq.l &0x2,%d0 # two bytes
bsr.w restore # fix An
mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_l:
movq.l &0x4,%d0 # four bytes
bsr.w restore # fix An
mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_d:
movq.l &0x8,%d0 # eight bytes
bsr.w restore # fix An
mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_in_x:
movq.l &0xc,%d0 # twelve bytes
bsr.w restore # fix An
mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
################################################################
facc_out_b:
movq.l &0x1,%d0 # one byte
bsr.w restore # restore An
mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_w:
movq.l &0x2,%d0 # two bytes
bsr.w restore # restore An
mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_l:
movq.l &0x4,%d0 # four bytes
bsr.w restore # restore An
mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_d:
movq.l &0x8,%d0 # eight bytes
bsr.w restore # restore An
mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
bra.b facc_finish
facc_out_x:
mov.l &0xc,%d0 # twelve bytes
bsr.w restore # restore An
mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
# here's where we actually create the access error frame from the
# current exception stack frame.
facc_finish:
mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
unlk %a6
mov.l (%sp),-(%sp) # store SR, hi(PC)
mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
mov.l 0xc(%sp),0x8(%sp) # store EA
mov.l &0x00000001,0xc(%sp) # store FSLW
mov.w 0x6(%sp),0xc(%sp) # fix FSLW (size)
mov.w &0x4008,0x6(%sp) # store voff
btst &0x5,(%sp) # supervisor or user mode?
beq.b facc_out2 # user
bset &0x2,0xd(%sp) # set supervisor TM bit
facc_out2:
bra.l _real_access
##################################################################
# if the effective addressing mode was predecrement or postincrement,
# the emulation has already changed its value to the correct post-
# instruction value. but since we're exiting to the access error
# handler, then AN must be returned to its pre-instruction value.
# we do that here.
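#
# In outline (illustrative C, restore_sketch is a hypothetical name; the
# a7/USP special case handled below is omitted):
#
#	/* opword bits 5-3: 011 = (An)+, 100 = -(An) */
#	void restore_sketch(unsigned int opword, unsigned long *an, unsigned long len)
#	{
#		switch (opword & 0x38) {
#		case 0x18: *an -= len; break;	/* undo post-increment */
#		case 0x20: *an += len; break;	/* undo pre-decrement  */
#		}
#	}
#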
restore:
mov.b EXC_OPWORD+0x1(%a6),%d1
andi.b &0x38,%d1 # extract opmode
cmpi.b %d1,&0x18 # postinc?
beq.w rest_inc
cmpi.b %d1,&0x20 # predec?
beq.w rest_dec
rts
rest_inc:
mov.b EXC_OPWORD+0x1(%a6),%d1
andi.w &0x0007,%d1 # fetch An
mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1
jmp (tbl_rest_inc.b,%pc,%d1.w*1)
tbl_rest_inc:
short ri_a0 - tbl_rest_inc
short ri_a1 - tbl_rest_inc
short ri_a2 - tbl_rest_inc
short ri_a3 - tbl_rest_inc
short ri_a4 - tbl_rest_inc
short ri_a5 - tbl_rest_inc
short ri_a6 - tbl_rest_inc
short ri_a7 - tbl_rest_inc
ri_a0:
sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
rts
ri_a1:
sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
rts
ri_a2:
sub.l %d0,%a2 # fix a2
rts
ri_a3:
sub.l %d0,%a3 # fix a3
rts
ri_a4:
sub.l %d0,%a4 # fix a4
rts
ri_a5:
sub.l %d0,%a5 # fix a5
rts
ri_a6:
sub.l %d0,(%a6) # fix stacked a6
rts
# if it's a fmove out instruction, we don't have to fix a7
# because we hadn't changed it yet. if it's an opclass two
# instruction (data moved in) and the exception was in supervisor
# mode, then a7 also wasn't updated. if it was user mode, then
# restore the correct a7 which is in the USP currently.
ri_a7:
cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
bne.b ri_a7_done # out
btst &0x5,EXC_SR(%a6) # user or supervisor?
bne.b ri_a7_done # supervisor
movc %usp,%a0 # restore USP
sub.l %d0,%a0
movc %a0,%usp
ri_a7_done:
rts
# need to invert adjustment value if the <ea> was predec
rest_dec:
neg.l %d0
bra.b rest_inc
#########################################################################
# arch/m68k/ifpsp060/src/ftest.S
#########################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
# M68000 Hi-Performance Microprocessor Division
# M68060 Software Package
# Production Release P1.00 -- October 10, 1994
# M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
# THE SOFTWARE is provided on an "AS IS" basis and without warranty.
# To the maximum extent permitted by applicable law,
# MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
# and any warranty against infringement with regard to the SOFTWARE
# (INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
# To the maximum extent permitted by applicable law,
# IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
# (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
# BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
# ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
# Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
# You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
# so long as this entire notice is retained without alteration in any modified and/or
# redistributed versions, and that such modified versions are clearly identified as such.
# No licenses are granted by implication, estoppel or otherwise under any patents
# or trademarks of Motorola, Inc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#############################################
set SREGS, -64
set IREGS, -128
set IFPREGS, -224
set SFPREGS, -320
set IFPCREGS, -332
set SFPCREGS, -344
set ICCR, -346
set SCCR, -348
set TESTCTR, -352
set DATA, -384
#############################################
TESTTOP:
bra.l _060TESTS_
short 0x0000
bra.l _060TESTS_unimp
short 0x0000
bra.l _060TESTS_enable
short 0x0000
start_str:
string "Testing 68060 FPSP started:\n"
start_str_unimp:
string "Testing 68060 FPSP unimplemented instruction started:\n"
start_str_enable:
string "Testing 68060 FPSP exception enabled started:\n"
pass_str:
string "passed\n"
fail_str:
string " failed\n"
align 0x4
chk_test:
tst.l %d0
bne.b test_fail
test_pass:
pea pass_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
rts
test_fail:
mov.l %d1,-(%sp)
bsr.l _print_num
addq.l &0x4,%sp
pea fail_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
rts
#############################################
_060TESTS_:
link %a6,&-384
movm.l &0x3f3c,-(%sp)
fmovm.x &0xff,-(%sp)
pea start_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
### effadd
clr.l TESTCTR(%a6)
pea effadd_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l effadd_0
bsr.l chk_test
### unsupp
clr.l TESTCTR(%a6)
pea unsupp_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l unsupp_0
bsr.l chk_test
### ovfl non-maskable
clr.l TESTCTR(%a6)
pea ovfl_nm_str(%pc)
bsr.l _print_str
bsr.l ovfl_nm_0
bsr.l chk_test
### unfl non-maskable
clr.l TESTCTR(%a6)
pea unfl_nm_str(%pc)
bsr.l _print_str
bsr.l unfl_nm_0
bsr.l chk_test
movm.l (%sp)+,&0x3cfc
fmovm.x (%sp)+,&0xff
unlk %a6
rts
_060TESTS_unimp:
link %a6,&-384
movm.l &0x3f3c,-(%sp)
fmovm.x &0xff,-(%sp)
pea start_str_unimp(%pc)
bsr.l _print_str
addq.l &0x4,%sp
### unimp
clr.l TESTCTR(%a6)
pea unimp_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l unimp_0
bsr.l chk_test
movm.l (%sp)+,&0x3cfc
fmovm.x (%sp)+,&0xff
unlk %a6
rts
_060TESTS_enable:
link %a6,&-384
movm.l &0x3f3c,-(%sp)
fmovm.x &0xff,-(%sp)
pea start_str_enable(%pc)
bsr.l _print_str
addq.l &0x4,%sp
### snan
clr.l TESTCTR(%a6)
pea snan_str(%pc)
bsr.l _print_str
bsr.l snan_0
bsr.l chk_test
### operr
clr.l TESTCTR(%a6)
pea operr_str(%pc)
bsr.l _print_str
bsr.l operr_0
bsr.l chk_test
### ovfl
clr.l TESTCTR(%a6)
pea ovfl_str(%pc)
bsr.l _print_str
bsr.l ovfl_0
bsr.l chk_test
### unfl
clr.l TESTCTR(%a6)
pea unfl_str(%pc)
bsr.l _print_str
bsr.l unfl_0
bsr.l chk_test
### dz
clr.l TESTCTR(%a6)
pea dz_str(%pc)
bsr.l _print_str
bsr.l dz_0
bsr.l chk_test
### inexact
clr.l TESTCTR(%a6)
pea inex_str(%pc)
bsr.l _print_str
bsr.l inex_0
bsr.l chk_test
movm.l (%sp)+,&0x3cfc
fmovm.x (%sp)+,&0xff
unlk %a6
rts
#############################################
#############################################
unimp_str:
string "\tUnimplemented FP instructions..."
align 0x4
unimp_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x40000000,DATA+0x0(%a6)
mov.l &0xc90fdaa2,DATA+0x4(%a6)
mov.l &0x2168c235,DATA+0x8(%a6)
mov.w &0x0000,%cc
unimp_0_pc:
fsin.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0xbfbf0000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x08000208,IFPCREGS+0x4(%a6)
lea unimp_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
unimp_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x3ffe0000,DATA+0x0(%a6)
mov.l &0xc90fdaa2,DATA+0x4(%a6)
mov.l &0x2168c235,DATA+0x8(%a6)
mov.w &0x0000,%cc
unimp_1_pc:
ftan.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x3fff0000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x00000208,IFPCREGS+0x4(%a6)
lea unimp_1_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# fmovecr
unimp_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
unimp_2_pc:
fmovcr.x &0x31,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x40000000,IFPREGS+0x0(%a6)
mov.l &0x935d8ddd,IFPREGS+0x4(%a6)
mov.l &0xaaa8ac17,IFPREGS+0x8(%a6)
mov.l &0x00000208,IFPCREGS+0x4(%a6)
lea unimp_2_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# fscc
unimp_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.l &0x0f000000,%fpsr
mov.l &0x00,%d7
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
unimp_3_pc:
fsgt %d7
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0f008080,IFPCREGS+0x4(%a6)
lea unimp_3_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# fdbcc
unimp_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.l &0x0f000000,%fpsr
mov.l &0x2,%d7
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
unimp_4_pc:
fdbgt.w %d7,unimp_4_pc
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.w &0xffff,IREGS+28+2(%a6)
mov.l &0x0f008080,IFPCREGS+0x4(%a6)
lea unimp_4_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# ftrapcc
unimp_5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.l &0x0f000000,%fpsr
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
unimp_5_pc:
ftpgt.l &0xabcdef01
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0f008080,IFPCREGS+0x4(%a6)
lea unimp_5_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#############################################
effadd_str:
string "\tUnimplemented <ea>..."
align 0x4
effadd_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmov.b &0x2,%fp0
mov.w &0x0000,%cc
effadd_0_pc:
fmul.x &0xc00000008000000000000000,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0xc0010000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x08000000,IFPCREGS+0x4(%a6)
lea effadd_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
effadd_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
effadd_1_pc:
fabs.p &0xc12300012345678912345678,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x3e660000,IFPREGS+0x0(%a6)
mov.l &0xd0ed23e8,IFPREGS+0x4(%a6)
mov.l &0xd14035bc,IFPREGS+0x8(%a6)
mov.l &0x00000108,IFPCREGS+0x4(%a6)
lea effadd_1_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovml_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.l &0xffffffffffffffff,%fpcr,%fpsr
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovml_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.l &0xffffffffffffffff,%fpcr,%fpiar
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
mov.l &0xffffffff,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovml_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.l &0xffffffffffffffff,%fpsr,%fpiar
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
mov.l &0xffffffff,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovml_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.l &0xffffffffffffffffffffffff,%fpcr,%fpsr,%fpiar
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
mov.l &0xffffffff,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# fmovmx dynamic
fmovmx_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.b &0x1,%fp0
fmov.b &0x2,%fp1
fmov.b &0x3,%fp2
fmov.b &0x4,%fp3
fmov.b &0x5,%fp4
fmov.b &0x6,%fp5
fmov.b &0x7,%fp6
fmov.b &0x8,%fp7
fmov.l &0x0,%fpiar
mov.l &0xffffffaa,%d0
mov.w &0x0000,ICCR(%a6)
movm.l &0xffff,IREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
mov.w &0x0000,%cc
fmovm.x %d0,-(%sp)
mov.w %cc,SCCR(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
fmov.s &0x7f800000,%fp1
fmov.s &0x7f800000,%fp3
fmov.s &0x7f800000,%fp5
fmov.s &0x7f800000,%fp7
fmov.x (%sp)+,%fp1
fmov.x (%sp)+,%fp3
fmov.x (%sp)+,%fp5
fmov.x (%sp)+,%fp7
movm.l &0xffff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovmx_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.b &0x1,%fp0
fmov.b &0x2,%fp1
fmov.b &0x3,%fp2
fmov.b &0x4,%fp3
fmov.b &0x5,%fp4
fmov.b &0x6,%fp5
fmov.b &0x7,%fp6
fmov.b &0x8,%fp7
fmov.x %fp6,-(%sp)
fmov.x %fp4,-(%sp)
fmov.x %fp2,-(%sp)
fmov.x %fp0,-(%sp)
fmovm.x &0xff,IFPREGS(%a6)
fmov.s &0x7f800000,%fp6
fmov.s &0x7f800000,%fp4
fmov.s &0x7f800000,%fp2
fmov.s &0x7f800000,%fp0
fmov.l &0x0,%fpiar
fmov.l &0x0,%fpsr
mov.l &0xffffffaa,%d0
mov.w &0x0000,ICCR(%a6)
movm.l &0xffff,IREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.w &0x0000,%cc
fmovm.x (%sp)+,%d0
mov.w %cc,SCCR(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
movm.l &0xffff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
fmovmx_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
fmov.b &0x1,%fp0
fmov.b &0x2,%fp1
fmov.b &0x3,%fp2
fmov.b &0x4,%fp3
fmov.b &0x5,%fp4
fmov.b &0x6,%fp5
fmov.b &0x7,%fp6
fmov.b &0x8,%fp7
fmov.l &0x0,%fpiar
mov.l &0xffffff00,%d0
mov.w &0x0000,ICCR(%a6)
movm.l &0xffff,IREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
mov.w &0x0000,%cc
fmovm.x %d0,-(%sp)
mov.w %cc,SCCR(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
movm.l &0xffff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
###########################################################
# This test will take a non-maskable overflow directly.
ovfl_nm_str:
string "\tNon-maskable overflow..."
align 0x4
ovfl_nm_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmov.b &0x2,%fp0
mov.l &0x7ffe0000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
mov.w &0x0000,%cc
ovfl_nm_0_pc:
fmul.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x7fff0000,IFPREGS+0x0(%a6)
mov.l &0x00000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x02001048,IFPCREGS+0x4(%a6)
lea ovfl_nm_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
###########################################################
# This test will take an overflow directly.
ovfl_str:
string "\tEnabled overflow..."
align 0x4
ovfl_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00001000,%fpcr
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
fmov.b &0x2,%fp0
mov.l &0x7ffe0000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
mov.w &0x0000,%cc
ovfl_0_pc:
fmul.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x7fff0000,IFPREGS+0x0(%a6)
mov.l &0x00000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x02001048,IFPCREGS+0x4(%a6)
lea ovfl_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
# This test will take an underflow directly.
unfl_str:
string "\tEnabled underflow..."
align 0x4
unfl_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00000800,%fpcr
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x00000000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
unfl_0_pc:
fdiv.b &0x2,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x00000000,IFPREGS+0x0(%a6)
mov.l &0x40000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x00000800,IFPCREGS+0x4(%a6)
lea unfl_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
# This test will take a non-maskable underflow directly.
unfl_nm_str:
string "\tNon-maskable underflow..."
align 0x4
unfl_nm_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x00000000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
unfl_nm_0_pc:
fdiv.b &0x2,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x00000000,IFPREGS+0x0(%a6)
mov.l &0x40000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x00000800,IFPCREGS+0x4(%a6)
lea unfl_nm_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
inex_str:
string "\tEnabled inexact..."
align 0x4
inex_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00000200,%fpcr # enable inexact
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x50000000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
inex_0_pc:
fadd.b &0x2,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x50000000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x00000208,IFPCREGS+0x4(%a6)
lea inex_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
snan_str:
string "\tEnabled SNAN..."
align 0x4
snan_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00004000,%fpcr # enable SNAN
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0xffff0000,DATA+0x0(%a6)
mov.l &0x00000000,DATA+0x4(%a6)
mov.l &0x00000001,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
snan_0_pc:
fadd.b &0x2,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0xffff0000,IFPREGS+0x0(%a6)
mov.l &0x00000000,IFPREGS+0x4(%a6)
mov.l &0x00000001,IFPREGS+0x8(%a6)
mov.l &0x09004080,IFPCREGS+0x4(%a6)
lea snan_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
operr_str:
string "\tEnabled OPERR..."
align 0x4
operr_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00002000,%fpcr # enable OPERR
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0xffff0000,DATA+0x0(%a6)
mov.l &0x00000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
operr_0_pc:
fadd.s &0x7f800000,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0xffff0000,IFPREGS+0x0(%a6)
mov.l &0x00000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x01002080,IFPCREGS+0x4(%a6)
lea operr_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
dz_str:
string "\tEnabled DZ..."
align 0x4
dz_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmov.l &0x00000400,%fpcr # enable DZ
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x40000000,DATA+0x0(%a6)
mov.l &0x80000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmovm.x DATA(%a6),&0x80
mov.w &0x0000,%cc
dz_0_pc:
fdiv.b &0x0,%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x40000000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x02000410,IFPCREGS+0x4(%a6)
lea dz_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
#####################################################################
unsupp_str:
string "\tUnimplemented data type/format..."
# an unnormalized number
align 0x4
unsupp_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0xc03f0000,DATA+0x0(%a6)
mov.l &0x00000000,DATA+0x4(%a6)
mov.l &0x00000001,DATA+0x8(%a6)
fmov.b &0x2,%fp0
mov.w &0x0000,%cc
unsupp_0_pc:
fmul.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0xc0010000,IFPREGS+0x0(%a6)
mov.l &0x80000000,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x08000000,IFPCREGS+0x4(%a6)
lea unsupp_0_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# a denormalized number
unsupp_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0x80000000,DATA+0x0(%a6)
mov.l &0x01000000,DATA+0x4(%a6)
mov.l &0x00000000,DATA+0x8(%a6)
fmov.l &0x7fffffff,%fp0
mov.w &0x0000,%cc
unsupp_1_pc:
fmul.x DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x80170000,IFPREGS+0x0(%a6)
mov.l &0xfffffffe,IFPREGS+0x4(%a6)
mov.l &0x00000000,IFPREGS+0x8(%a6)
mov.l &0x08000000,IFPCREGS+0x4(%a6)
lea unsupp_1_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
# packed
unsupp_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
fmovm.x DEF_FPREGS(%pc),&0xff
fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
mov.w &0x0000,ICCR(%a6)
movm.l &0x7fff,IREGS(%a6)
fmovm.x &0xff,IFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
mov.l &0xc1230001,DATA+0x0(%a6)
mov.l &0x23456789,DATA+0x4(%a6)
mov.l &0x12345678,DATA+0x8(%a6)
mov.w &0x0000,%cc
unsupp_2_pc:
fabs.p DATA(%a6),%fp0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
fmovm.x &0xff,SFPREGS(%a6)
fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
mov.l &0x3e660000,IFPREGS+0x0(%a6)
mov.l &0xd0ed23e8,IFPREGS+0x4(%a6)
mov.l &0xd14035bc,IFPREGS+0x8(%a6)
mov.l &0x00000108,IFPCREGS+0x4(%a6)
lea unsupp_2_pc(%pc),%a0
mov.l %a0,IFPCREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
bsr.l chkfpregs
tst.b %d0
bne.l error
clr.l %d0
rts
###########################################################
###########################################################
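# chkregs/chkfpregs: compare the register images captured before each test
# (IREGS/ICCR and IFPREGS/IFPCREGS, patched above with the expected results)
# against the images captured after the emulated instruction ran
# (SREGS/SCCR and SFPREGS/SFPCREGS). They return d0 = 0 on a match and
# d0 = 1 on any mismatch, which "error" reports along with TESTCTR in d1.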
chkregs:
lea IREGS(%a6),%a0
lea SREGS(%a6),%a1
mov.l &14,%d0
chkregs_loop:
cmp.l (%a0)+,(%a1)+
bne.l chkregs_error
dbra.w %d0,chkregs_loop
mov.w ICCR(%a6),%d0
mov.w SCCR(%a6),%d1
cmp.w %d0,%d1
bne.l chkregs_error
clr.l %d0
rts
chkregs_error:
movq.l &0x1,%d0
rts
error:
mov.l TESTCTR(%a6),%d1
movq.l &0x1,%d0
rts
chkfpregs:
lea IFPREGS(%a6),%a0
lea SFPREGS(%a6),%a1
mov.l &23,%d0
chkfpregs_loop:
cmp.l (%a0)+,(%a1)+
bne.l chkfpregs_error
dbra.w %d0,chkfpregs_loop
lea IFPCREGS(%a6),%a0
lea SFPCREGS(%a6),%a1
cmp.l (%a0)+,(%a1)+
bne.l chkfpregs_error
cmp.l (%a0)+,(%a1)+
bne.l chkfpregs_error
cmp.l (%a0)+,(%a1)+
bne.l chkfpregs_error
clr.l %d0
rts
chkfpregs_error:
movq.l &0x1,%d0
rts
DEF_REGS:
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
DEF_FPREGS:
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
long 0x7fff0000, 0xffffffff, 0xffffffff
DEF_FPCREGS:
long 0x00000000, 0x00000000, 0x00000000
############################################################
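# callout stubs for the host environment's print routines. Like the ISP/FPSP
# callouts, each stub fetches a longword displacement from the 0x80-byte area
# that whoever links the tests is expected to place immediately before
# TESTTOP (entry 0x0 = print-string, entry 0x4 = print-number) and jumps to
# the routine that displacement points at.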
_print_str:
mov.l %d0,-(%sp)
mov.l (TESTTOP-0x80+0x0,%pc),%d0
pea (TESTTOP-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
_print_num:
mov.l %d0,-(%sp)
mov.l (TESTTOP-0x80+0x4,%pc),%d0
pea (TESTTOP-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
############################################################
#
# arch/m68k/ifpsp060/src/isp.S
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
# M68000 Hi-Performance Microprocessor Division
# M68060 Software Package
# Production Release P1.00 -- October 10, 1994
# M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
# THE SOFTWARE is provided on an "AS IS" basis and without warranty.
# To the maximum extent permitted by applicable law,
# MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
# and any warranty against infringement with regard to the SOFTWARE
# (INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
# To the maximum extent permitted by applicable law,
# IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
# (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
# BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
# ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
# Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
# You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
# so long as this entire notice is retained without alteration in any modified and/or
# redistributed versions, and that such modified versions are clearly identified as such.
# No licenses are granted by implication, estoppel or otherwise under any patents
# or trademarks of Motorola, Inc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ireal.s:
# This file is appended to the top of the 060ISP package
# and contains the entry points into the package. The user, in
# effect, branches to one of the branch table entries located
# after _060ISP_TABLE.
# Also, subroutine stubs exist in this file (_isp_done for
# example) that are referenced by the ISP package itself in order
# to call a given routine. The stub routine actually performs the
# callout. The ISP code does a "bsr" to the stub routine. This
# extra layer of hierarchy adds a slight performance penalty but
# it makes the ISP code easier to read and more maintainable.
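#
# As an illustrative sketch only (the labels _my_chk, _my_divbyzero, and
# _callout_top below are hypothetical, not part of this package): the host OS
# is expected to supply a 0x80-byte call-out area immediately in front of
# _060ISP_TABLE. Each longword in it holds the displacement from the start of
# that area to the OS routine, which is exactly what the stubs further down
# (e.g. _real_chk) add back to their PC-relative base before jumping:
#
#	_callout_top:					# == _060ISP_TABLE-0x80
#		long	_my_chk - _callout_top		# slot _off_chk (0x00)
#		long	_my_divbyzero - _callout_top	# slot _off_divbyzero (0x04)
#		...					# one longword per _off_* slot below
#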
#
set _off_chk, 0x00
set _off_divbyzero, 0x04
set _off_trace, 0x08
set _off_access, 0x0c
set _off_done, 0x10
set _off_cas, 0x14
set _off_cas2, 0x18
set _off_lock, 0x1c
set _off_unlock, 0x20
set _off_imr, 0x40
set _off_dmr, 0x44
set _off_dmw, 0x48
set _off_irw, 0x4c
set _off_irl, 0x50
set _off_drb, 0x54
set _off_drw, 0x58
set _off_drl, 0x5c
set _off_dwb, 0x60
set _off_dww, 0x64
set _off_dwl, 0x68
_060ISP_TABLE:
# Here's the table of ENTRY POINTS for those linking the package.
bra.l _isp_unimp
short 0x0000
bra.l _isp_cas
short 0x0000
bra.l _isp_cas2
short 0x0000
bra.l _isp_cas_finish
short 0x0000
bra.l _isp_cas2_finish
short 0x0000
bra.l _isp_cas_inrange
short 0x0000
bra.l _isp_cas_terminate
short 0x0000
bra.l _isp_cas_restart
short 0x0000
space 64
#############################################################
global _real_chk
_real_chk:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_chk,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_divbyzero
_real_divbyzero:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_divbyzero,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_trace
_real_trace:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_trace,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_access
_real_access:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_access,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _isp_done
_isp_done:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_done,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#######################################
global _real_cas
_real_cas:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_cas,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_cas2
_real_cas2:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_cas2,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_lock_page
_real_lock_page:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_lock,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _real_unlock_page
_real_unlock_page:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_unlock,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#######################################
global _imem_read
_imem_read:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_imr,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read
_dmem_read:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_dmr,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write
_dmem_write:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_dmw,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_word
_imem_read_word:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_irw,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _imem_read_long
_imem_read_long:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_irl,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_byte
_dmem_read_byte:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_drb,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_word
_dmem_read_word:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_drw,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_read_long
_dmem_read_long:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_drl,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_byte
_dmem_write_byte:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_dwb,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_word
_dmem_write_word:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_dww,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
global _dmem_write_long
_dmem_write_long:
mov.l %d0,-(%sp)
mov.l (_060ISP_TABLE-0x80+_off_dwl,%pc),%d0
pea.l (_060ISP_TABLE-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
#
# This file contains a set of define statements for constants
# in order to promote readability within the core code itself.
#
set LOCAL_SIZE, 96 # stack frame size(bytes)
set LV, -LOCAL_SIZE # stack offset
set EXC_ISR, 0x4 # stack status register
set EXC_IPC, 0x6 # stack pc
set EXC_IVOFF, 0xa # stacked vector offset
set EXC_AREGS, LV+64 # offset of all address regs
set EXC_DREGS, LV+32 # offset of all data regs
set EXC_A7, EXC_AREGS+(7*4) # offset of a7
set EXC_A6, EXC_AREGS+(6*4) # offset of a6
set EXC_A5, EXC_AREGS+(5*4) # offset of a5
set EXC_A4, EXC_AREGS+(4*4) # offset of a4
set EXC_A3, EXC_AREGS+(3*4) # offset of a3
set EXC_A2, EXC_AREGS+(2*4) # offset of a2
set EXC_A1, EXC_AREGS+(1*4) # offset of a1
set EXC_A0, EXC_AREGS+(0*4) # offset of a0
set EXC_D7, EXC_DREGS+(7*4) # offset of d7
set EXC_D6, EXC_DREGS+(6*4) # offset of d6
set EXC_D5, EXC_DREGS+(5*4) # offset of d5
set EXC_D4, EXC_DREGS+(4*4) # offset of d4
set EXC_D3, EXC_DREGS+(3*4) # offset of d3
set EXC_D2, EXC_DREGS+(2*4) # offset of d2
set EXC_D1, EXC_DREGS+(1*4) # offset of d1
set EXC_D0, EXC_DREGS+(0*4) # offset of d0
set EXC_TEMP, LV+16 # offset of temp stack space
set EXC_SAVVAL, LV+12 # offset of old areg value
set EXC_SAVREG, LV+11 # offset of old areg index
set SPCOND_FLG, LV+10 # offset of spc condition flg
set EXC_CC, LV+8 # offset of cc register
set EXC_EXTWPTR, LV+4 # offset of current PC
set EXC_EXTWORD, LV+2 # offset of current ext opword
set EXC_OPWORD, LV+0 # offset of current opword
###########################
# SPecial CONDition FLaGs #
###########################
set mia7_flg, 0x04 # (a7)+ flag
set mda7_flg, 0x08 # -(a7) flag
set ichk_flg, 0x10 # chk exception flag
set idbyz_flg, 0x20 # divbyzero flag
set restore_flg, 0x40 # restore -(an)+ flag
set immed_flg, 0x80 # immediate data flag
set mia7_bit, 0x2 # (a7)+ bit
set mda7_bit, 0x3 # -(a7) bit
set ichk_bit, 0x4 # chk exception bit
set idbyz_bit, 0x5 # divbyzero bit
set restore_bit, 0x6 # restore -(a7)+ bit
set immed_bit, 0x7 # immediate data bit
#########
# Misc. #
#########
set BYTE, 1 # len(byte) == 1 byte
set WORD, 2 # len(word) == 2 bytes
set LONG, 4 # len(longword) == 4 bytes
#########################################################################
# XDEF **************************************************************** #
# _isp_unimp(): 060ISP entry point for Unimplemented Instruction #
# #
# This handler should be the first code executed upon taking the #
# "Unimplemented Integer Instruction" exception in an operating #
# system. #
# #
# XREF **************************************************************** #
# _imem_read_{word,long}() - read instruction word/longword #
# _mul64() - emulate 64-bit multiply #
# _div64() - emulate 64-bit divide #
# _moveperipheral() - emulate "movep" #
# _compandset() - emulate misaligned "cas" #
# _compandset2() - emulate "cas2" #
# _chk2_cmp2() - emulate "cmp2" and "chk2" #
# _isp_done() - "callout" for normal final exit #
# _real_trace() - "callout" for Trace exception #
# _real_chk() - "callout" for Chk exception #
# _real_divbyzero() - "callout" for DZ exception #
# _real_access() - "callout" for access error exception #
# #
# INPUT *************************************************************** #
# - The system stack contains the Unimp Int Instr stack frame #
# #
# OUTPUT ************************************************************** #
# If Trace exception: #
# - The system stack changed to contain Trace exc stack frame #
# If Chk exception: #
# - The system stack changed to contain Chk exc stack frame #
# If DZ exception: #
# - The system stack changed to contain DZ exc stack frame #
# If access error exception: #
# - The system stack changed to contain access err exc stk frame #
# Else: #
# - Results saved as appropriate #
# #
# ALGORITHM *********************************************************** #
# This handler fetches the first instruction longword from #
# memory and decodes it to determine which of the unimplemented #
# integer instructions caused this exception. This handler then calls #
# one of _mul64(), _div64(), _moveperipheral(), _compandset(), #
# _compandset2(), or _chk2_cmp2() as appropriate. #
# Some of these instructions, by their nature, may produce other #
# types of exceptions. "div" can produce a divide-by-zero exception, #
# and "chk2" can cause a "Chk" exception. In both cases, the current #
# exception stack frame must be converted to an exception stack frame #
# of the correct exception type and an exit must be made through #
# _real_divbyzero() or _real_chk() as appropriate. In addition, all #
# instructions may be executing while Trace is enabled. If so, then #
# a Trace exception stack frame must be created and an exit made #
# through _real_trace(). #
# Meanwhile, if any read or write to memory using the #
# _mem_{read,write}() "callout"s returns a failing value, then an #
# access error frame must be created and an exit made through #
# _real_access(). #
# If none of these occur, then a normal exit is made through #
# _isp_done(). #
# #
# This handler, upon entry, saves almost all user-visible #
# address and data registers to the stack. Although this may seem to #
# cause excess memory traffic, it was found that due to having to #
# access these register files for things like data retrieval and <ea> #
# calculations, it was more efficient to have them on the stack where #
# they could be accessed by indexing rather than to make subroutine #
# calls to retrieve a register of a particular index. #
# #
#########################################################################
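#
# A minimal sketch (not part of the package) of how a host OS, running in
# supervisor mode, might route this exception here. The 68060 Unimplemented
# Integer Instruction exception uses vector 61, i.e. vector-table offset 0xf4
# (the same 0x0f4 that appears in the stack-frame diagrams below), and the
# first branch-table entry at _060ISP_TABLE is the "bra.l _isp_unimp" entry:
#
#	movc	%vbr,%a0			# base of the vector table
#	lea	_060ISP_TABLE(%pc),%a1		# entry 0: bra.l _isp_unimp
#	mov.l	%a1,0xf4(%a0)			# install vector 61
#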
global _isp_unimp
_isp_unimp:
link.w %a6,&-LOCAL_SIZE # create room for stack frame
movm.l &0x3fff,EXC_DREGS(%a6) # store d0-d7/a0-a5
mov.l (%a6),EXC_A6(%a6) # store a6
btst &0x5,EXC_ISR(%a6) # from s or u mode?
bne.b uieh_s # supervisor mode
uieh_u:
mov.l %usp,%a0 # fetch user stack pointer
mov.l %a0,EXC_A7(%a6) # store a7
bra.b uieh_cont
uieh_s:
lea 0xc(%a6),%a0
mov.l %a0,EXC_A7(%a6) # store corrected sp
###############################################################################
uieh_cont:
clr.b SPCOND_FLG(%a6) # clear "special case" flag
mov.w EXC_ISR(%a6),EXC_CC(%a6) # store cc copy on stack
mov.l EXC_IPC(%a6),EXC_EXTWPTR(%a6) # store extwptr on stack
#
# fetch the opword and first extension word pointed to by the stacked pc
# and store them to the stack for now
#
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch opword & extword
mov.l %d0,EXC_OPWORD(%a6) # store extword on stack
#########################################################################
# muls.l 0100 1100 00 |<ea>| 0*** 1100 0000 0*** #
# mulu.l 0100 1100 00 |<ea>| 0*** 0100 0000 0*** #
# #
# divs.l 0100 1100 01 |<ea>| 0*** 1100 0000 0*** #
# divu.l 0100 1100 01 |<ea>| 0*** 0100 0000 0*** #
# #
# movep.w m2r 0000 ***1 00 001*** | <displacement> | #
# movep.l m2r 0000 ***1 01 001*** | <displacement> | #
# movep.w r2m 0000 ***1 10 001*** | <displacement> | #
# movep.l r2m 0000 ***1 11 001*** | <displacement> | #
# #
# cas.w 0000 1100 11 |<ea>| 0000 000* **00 0*** #
# cas.l 0000 1110 11 |<ea>| 0000 000* **00 0*** #
# #
# cas2.w 0000 1100 11 111100 **** 000* **00 0*** #
# **** 000* **00 0*** #
# cas2.l 0000 1110 11 111100 **** 000* **00 0*** #
# **** 000* **00 0*** #
# #
# chk2.b 0000 0000 11 |<ea>| **** 1000 0000 0000 #
# chk2.w 0000 0010 11 |<ea>| **** 1000 0000 0000 #
# chk2.l 0000 0100 11 |<ea>| **** 1000 0000 0000 #
# #
# cmp2.b 0000 0000 11 |<ea>| **** 0000 0000 0000 #
# cmp2.w 0000 0010 11 |<ea>| **** 0000 0000 0000 #
# cmp2.l 0000 0100 11 |<ea>| **** 0000 0000 0000 #
#########################################################################
#
# using bit 14 of the operation word, separate into 2 groups:
# (group1) mul64, div64
# (group2) movep, chk2, cmp2, cas2, cas
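#
# for example, "mulu.l (a0),%d1:%d0" has opword 0x4c10, so bit 14 of the
# opword (bit 30 of the fetched longword tested below) is set -> group1;
# "movep.w (0x0,a0),%d0" has opword 0x0108, bit 14 clear -> group2.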
#
btst &0x1e,%d0 # group1 or group2
beq.b uieh_group2 # go handle group2
#
# now, w/ group1, make mul64's decode the fastest since it will
# most likely be used the most.
#
uieh_group1:
btst &0x16,%d0 # test for div64
bne.b uieh_div64 # go handle div64
uieh_mul64:
# mul64() may use ()+ addressing and may, therefore, alter a7
bsr.l _mul64 # _mul64()
btst &0x5,EXC_ISR(%a6) # supervisor mode?
beq.w uieh_done
btst &mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
beq.w uieh_done # no
btst &0x7,EXC_ISR(%a6) # is trace enabled?
bne.w uieh_trace_a7 # yes
bra.w uieh_a7 # no
uieh_div64:
# div64() may use ()+ addressing and may, therefore, alter a7.
# div64() may take a divide by zero exception.
bsr.l _div64 # _div64()
# here, we sort out all of the special cases that may have happened.
btst &mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
bne.b uieh_div64_a7 # yes
uieh_div64_dbyz:
btst &idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
bne.w uieh_divbyzero # yes
bra.w uieh_done # no
uieh_div64_a7:
btst &0x5,EXC_ISR(%a6) # supervisor mode?
beq.b uieh_div64_dbyz # no
# here, a7 has been incremented by 4 bytes in supervisor mode. we still
# may have the following 3 cases:
# (i) (a7)+
# (ii) (a7)+; trace
# (iii) (a7)+; divide-by-zero
#
btst &idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
bne.w uieh_divbyzero_a7 # yes
tst.b EXC_ISR(%a6) # no; is trace enabled?
bmi.w uieh_trace_a7 # yes
bra.w uieh_a7 # no
#
# now, w/ group2, make movep's decode the fastest since it will
# most likely be used the most.
#
uieh_group2:
btst &0x18,%d0 # test for not movep
beq.b uieh_not_movep
bsr.l _moveperipheral # _movep()
bra.w uieh_done
uieh_not_movep:
btst &0x1b,%d0 # test for chk2,cmp2
beq.b uieh_chk2cmp2 # go handle chk2,cmp2
swap %d0 # put opword in lo word
cmpi.b %d0,&0xfc # test for cas2
beq.b uieh_cas2 # go handle cas2
uieh_cas:
bsr.l _compandset # _cas()
# the cases of "cas Dc,Du,(a7)+" and "cas Dc,Du,-(a7)" used from supervisor
# mode are simply not considered valid and therefore are not handled.
bra.w uieh_done
uieh_cas2:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # read extension word
tst.l %d1 # ifetch error?
bne.w isp_iacc # yes
bsr.l _compandset2 # _cas2()
bra.w uieh_done
uieh_chk2cmp2:
# chk2 may take a chk exception
bsr.l _chk2_cmp2 # _chk2_cmp2()
# here we check to see if a chk trap should be taken
cmpi.b SPCOND_FLG(%a6),&ichk_flg
bne.w uieh_done
bra.b uieh_chk_trap
###########################################################################
#
# the required emulation has been completed. now, clean up the necessary stack
# info and prepare for rte
#
uieh_done:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
# if exception occurred in user mode, then we have to restore a7 in case it
# changed. we don't have to update a7 for supervisor mode because that case
# doesn't flow through here
btst &0x5,EXC_ISR(%a6) # user or supervisor?
bne.b uieh_finish # supervisor
mov.l EXC_A7(%a6),%a0 # fetch user stack pointer
mov.l %a0,%usp # restore it
uieh_finish:
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
btst &0x7,EXC_ISR(%a6) # is trace mode on?
bne.b uieh_trace # yes;go handle trace mode
mov.l EXC_EXTWPTR(%a6),EXC_IPC(%a6) # new pc on stack frame
mov.l EXC_A6(%a6),(%a6) # prepare new a6 for unlink
unlk %a6 # unlink stack frame
bra.l _isp_done
#
# The instruction that was just emulated was also being traced. The trace
# trap for this instruction will be lost unless we jump to the trace handler.
# So, here we create a Trace Exception format number two exception stack
# frame from the Unimplemented Integer Instruction Exception stack frame
# format number zero and jump to the user supplied hook "_real_trace()".
#
# UIEH FRAME TRACE FRAME
# ***************** *****************
# * 0x0 * 0x0f4 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x024 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# ->* Old * *****************
# from link -->* A6 * * SR *
# ***************** *****************
# /* A7 * * New * <-- for final unlink
# / * * * A6 *
# link frame < ***************** *****************
# \ ~ ~ ~ ~
# \***************** *****************
#
uieh_trace:
mov.l EXC_A6(%a6),-0x4(%a6)
mov.w EXC_ISR(%a6),0x0(%a6)
mov.l EXC_IPC(%a6),0x8(%a6)
mov.l EXC_EXTWPTR(%a6),0x2(%a6)
mov.w &0x2024,0x6(%a6)
sub.l &0x4,%a6
unlk %a6
bra.l _real_trace
#
# UIEH FRAME CHK FRAME
# ***************** *****************
# * 0x0 * 0x0f4 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x018 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# (4 words) *****************
# * SR *
# *****************
# (6 words)
#
# the chk2 instruction should take a chk trap. so, here we must create a
# chk stack frame from an unimplemented integer instruction exception frame
# and jump to the user supplied entry point "_real_chk()".
#
uieh_chk_trap:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
mov.w EXC_ISR(%a6),(%a6) # put new SR on stack
mov.l EXC_IPC(%a6),0x8(%a6) # put "Current PC" on stack
mov.l EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
mov.w &0x2018,0x6(%a6) # put Vector Offset on stack
mov.l EXC_A6(%a6),%a6 # restore a6
add.l &LOCAL_SIZE,%sp # clear stack frame
bra.l _real_chk
#
# UIEH FRAME DIVBYZERO FRAME
# ***************** *****************
# * 0x0 * 0x0f4 * * Current *
# ***************** * PC *
# * Current * *****************
# * PC * * 0x2 * 0x014 *
# ***************** *****************
# * SR * * Next *
# ***************** * PC *
# (4 words) *****************
# * SR *
# *****************
# (6 words)
#
# the divide instruction should take an integer divide by zero trap. so, here
# we must create a divbyzero stack frame from an unimplemented integer
# instruction exception frame and jump to the user supplied entry point
# "_real_divbyzero()".
#
uieh_divbyzero:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
mov.w EXC_ISR(%a6),(%a6) # put new SR on stack
mov.l EXC_IPC(%a6),0x8(%a6) # put "Current PC" on stack
mov.l EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
mov.w &0x2014,0x6(%a6) # put Vector Offset on stack
mov.l EXC_A6(%a6),%a6 # restore a6
add.l &LOCAL_SIZE,%sp # clear stack frame
bra.l _real_divbyzero
#
# DIVBYZERO FRAME
# *****************
# * Current *
# UIEH FRAME * PC *
# ***************** *****************
# * 0x0 * 0x0f4 * * 0x2 * 0x014 *
# ***************** *****************
# * Current * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
# (4 words) (6 words)
#
# the divide instruction should take an integer divide by zero trap. so, here
# we must create a divbyzero stack frame from an unimplemented integer
# instruction exception frame and jump to the user supplied entry point
# "_real_divbyzero()".
#
# However, we must also deal with the fact that (a7)+ was used from supervisor
# mode, thereby shifting the stack frame up 4 bytes.
#
uieh_divbyzero_a7:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
mov.l EXC_IPC(%a6),0xc(%a6) # put "Current PC" on stack
mov.w &0x2014,0xa(%a6) # put Vector Offset on stack
mov.l EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
mov.l EXC_A6(%a6),%a6 # restore a6
add.l &4+LOCAL_SIZE,%sp # clear stack frame
bra.l _real_divbyzero
#
# TRACE FRAME
# *****************
# * Current *
# UIEH FRAME * PC *
# ***************** *****************
# * 0x0 * 0x0f4 * * 0x2 * 0x024 *
# ***************** *****************
# * Current * * Next *
# * PC * * PC *
# ***************** *****************
# * SR * * SR *
# ***************** *****************
# (4 words) (6 words)
#
#
# The instruction that was just emulated was also being traced. The trace
# trap for this instruction will be lost unless we jump to the trace handler.
# So, here we create a Trace Exception format number two exception stack
# frame from the Unimplemented Integer Instruction Exception stack frame
# format number zero and jump to the user supplied hook "_real_trace()".
#
# However, we must also deal with the fact that (a7)+ was used from supervisor
# mode, thereby shifting the stack frame up 4 bytes.
#
uieh_trace_a7:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
mov.l EXC_IPC(%a6),0xc(%a6) # put "Current PC" on stack
mov.w &0x2024,0xa(%a6) # put Vector Offset on stack
mov.l EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
mov.l EXC_A6(%a6),%a6 # restore a6
add.l &4+LOCAL_SIZE,%sp # clear stack frame
bra.l _real_trace
#
# UIEH FRAME
# *****************
# * 0x0 * 0x0f4 *
# UIEH FRAME *****************
# ***************** * Next *
# * 0x0 * 0x0f4 * * PC *
# ***************** *****************
# * Current * * SR *
# * PC * *****************
# ***************** (4 words)
# * SR *
# *****************
# (4 words)
uieh_a7:
mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
mov.w &0x00f4,0xe(%a6) # put Vector Offset on stack
mov.l EXC_EXTWPTR(%a6),0xa(%a6) # put "Next PC" on stack
mov.w EXC_ISR(%a6),0x8(%a6) # put SR on stack
mov.l EXC_A6(%a6),%a6 # restore a6
add.l &8+LOCAL_SIZE,%sp # clear stack frame
bra.l _isp_done
##########
# this is the exit point if a data read or write fails.
# a0 = failing address
# d0 = fslw
isp_dacc:
mov.l %a0,(%a6) # save address
mov.l %d0,-0x4(%a6) # save partial fslw
lea -64(%a6),%sp
movm.l (%sp)+,&0x7fff # restore d0-d7/a0-a6
mov.l 0xc(%sp),-(%sp) # move voff,hi(pc)
mov.l 0x4(%sp),0x10(%sp) # store fslw
mov.l 0xc(%sp),0x4(%sp) # store sr,lo(pc)
mov.l 0x8(%sp),0xc(%sp) # store address
mov.l (%sp)+,0x4(%sp) # store voff,hi(pc)
mov.w &0x4008,0x6(%sp) # store new voff
bra.b isp_acc_exit
# this is the exit point if an instruction word read fails.
# FSLW:
# misaligned = true
# read = true
# size = word
# instruction = true
# software emulation error = true
isp_iacc:
movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
unlk %a6 # unlink frame
sub.w &0x8,%sp # make room for acc frame
mov.l 0x8(%sp),(%sp) # store sr,lo(pc)
mov.w 0xc(%sp),0x4(%sp) # store hi(pc)
mov.w &0x4008,0x6(%sp) # store new voff
mov.l 0x2(%sp),0x8(%sp) # store address (=pc)
mov.l &0x09428001,0xc(%sp) # store fslw
isp_acc_exit:
btst &0x5,(%sp) # user or supervisor?
beq.b isp_acc_exit2 # user
bset &0x2,0xd(%sp) # set supervisor TM bit
isp_acc_exit2:
bra.l _real_access
# if the addressing mode was (an)+ or -(an), the address register must
# be restored to its pre-exception value before entering _real_access.
isp_restore:
cmpi.b SPCOND_FLG(%a6),&restore_flg # do we need a restore?
bne.b isp_restore_done # no
clr.l %d0
mov.b EXC_SAVREG(%a6),%d0 # regno to restore
mov.l EXC_SAVVAL(%a6),(EXC_AREGS,%a6,%d0.l*4) # restore value
isp_restore_done:
rts
#########################################################################
# XDEF **************************************************************** #
# _calc_ea(): routine to calculate effective address #
# #
# XREF **************************************************************** #
# _imem_read_word() - read instruction word #
# _imem_read_long() - read instruction longword #
# _dmem_read_long() - read data longword (for memory indirect) #
# isp_iacc() - handle instruction access error exception #
# isp_dacc() - handle data access error exception #
# #
# INPUT *************************************************************** #
# d0 = number of bytes related to effective address (w,l) #
# #
# OUTPUT ************************************************************** #
# If exiting through isp_dacc... #
# a0 = failing address #
# d0 = FSLW #
# elsif exiting though isp_iacc... #
# none #
# else #
# a0 = effective address #
# #
# ALGORITHM *********************************************************** #
# The effective address type is decoded from the opword residing #
# on the stack. A jump table is used to vector to a routine for the #
# appropriate mode. Since none of the emulated integer instructions #
# uses byte-sized operands, only word and long operations are handled.     #
# #
# Dn,An - shouldn't enter here #
# (An) - fetch An value from stack #
# -(An) - fetch An value from stack; return decr value; #
# place decr value on stack; store old value in case of #
# future access error; if -(a7), set mda7_flg in #
# SPCOND_FLG #
# (An)+ - fetch An value from stack; return value; #
# place incr value on stack; store old value in case of #
# future access error; if (a7)+, set mia7_flg in #
# SPCOND_FLG #
# (d16,An) - fetch An value from stack; read d16 using #
# _imem_read_word(); fetch may fail -> branch to #
# isp_iacc() #
# (xxx).w,(xxx).l - use _imem_read_{word,long}() to fetch #
# address; fetch may fail #
# #<data> - return address of immediate value; set immed_flg #
# in SPCOND_FLG #
# (d16,PC) - fetch stacked PC value; read d16 using #
# _imem_read_word(); fetch may fail -> branch to #
# isp_iacc() #
# everything else - read needed displacements as appropriate w/ #
# _imem_read_{word,long}(); read may fail; if memory #
# indirect, read indirect address using #
# _dmem_read_long() which may also fail #
# #
#########################################################################
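#
# worked example (illustrative only): for "muls.l (0x10,a2),%d1:%d0" the
# stacked opword is 0x4c2a; the low six bits 0x2a select mode 101 (d16,An),
# reg 010, so the jump table below dispatches to addr_ind_disp_a2, which
# fetches the 16-bit displacement that follows, sign extends it, and
# returns EXC_A2 + 0x10 in a0.
#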
global _calc_ea
_calc_ea:
mov.l %d0,%a0 # move # bytes to a0
# MODE and REG are taken from the EXC_OPWORD.
mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
mov.w %d0,%d1 # make a copy
andi.w &0x3f,%d0 # extract mode field
andi.l &0x7,%d1 # extract reg field
# jump to the corresponding function for each {MODE,REG} pair.
mov.w (tbl_ea_mode.b,%pc,%d0.w*2), %d0 # fetch jmp distance
jmp (tbl_ea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
swbeg &64
tbl_ea_mode:
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short addr_ind_a0 - tbl_ea_mode
short addr_ind_a1 - tbl_ea_mode
short addr_ind_a2 - tbl_ea_mode
short addr_ind_a3 - tbl_ea_mode
short addr_ind_a4 - tbl_ea_mode
short addr_ind_a5 - tbl_ea_mode
short addr_ind_a6 - tbl_ea_mode
short addr_ind_a7 - tbl_ea_mode
short addr_ind_p_a0 - tbl_ea_mode
short addr_ind_p_a1 - tbl_ea_mode
short addr_ind_p_a2 - tbl_ea_mode
short addr_ind_p_a3 - tbl_ea_mode
short addr_ind_p_a4 - tbl_ea_mode
short addr_ind_p_a5 - tbl_ea_mode
short addr_ind_p_a6 - tbl_ea_mode
short addr_ind_p_a7 - tbl_ea_mode
short addr_ind_m_a0 - tbl_ea_mode
short addr_ind_m_a1 - tbl_ea_mode
short addr_ind_m_a2 - tbl_ea_mode
short addr_ind_m_a3 - tbl_ea_mode
short addr_ind_m_a4 - tbl_ea_mode
short addr_ind_m_a5 - tbl_ea_mode
short addr_ind_m_a6 - tbl_ea_mode
short addr_ind_m_a7 - tbl_ea_mode
short addr_ind_disp_a0 - tbl_ea_mode
short addr_ind_disp_a1 - tbl_ea_mode
short addr_ind_disp_a2 - tbl_ea_mode
short addr_ind_disp_a3 - tbl_ea_mode
short addr_ind_disp_a4 - tbl_ea_mode
short addr_ind_disp_a5 - tbl_ea_mode
short addr_ind_disp_a6 - tbl_ea_mode
short addr_ind_disp_a7 - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short _addr_ind_ext - tbl_ea_mode
short abs_short - tbl_ea_mode
short abs_long - tbl_ea_mode
short pc_ind - tbl_ea_mode
short pc_ind_ext - tbl_ea_mode
short immediate - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
short tbl_ea_mode - tbl_ea_mode
###################################
# Address register indirect: (An) #
###################################
addr_ind_a0:
mov.l EXC_A0(%a6),%a0 # Get current a0
rts
addr_ind_a1:
mov.l EXC_A1(%a6),%a0 # Get current a1
rts
addr_ind_a2:
mov.l EXC_A2(%a6),%a0 # Get current a2
rts
addr_ind_a3:
mov.l EXC_A3(%a6),%a0 # Get current a3
rts
addr_ind_a4:
mov.l EXC_A4(%a6),%a0 # Get current a4
rts
addr_ind_a5:
mov.l EXC_A5(%a6),%a0 # Get current a5
rts
addr_ind_a6:
mov.l EXC_A6(%a6),%a0 # Get current a6
rts
addr_ind_a7:
mov.l EXC_A7(%a6),%a0 # Get current a7
rts
#####################################################
# Address register indirect w/ postincrement: (An)+ #
#####################################################
addr_ind_p_a0:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A0(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A0(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x0,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a1:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A1(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A1(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x1,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a2:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A2(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A2(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x2,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a3:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A3(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A3(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x3,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a4:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A4(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A4(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x4,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a5:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A5(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A5(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x5,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a6:
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A6(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A6(%a6) # save incremented value
mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
mov.b &0x6,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_p_a7:
mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l %a0,%d0 # copy no. bytes
mov.l EXC_A7(%a6),%a0 # load current value
add.l %a0,%d0 # increment
mov.l %d0,EXC_A7(%a6) # save incremented value
rts
####################################################
# Address register indirect w/ predecrement: -(An) #
####################################################
addr_ind_m_a0:
mov.l EXC_A0(%a6),%d0 # Get current a0
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A0(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x0,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a1:
mov.l EXC_A1(%a6),%d0 # Get current a1
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A1(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x1,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a2:
mov.l EXC_A2(%a6),%d0 # Get current a2
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A2(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x2,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a3:
mov.l EXC_A3(%a6),%d0 # Get current a3
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A3(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x3,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a4:
mov.l EXC_A4(%a6),%d0 # Get current a4
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A4(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x4,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a5:
mov.l EXC_A5(%a6),%d0 # Get current a5
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A5(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x5,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a6:
mov.l EXC_A6(%a6),%d0 # Get current a6
mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A6(%a6) # Save decr value
mov.l %d0,%a0
mov.b &0x6,EXC_SAVREG(%a6) # save regno, too
mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
rts
addr_ind_m_a7:
mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
mov.l EXC_A7(%a6),%d0 # Get current a7
sub.l %a0,%d0 # Decrement
mov.l %d0,EXC_A7(%a6) # Save decr value
mov.l %d0,%a0
rts
########################################################
# Address register indirect w/ displacement: (d16, An) #
########################################################
addr_ind_disp_a0:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A0(%a6),%a0 # a0 + d16
rts
addr_ind_disp_a1:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A1(%a6),%a0 # a1 + d16
rts
addr_ind_disp_a2:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A2(%a6),%a0 # a2 + d16
rts
addr_ind_disp_a3:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A3(%a6),%a0 # a3 + d16
rts
addr_ind_disp_a4:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A4(%a6),%a0 # a4 + d16
rts
addr_ind_disp_a5:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A5(%a6),%a0 # a5 + d16
rts
addr_ind_disp_a6:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A6(%a6),%a0 # a6 + d16
rts
addr_ind_disp_a7:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_A7(%a6),%a0 # a7 + d16
rts
########################################################################
# Address register indirect w/ index(8-bit displacement): (dn, An, Xn) #
# " " " w/ " (base displacement): (bd, An, Xn) #
# Memory indirect postindexed: ([bd, An], Xn, od) #
# Memory indirect preindexed: ([bd, An, Xn], od) #
########################################################################
_addr_ind_ext:
mov.l %d1,-(%sp)
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch extword in d0
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.l (%sp)+,%d1
mov.l (EXC_AREGS,%a6,%d1.w*4),%a0 # put base in a0
btst &0x8,%d0
beq.b addr_ind_index_8bit # brief or full extension word?
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d5 # put extword in d5
mov.l %a0,%d3 # put base in d3
bra.l calc_mem_ind # calc memory indirect
addr_ind_index_8bit:
mov.l %d2,-(%sp) # save old d2
mov.l %d0,%d1
rol.w &0x4,%d1
andi.w &0xf,%d1 # extract index regno
mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
btst &0xb,%d0 # is it word or long?
bne.b aii8_long
ext.l %d1 # sign extend word index
aii8_long:
mov.l %d0,%d2
rol.w &0x7,%d2
andi.l &0x3,%d2 # extract scale value
lsl.l %d2,%d1 # shift index by scale
extb.l %d0 # sign extend displacement
add.l %d1,%d0 # index + disp
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore old d2
rts
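# The brief extension word case above computes, in C terms (an illustrative
# sketch only; not assembled; regs[] mirrors the EXC_DREGS layout with the
# address registers following the data registers):
#
#    #include <stdint.h>
#
#    static uint32_t brief_ext_ea(uint32_t an, uint16_t ext, const uint32_t *regs)
#    {
#        int32_t idx = (int32_t)regs[ext >> 12];    /* D/A + index regno */
#        if (!(ext & 0x0800))                       /* W/L bit: word index? */
#            idx = (int16_t)idx;                    /* sign extend word index */
#        idx *= 1 << ((ext >> 9) & 0x3);            /* apply scale 1/2/4/8 */
#        return an + (int8_t)(ext & 0xff) + idx;    /* An + d8 + scaled Xn */
#    }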
######################
# Immediate: #<data> #
#########################################################################
# word, long: <ea> of the data is the current extension word #
# pointer value. new extension word pointer is simply the old #
# plus the number of bytes in the data type(2 or 4). #
#########################################################################
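# In C terms (an illustrative sketch only; not assembled; in the code below
# the pointer advance is left to the caller, e.g. dimmed and mul64_immed
# advance it by 4 themselves before reading the data):
#
#    #include <stdint.h>
#
#    static uint32_t ea_immediate(uint32_t *extwptr, unsigned nbytes)
#    {
#        uint32_t ea = *extwptr;        /* data follows the opcode */
#        *extwptr += nbytes;            /* 2 for word, 4 for long */
#        return ea;
#    }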
immediate:
mov.b &immed_flg,SPCOND_FLG(%a6) # set immediate flag
mov.l EXC_EXTWPTR(%a6),%a0 # fetch extension word ptr
rts
###########################
# Absolute short: (XXX).W #
###########################
abs_short:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch short address
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # return <ea> in a0
rts
##########################
# Absolute long: (XXX).L #
##########################
abs_long:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long # fetch long address
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.l %d0,%a0 # return <ea> in a0
rts
#######################################################
# Program counter indirect w/ displacement: (d16, PC) #
#######################################################
pc_ind:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch word displacement
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.w %d0,%a0 # sign extend displacement
add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
# _imem_read_word() increased the extwptr by 2. need to adjust here.
subq.l &0x2,%a0 # adjust <ea>
rts
##########################################################
# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
# " " w/ " (base displacement): (bd, PC, An) #
# PC memory indirect postindexed: ([bd, PC], Xn, od) #
# PC memory indirect preindexed: ([bd, PC, Xn], od) #
##########################################################
pc_ind_ext:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word # fetch ext word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
subq.l &0x2,%a0 # adjust base
btst &0x8,%d0 # is disp only 8 bits?
beq.b pc_ind_index_8bit # yes
# the indexed addressing mode uses a base displacement of size
# word or long
movm.l &0x3c00,-(%sp) # save d2-d5
mov.l %d0,%d5 # put extword in d5
mov.l %a0,%d3 # put base in d3
bra.l calc_mem_ind # calc memory indirect
pc_ind_index_8bit:
mov.l %d2,-(%sp) # create a temp register
mov.l %d0,%d1 # make extword copy
rol.w &0x4,%d1 # rotate reg num into place
andi.w &0xf,%d1 # extract register number
mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
btst &0xb,%d0 # is index word or long?
bne.b pii8_long # long
ext.l %d1 # sign extend word index
pii8_long:
mov.l %d0,%d2 # make extword copy
rol.w &0x7,%d2 # rotate scale value into place
andi.l &0x3,%d2 # extract scale value
lsl.l %d2,%d1 # shift index by scale
extb.l %d0 # sign extend displacement
add.l %d1,%d0 # index + disp
add.l %d0,%a0 # An + (index + disp)
mov.l (%sp)+,%d2 # restore temp register
rts
# a5 = exc_extwptr (global to uaeh)
# a4 = exc_opword (global to uaeh)
# a3 = exc_dregs (global to uaeh)
# d2 = index (internal " " )
# d3 = base (internal " " )
# d4 = od (internal " " )
# d5 = extword (internal " " )
calc_mem_ind:
btst &0x6,%d5 # is the index suppressed?
beq.b calc_index
clr.l %d2 # yes, so index = 0
bra.b base_supp_ck
calc_index:
bfextu %d5{&16:&4},%d2 # extract index regno (D/A + reg #)
mov.l (EXC_DREGS,%a6,%d2.w*4),%d2 # fetch index reg value
btst &0xb,%d5 # is index word or long?
bne.b no_ext # long index; already 32 bits
ext.l %d2 # sign extend word index
no_ext:
bfextu %d5{&21:&2},%d0 # extract scale value
lsl.l %d0,%d2 # shift index by scale
base_supp_ck:
btst &0x7,%d5 # is the base register suppressed?
beq.b no_base_sup # no
clr.l %d3 # yes, so base = 0
no_base_sup:
bfextu %d5{&26:&2},%d0 # get bd size
# beq.l _error # if (size == 0) it's reserved
cmpi.b %d0,&2
blt.b no_bd
beq.b get_word_bd
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
bra.b chk_ind
get_word_bd:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
ext.l %d0 # sign extend bd
chk_ind:
add.l %d0,%d3 # base += bd
no_bd:
bfextu %d5{&30:&2},%d0 # is od suppressed?
beq.w aii_bd
cmpi.b %d0,&0x2
blt.b null_od
beq.b word_od
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_long
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
bra.b add_them
word_od:
mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
bsr.l _imem_read_word
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
ext.l %d0 # sign extend od
bra.b add_them
null_od:
clr.l %d0
add_them:
mov.l %d0,%d4
btst &0x2,%d5 # pre or post indexing?
beq.b pre_indexed
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # dfetch error?
bne.b calc_ea_err # yes
add.l %d2,%d0 # <ea> += index
add.l %d4,%d0 # <ea> += od
bra.b done_ea
pre_indexed:
add.l %d2,%d3 # preindexing
mov.l %d3,%a0
bsr.l _dmem_read_long
tst.l %d1 # ifetch error?
bne.b calc_ea_err # yes
add.l %d4,%d0 # ea += od
bra.b done_ea
aii_bd:
add.l %d2,%d3 # ea = (base + bd) + index
mov.l %d3,%d0
done_ea:
mov.l %d0,%a0
movm.l (%sp)+,&0x003c # restore d2-d5
rts
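# The routine above evaluates the three full-format cases. In C (an
# illustrative sketch only; not assembled; read32() stands in for
# _dmem_read_long()):
#
#    #include <stdint.h>
#    extern uint32_t read32(uint32_t addr);
#
#    /* base = An or PC, idx = scaled index, bd/od = base/outer displacement */
#    static uint32_t mem_ind_ea(uint32_t base, uint32_t idx, uint32_t bd,
#                               uint32_t od, int indirect, int post_indexed)
#    {
#        uint32_t inner = base + bd;
#        if (!indirect)
#            return inner + idx;                    /* (bd,An,Xn) */
#        if (post_indexed)
#            return read32(inner) + idx + od;       /* ([bd,An],Xn,od) */
#        return read32(inner + idx) + od;           /* ([bd,An,Xn],od) */
#    }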
# if dmem_read_long() returns a fail message in d1, the package
# must create an access error frame. here, we pass a skeleton fslw
# and the failing address to the routine that creates the new frame.
# FSLW:
# read = true
# size = longword
# TM = data
# software emulation error = true
calc_ea_err:
mov.l %d3,%a0 # pass failing address
mov.l &0x01010001,%d0 # pass fslw
bra.l isp_dacc
#########################################################################
# XDEF **************************************************************** #
# _moveperipheral(): routine to emulate movep instruction #
# #
# XREF **************************************************************** #
# _dmem_read_byte() - read byte from memory #
# _dmem_write_byte() - write byte to memory #
# isp_dacc() - handle data access error exception #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# If exiting through isp_dacc... #
# a0 = failing address #
# d0 = FSLW #
# else #
# none #
# #
# ALGORITHM *********************************************************** #
# Decode the movep instruction words stored at EXC_OPWORD and #
# either read or write the required bytes from/to memory. Use the #
# _dmem_{read,write}_byte() routines. If one of the memory routines #
# returns a failing value, we must pass the failing address and a FSLW #
# to the _isp_dacc() routine. #
# Since this instruction is used to access peripherals, make sure #
# to only access the required bytes. #
# #
#########################################################################
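# The byte-lane access pattern emulated below, in C (an illustrative sketch
# only; not assembled; rd8/wr8 stand in for _dmem_{read,write}_byte):
#
#    #include <stdint.h>
#    extern uint8_t rd8(uint32_t addr);
#    extern void wr8(uint32_t addr, uint8_t b);
#
#    static uint32_t movep_read(uint32_t addr, int nbytes)   /* (d,Ay) -> Dx */
#    {
#        uint32_t val = 0;
#        for (int i = 0; i < nbytes; i++, addr += 2)
#            val = (val << 8) | rd8(addr);          /* high byte first */
#        return val;
#    }
#
#    static void movep_write(uint32_t addr, uint32_t dx, int nbytes)
#    {
#        for (int i = nbytes - 1; i >= 0; i--, addr += 2)
#            wr8(addr, (dx >> (8 * i)) & 0xff);     /* high byte first */
#    }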
###########################
# movep.(w,l) Dx,(d,Ay) #
# movep.(w,l) (d,Ay),Dx #
###########################
global _moveperipheral
_moveperipheral:
mov.w EXC_OPWORD(%a6),%d1 # fetch the opcode word
mov.b %d1,%d0
and.w &0x7,%d0 # extract Ay from opcode word
mov.l (EXC_AREGS,%a6,%d0.w*4),%a0 # fetch ay
add.w EXC_EXTWORD(%a6),%a0 # add: an + sgn_ext(disp)
btst &0x7,%d1 # (reg 2 mem) or (mem 2 reg)
beq.w mem2reg
# reg2mem: fetch dx, then write it to memory
reg2mem:
mov.w %d1,%d0
rol.w &0x7,%d0
and.w &0x7,%d0 # extract Dx from opcode word
mov.l (EXC_DREGS,%a6,%d0.w*4), %d0 # fetch dx
btst &0x6,%d1 # word or long operation?
beq.b r2mwtrans
# a0 = dst addr
# d0 = Dx
r2mltrans:
mov.l %d0,%d2 # store data
mov.l %a0,%a2 # store addr
rol.l &0x8,%d2
mov.l %d2,%d0
bsr.l _dmem_write_byte # os : write hi
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
add.w &0x2,%a2 # incr addr
mov.l %a2,%a0
rol.l &0x8,%d2
mov.l %d2,%d0
bsr.l _dmem_write_byte # os : write 2nd byte
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
add.w &0x2,%a2 # incr addr
mov.l %a2,%a0
rol.l &0x8,%d2
mov.l %d2,%d0
bsr.l _dmem_write_byte # os : write 3rd byte
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
add.w &0x2,%a2 # incr addr
mov.l %a2,%a0
rol.l &0x8,%d2
mov.l %d2,%d0
bsr.l _dmem_write_byte # os : write lo
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
rts
# a0 = dst addr
# d0 = Dx
r2mwtrans:
mov.l %d0,%d2 # store data
mov.l %a0,%a2 # store addr
lsr.w &0x8,%d0
bsr.l _dmem_write_byte # os : write hi
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
add.w &0x2,%a2
mov.l %a2,%a0
mov.l %d2,%d0
bsr.l _dmem_write_byte # os : write lo
tst.l %d1 # dfetch error?
bne.w movp_write_err # yes
rts
# mem2reg: read bytes from memory.
# determines the dest register, and then writes the bytes into it.
mem2reg:
btst &0x6,%d1 # word or long operation?
beq.b m2rwtrans
# a0 = src addr
m2rltrans:
mov.l %a0,%a2 # store addr
bsr.l _dmem_read_byte # read first byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
mov.l %d0,%d2
add.w &0x2,%a2 # incr addr by 2 bytes
mov.l %a2,%a0
bsr.l _dmem_read_byte # read second byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
lsl.w &0x8,%d2
mov.b %d0,%d2 # append bytes
add.w &0x2,%a2 # incr addr by 2 bytes
mov.l %a2,%a0
bsr.l _dmem_read_byte # read third byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
lsl.l &0x8,%d2
mov.b %d0,%d2 # append bytes
add.w &0x2,%a2 # incr addr by 2 bytes
mov.l %a2,%a0
bsr.l _dmem_read_byte # read fourth byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
lsl.l &0x8,%d2
mov.b %d0,%d2 # append bytes
mov.b EXC_OPWORD(%a6),%d1
lsr.b &0x1,%d1
and.w &0x7,%d1 # extract Dx from opcode word
mov.l %d2,(EXC_DREGS,%a6,%d1.w*4) # store dx
rts
# a0 = src addr
m2rwtrans:
mov.l %a0,%a2 # store addr
bsr.l _dmem_read_byte # read first byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
mov.l %d0,%d2
add.w &0x2,%a2 # incr addr by 2 bytes
mov.l %a2,%a0
bsr.l _dmem_read_byte # read second byte
tst.l %d1 # dfetch error?
bne.w movp_read_err # yes
lsl.w &0x8,%d2
mov.b %d0,%d2 # append bytes
mov.b EXC_OPWORD(%a6),%d1
lsr.b &0x1,%d1
and.w &0x7,%d1 # extract Dx from opcode word
mov.w %d2,(EXC_DREGS+2,%a6,%d1.w*4) # store dx
rts
# if dmem_{read,write}_byte() returns a fail message in d1, the package
# must create an access error frame. here, we pass a skeleton fslw
# and the failing address to the routine that creates the new frame.
# FSLW:
# write = true
# size = byte
# TM = data
# software emulation error = true
movp_write_err:
mov.l %a2,%a0 # pass failing address
mov.l &0x00a10001,%d0 # pass fslw
bra.l isp_dacc
# FSLW:
# read = true
# size = byte
# TM = data
# software emulation error = true
movp_read_err:
mov.l %a2,%a0 # pass failing address
mov.l &0x01210001,%d0 # pass fslw
bra.l isp_dacc
#########################################################################
# XDEF **************************************************************** #
# _chk2_cmp2(): routine to emulate chk2/cmp2 instructions #
# #
# XREF **************************************************************** #
# _calc_ea(): calculate effective address #
# _dmem_read_long(): read operands #
# _dmem_read_word(): read operands #
# isp_dacc(): handle data access error exception #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# If exiting through isp_dacc... #
# a0 = failing address #
# d0 = FSLW #
# else #
# none #
# #
# ALGORITHM *********************************************************** #
# First, calculate the effective address, then fetch the byte, #
# word, or longword sized operands. Then, in the interest of #
# simplicity, all operands are converted to longword size whether the #
# operation is byte, word, or long. The bounds are sign extended #
# accordingly. If Rn is a data register, Rn is also sign extended. If #
# Rn is an address register, it need not be sign extended since the #
# full register is always used. #
# The comparisons are made and the condition codes calculated. #
# If the instruction is chk2 and the Rn value is out-of-bounds, set #
# the ichk_flg in SPCOND_FLG. #
# If the memory fetch returns a failing value, pass the failing #
# address and FSLW to the isp_dacc() routine. #
# #
#########################################################################
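# The bounds check and the resulting ccodes, in C (an illustrative sketch
# only; not assembled; lo <= hi is assumed, as the instruction requires, and
# the operands are taken after the extensions described above):
#
#    #include <stdint.h>
#
#    static unsigned chk2_cmp2_cc(uint32_t rn, uint32_t lo, uint32_t hi)
#    {
#        unsigned z = (rn == lo) || (rn == hi);
#        /* out of bounds iff (Rn - lo) > (hi - lo), evaluated unsigned */
#        unsigned c = (uint32_t)(rn - lo) > (uint32_t)(hi - lo);
#        return (z << 2) | c;                       /* CCR: Z = bit 2, C = bit 0 */
#    }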
global _chk2_cmp2
_chk2_cmp2:
# passing size parameter doesn't matter since chk2 & cmp2 can't do
# either predecrement, postincrement, or immediate.
bsr.l _calc_ea # calculate <ea>
mov.b EXC_EXTWORD(%a6), %d0 # fetch hi extension word
rol.b &0x4, %d0 # rotate reg bits into lo
and.w &0xf, %d0 # extract reg bits
mov.l (EXC_DREGS,%a6,%d0.w*4), %d2 # get regval
cmpi.b EXC_OPWORD(%a6), &0x2 # what size is operation?
blt.b chk2_cmp2_byte # size == byte
beq.b chk2_cmp2_word # size == word
# the bounds are longword size. call routine to read the lower
# bound into d0 and the higher bound into d1.
chk2_cmp2_long:
mov.l %a0,%a2 # save copy of <ea>
bsr.l _dmem_read_long # fetch long lower bound
tst.l %d1 # dfetch error?
bne.w chk2_cmp2_err_l # yes
mov.l %d0,%d3 # save long lower bound
addq.l &0x4,%a2
mov.l %a2,%a0 # pass <ea> of long upper bound
bsr.l _dmem_read_long # fetch long upper bound
tst.l %d1 # dfetch error?
bne.w chk2_cmp2_err_l # yes
mov.l %d0,%d1 # long upper bound in d1
mov.l %d3,%d0 # long lower bound in d0
bra.w chk2_cmp2_compare # go do the compare emulation
# the bounds are word size. fetch them in one subroutine call by
# reading a longword. sign extend both. if it's a data operation,
# sign extend Rn to long, also.
chk2_cmp2_word:
mov.l %a0,%a2
bsr.l _dmem_read_long # fetch 2 word bounds
tst.l %d1 # dfetch error?
bne.w chk2_cmp2_err_l # yes
mov.w %d0, %d1 # place hi in %d1
swap %d0 # place lo in %d0
ext.l %d0 # sign extend lo bnd
ext.l %d1 # sign extend hi bnd
btst &0x7, EXC_EXTWORD(%a6) # address compare?
bne.w chk2_cmp2_compare # yes; don't sign extend
# operation is a data register compare.
# sign extend word to long so we can do simple longword compares.
ext.l %d2 # sign extend data word
bra.w chk2_cmp2_compare # go emulate compare
# the bounds are byte size. fetch them in one subroutine call by
# reading a word. sign extend both. if it's a data operation,
# sign extend Rn to long, also.
chk2_cmp2_byte:
mov.l %a0,%a2
bsr.l _dmem_read_word # fetch 2 byte bounds
tst.l %d1 # dfetch error?
bne.w chk2_cmp2_err_w # yes
mov.b %d0, %d1 # place hi in %d1
lsr.w &0x8, %d0 # place lo in %d0
extb.l %d0 # sign extend lo bnd
extb.l %d1 # sign extend hi bnd
btst &0x7, EXC_EXTWORD(%a6) # address compare?
bne.b chk2_cmp2_compare # yes; don't sign extend
# operation is a data register compare.
# sign extend byte to long so we can do simple longword compares.
extb.l %d2 # sign extend data byte
#
# To set the ccodes correctly:
# (1) save 'Z' bit from (Rn - lo)
# (2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
# (3) keep 'X', 'N', and 'V' from before instruction
# (4) combine ccodes
#
chk2_cmp2_compare:
sub.l %d0, %d2 # (Rn - lo)
mov.w %cc, %d3 # fetch resulting ccodes
andi.b &0x4, %d3 # keep 'Z' bit
sub.l %d0, %d1 # (hi - lo)
cmp.l %d1,%d2 # ((hi - lo) - (Rn - lo))
mov.w %cc, %d4 # fetch resulting ccodes
or.b %d4, %d3 # combine w/ earlier ccodes
andi.b &0x5, %d3 # keep 'Z' and 'C'
mov.w EXC_CC(%a6), %d4 # fetch old ccodes
andi.b &0x1a, %d4 # keep 'X','N','V' bits
or.b %d3, %d4 # insert new ccodes
mov.w %d4, EXC_CC(%a6) # save new ccodes
btst &0x3, EXC_EXTWORD(%a6) # separate chk2,cmp2
bne.b chk2_finish # it's a chk2
rts
# this code handles the only difference between chk2 and cmp2. chk2 would
# have trapped out if the value was out of bounds. we check this by seeing
# if the 'C' bit was set by the operation.
chk2_finish:
btst &0x0, %d4 # is 'C' bit set?
bne.b chk2_trap # yes;chk2 should trap
rts
chk2_trap:
mov.b &ichk_flg,SPCOND_FLG(%a6) # set "special case" flag
rts
# if dmem_read_{long,word}() returns a fail message in d1, the package
# must create an access error frame. here, we pass a skeleton fslw
# and the failing address to the routine that creates the new frame.
# FSLW:
# read = true
# size = longword
# TM = data
# software emulation error = true
chk2_cmp2_err_l:
mov.l %a2,%a0 # pass failing address
mov.l &0x01010001,%d0 # pass fslw
bra.l isp_dacc
# FSLW:
# read = true
# size = word
# TM = data
# software emulation error = true
chk2_cmp2_err_w:
mov.l %a2,%a0 # pass failing address
mov.l &0x01410001,%d0 # pass fslw
bra.l isp_dacc
#########################################################################
# XDEF **************************************************************** #
# _div64(): routine to emulate div{u,s}.l <ea>,Dr:Dq #
# 64/32->32r:32q #
# #
# XREF **************************************************************** #
# _calc_ea() - calculate effective address #
# isp_iacc() - handle instruction access error exception #
# isp_dacc() - handle data access error exception #
# isp_restore() - restore An on access error w/ -() or ()+ #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# If exiting through isp_dacc... #
# a0 = failing address #
# d0 = FSLW #
# else #
# none #
# #
# ALGORITHM *********************************************************** #
# First, decode the operand location. If it's in Dn, fetch from #
# the stack. If it's in memory, use _calc_ea() to calculate the #
# effective address. Use _dmem_read_long() to fetch at that address. #
# Unless the operand is immediate data. Then use _imem_read_long(). #
# Send failures to isp_dacc() or isp_iacc() as appropriate. #
# If the operands are signed, make them unsigned and save the #
# sign info for later. Separate out special cases like divide-by-zero #
# or 32-bit divides if possible. Else, use a special math algorithm #
# to calculate the result. #
# Restore sign info if signed instruction. Set the condition #
# codes. Set idbyz_flg in SPCOND_FLG if divisor was zero. Store the #
# quotient and remainder in the appropriate data registers on the stack.#
# #
#########################################################################
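# The control flow implemented below, in C (an illustrative sketch of the
# unsigned path only; not assembled; the signed path strips the signs first
# and fixes up the results afterward, as described above):
#
#    #include <stdint.h>
#
#    /* 64/32 -> 32r:32q; returns -1 on divide-by-zero, 1 on overflow */
#    static int divu64(uint32_t hi, uint32_t lo, uint32_t dvsr,
#                      uint32_t *q, uint32_t *r)
#    {
#        if (dvsr == 0)
#            return -1;                 /* trap; registers unchanged */
#        if (hi >= dvsr)
#            return 1;                  /* quotient won't fit in 32 bits */
#        if (hi == 0 && lo < dvsr) {
#            *q = 0; *r = lo;           /* can't divide; q = 0, r = dividend */
#            return 0;
#        }
#        uint64_t dvnd = ((uint64_t)hi << 32) | lo;
#        *q = (uint32_t)(dvnd / dvsr);  /* dclassical below does this */
#        *r = (uint32_t)(dvnd % dvsr);  /* with 16-bit word divides */
#        return 0;
#    }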
set NDIVISOR, EXC_TEMP+0x0
set NDIVIDEND, EXC_TEMP+0x1
set NDRSAVE, EXC_TEMP+0x2
set NDQSAVE, EXC_TEMP+0x4
set DDSECOND, EXC_TEMP+0x6
set DDQUOTIENT, EXC_TEMP+0x8
set DDNORMAL, EXC_TEMP+0xc
global _div64
#############
# div(u,s)l #
#############
_div64:
mov.b EXC_OPWORD+1(%a6), %d0
andi.b &0x38, %d0 # extract src mode
bne.w dcontrolmodel_s # not Dn: operand is in memory or immediate
mov.b EXC_OPWORD+1(%a6), %d0 # extract Dn from opcode
andi.w &0x7, %d0
mov.l (EXC_DREGS,%a6,%d0.w*4), %d7 # fetch divisor from register
dgotsrcl:
beq.w div64eq0 # divisor is = 0!!!
mov.b EXC_EXTWORD+1(%a6), %d0 # extract Dr from extword
mov.b EXC_EXTWORD(%a6), %d1 # extract Dq from extword
and.w &0x7, %d0
lsr.b &0x4, %d1
and.w &0x7, %d1
mov.w %d0, NDRSAVE(%a6) # save Dr for later
mov.w %d1, NDQSAVE(%a6) # save Dq for later
# fetch %dr and %dq directly off stack since all regs are saved there
mov.l (EXC_DREGS,%a6,%d0.w*4), %d5 # get dividend hi
mov.l (EXC_DREGS,%a6,%d1.w*4), %d6 # get dividend lo
# separate signed and unsigned divide
btst &0x3, EXC_EXTWORD(%a6) # signed or unsigned?
beq.b dspecialcases # use positive divide
# save the sign of the divisor
# make divisor unsigned if it's negative
tst.l %d7 # chk sign of divisor
slt NDIVISOR(%a6) # save sign of divisor
bpl.b dsgndividend
neg.l %d7 # complement negative divisor
# save the sign of the dividend
# make dividend unsigned if it's negative
dsgndividend:
tst.l %d5 # chk sign of hi(dividend)
slt NDIVIDEND(%a6) # save sign of dividend
bpl.b dspecialcases
mov.w &0x0, %cc # clear 'X' cc bit
negx.l %d6 # complement signed dividend
negx.l %d5
# extract some special cases:
# - is (dividend == 0) ?
# - is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
dspecialcases:
tst.l %d5 # is (hi(dividend) == 0)
bne.b dnormaldivide # no, so try it the long way
tst.l %d6 # is (lo(dividend) == 0), too
beq.w ddone # yes, so (dividend == 0)
cmp.l %d7,%d6 # is (divisor <= lo(dividend))
bls.b d32bitdivide # yes, so use 32 bit divide
exg %d5,%d6 # q = 0, r = dividend
bra.w divfinish # can't divide, we're done.
d32bitdivide:
tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
bra.b divfinish
dnormaldivide:
# last special case:
# - is hi(dividend) >= divisor ? if yes, then overflow
cmp.l %d7,%d5
bls.b ddovf # answer won't fit in 32 bits
# perform the divide algorithm:
bsr.l dclassical # do int divide
# separate into signed and unsigned finishes.
divfinish:
btst &0x3, EXC_EXTWORD(%a6) # do divs, divu separately
beq.b ddone # divu has no processing!!!
# it was a divs.l, so ccode setting is a little more complicated...
tst.b NDIVIDEND(%a6) # remainder has same sign
beq.b dcc # as dividend.
neg.l %d5 # sgn(rem) = sgn(dividend)
dcc:
mov.b NDIVISOR(%a6), %d0
eor.b %d0, NDIVIDEND(%a6) # chk if quotient is negative
beq.b dqpos # branch to quot positive
# 0x80000000 is the largest number representable as a 32-bit negative
# number. the negative of 0x80000000 is 0x80000000.
cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
bhi.b ddovf
neg.l %d6 # make (-quot) 2's comp
bra.b ddone
dqpos:
btst &0x1f, %d6 # will (+quot) fit in 32 bits?
bne.b ddovf
ddone:
# at this point, result is normal so ccodes are set based on result.
mov.w EXC_CC(%a6), %cc
tst.l %d6 # set %ccode bits
mov.w %cc, EXC_CC(%a6)
mov.w NDRSAVE(%a6), %d0 # get Dr off stack
mov.w NDQSAVE(%a6), %d1 # get Dq off stack
# if the register numbers are the same, only the quotient gets saved.
# so, if we always save the quotient second, we save ourselves a cmp&beq
mov.l %d5, (EXC_DREGS,%a6,%d0.w*4) # save remainder
mov.l %d6, (EXC_DREGS,%a6,%d1.w*4) # save quotient
rts
ddovf:
bset &0x1, EXC_CC+1(%a6) # 'V' set on overflow
bclr &0x0, EXC_CC+1(%a6) # 'C' cleared on overflow
rts
div64eq0:
andi.b &0x1e, EXC_CC+1(%a6) # clear 'C' bit on divbyzero
ori.b &idbyz_flg,SPCOND_FLG(%a6) # set "special case" flag
rts
###########################################################################
#########################################################################
# This routine uses the 'classical' Algorithm D from Donald Knuth's #
# Art of Computer Programming, vol II, Seminumerical Algorithms. #
# For this implementation b=2**16, and the target is U1U2U3U4/V1V2, #
# where U,V are words of the quadword dividend and longword divisor, #
# and U1, V1 are the most significant words. #
# #
# The most sig. longword of the 64 bit dividend must be in %d5, least #
# in %d6. The divisor must be in %d7. The divide here is unsigned; the #
# caller strips the signs beforehand and fixes up the results afterward. #
# The quotient is returned in %d6, remainder in %d5, unless the #
# v (overflow) bit is set in the saved %ccr. If overflow, the dividend #
# is unchanged. #
#########################################################################
dclassical:
# if the divisor msw is 0, use a simpler algorithm than the full blown
# one at ddknuth:
cmpi.l %d7, &0xffff
bhi.b ddknuth # go use D. Knuth algorithm
# Since the divisor is only a word (and larger than the mslw of the dividend),
# a simpler algorithm may be used :
# In the general case, four quotient words would be created by
# dividing the divisor word into each dividend word. In this case,
# the first two quotient words must be zero, or overflow would occur.
# Since we already checked this case above, we can treat the most significant
# longword of the dividend as (0) remainder (see Knuth) and merely complete
# the last two divisions to get a quotient longword and word remainder:
clr.l %d1
swap %d5 # same as r*b if previous step rqd
swap %d6 # get u3 to lsw position
mov.w %d6, %d5 # rb + u3
divu.w %d7, %d5
mov.w %d5, %d1 # first quotient word
swap %d6 # get u4
mov.w %d6, %d5 # rb + u4
divu.w %d7, %d5
swap %d1
mov.w %d5, %d1 # 2nd quotient 'digit'
clr.w %d5
swap %d5 # now remainder
mov.l %d1, %d6 # and quotient
rts
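# The two-step word divide above, in C (an illustrative sketch only; not
# assembled; it requires divisor <= 0xffff and hi(dividend) < divisor, which
# is guaranteed on entry to dclassical):
#
#    #include <stdint.h>
#
#    static void divu_by_word(uint32_t hi, uint32_t lo, uint16_t dvsr,
#                             uint32_t *q, uint32_t *r)
#    {
#        uint32_t p1 = (hi << 16) | (lo >> 16);        /* r*b + u3 */
#        uint32_t q1 = p1 / dvsr, rem = p1 % dvsr;     /* quotient fits a word */
#        uint32_t p2 = (rem << 16) | (lo & 0xffff);    /* r*b + u4 */
#        *q = (q1 << 16) | (p2 / dvsr);
#        *r = p2 % dvsr;
#    }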
ddknuth:
# In this algorithm, the divisor is treated as a 2 digit (word) number
# which is divided into a 3 digit (word) dividend to get one quotient
# digit (word). After subtraction, the dividend is shifted and the
# process repeated. Before beginning, the divisor and quotient are
# 'normalized' so that the process of estimating the quotient digit
# will yield verifiably correct results.
clr.l DDNORMAL(%a6) # count of shifts for normalization
clr.b DDSECOND(%a6) # clear flag for quotient digits
clr.l %d1 # %d1 will hold trial quotient
ddnchk:
btst &31, %d7 # must we normalize? first word of
bne.b ddnormalized # divisor (V1) must be >= 65536/2
addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
lsl.l &0x1, %d7 # shift the divisor
lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
roxl.l &0x1, %d5 # shift u1,u2
bra.w ddnchk
ddnormalized:
# Now calculate an estimate of the quotient words (msw first, then lsw).
# The comments use subscripts for the first quotient digit determination.
mov.l %d7, %d3 # divisor
mov.l %d5, %d2 # dividend mslw
swap %d2
swap %d3
cmp.w %d2, %d3 # V1 = U1 ?
bne.b ddqcalc1
mov.w &0xffff, %d1 # use max trial quotient word
bra.b ddadj0
ddqcalc1:
mov.l %d5, %d1
divu.w %d3, %d1 # use quotient of mslw/msw
andi.l &0x0000ffff, %d1 # zero any remainder
ddadj0:
# now test the trial quotient and adjust. This step plus the
# normalization assures (according to Knuth) that the trial
# quotient will be at worst 1 too large.
mov.l %d6, -(%sp)
clr.w %d6 # word u3 left
swap %d6 # in lsw position
ddadj1: mov.l %d7, %d3
mov.l %d1, %d2
mulu.w %d7, %d2 # V2q
swap %d3
mulu.w %d1, %d3 # V1q
mov.l %d5, %d4 # U1U2
sub.l %d3, %d4 # U1U2 - V1q
swap %d4
mov.w %d4,%d0
mov.w %d6,%d4 # insert lower word (U3)
tst.w %d0 # is upper word set?
bne.w ddadjd1
# add.l %d6, %d4 # (U1U2 - V1q) + U3
cmp.l %d2, %d4
bls.b ddadjd1 # is V2q > (U1U2-V1q) + U3 ?
subq.l &0x1, %d1 # yes, decrement and recheck
bra.b ddadj1
ddadjd1:
# now test the word by multiplying it by the divisor (V1V2) and comparing
# the 3 digit (word) result with the current dividend words
mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
mov.l %d1, %d6
swap %d6 # shift answer to ms 3 words
mov.l %d7, %d5
bsr.l dmm2
mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
mov.l %d6, %d3
mov.l (%sp)+, %d5 # restore dividend
mov.l (%sp)+, %d6
sub.l %d3, %d6
subx.l %d2, %d5 # subtract double precision
bcc dd2nd # no carry, do next quotient digit
subq.l &0x1, %d1 # q is one too large
# need to add back divisor longword to current ms 3 digits of dividend
# - according to Knuth, this is done only 2 out of 65536 times for random
# divisor, dividend selection.
clr.l %d2
mov.l %d7, %d3
swap %d3
clr.w %d3 # %d3 now ls word of divisor
add.l %d3, %d6 # aligned with 3rd word of dividend
addx.l %d2, %d5
mov.l %d7, %d3
clr.w %d3 # %d3 now ms word of divisor
swap %d3 # aligned with 2nd word of dividend
add.l %d3, %d5
dd2nd:
tst.b DDSECOND(%a6) # both q words done?
bne.b ddremain
# first quotient digit now correct. store digit and shift the
# (subtracted) dividend
mov.w %d1, DDQUOTIENT(%a6)
clr.l %d1
swap %d5
swap %d6
mov.w %d6, %d5
clr.w %d6
st DDSECOND(%a6) # second digit
bra.w ddnormalized
ddremain:
# add 2nd word to quotient, get the remainder.
mov.w %d1, DDQUOTIENT+2(%a6)
# shift down one word/digit to renormalize remainder.
mov.w %d5, %d6
swap %d6
swap %d5
mov.l DDNORMAL(%a6), %d7 # get norm shift count
beq.b ddrn
subq.l &0x1, %d7 # set for loop count
ddnlp:
lsr.l &0x1, %d5 # shift into %d6
roxr.l &0x1, %d6
dbf %d7, ddnlp
ddrn:
mov.l %d6, %d5 # remainder
mov.l DDQUOTIENT(%a6), %d6 # quotient
rts
dmm2:
# factors for the 32X32->64 multiplication are in %d5 and %d6.
# returns 64 bit result in %d5 (hi) %d6(lo).
# destroys %d2,%d3,%d4.
# multiply hi,lo words of each factor to get 4 intermediate products
mov.l %d6, %d2
mov.l %d6, %d3
mov.l %d5, %d4
swap %d3
swap %d4
mulu.w %d5, %d6 # %d6 <- lsw*lsw
mulu.w %d3, %d5 # %d5 <- msw-dest*lsw-source
mulu.w %d4, %d2 # %d2 <- msw-source*lsw-dest
mulu.w %d4, %d3 # %d3 <- msw*msw
# now use swap and addx to consolidate to two longwords
clr.l %d4
swap %d6
add.w %d5, %d6 # add msw of l*l to lsw of m*l product
addx.w %d4, %d3 # add any carry to m*m product
add.w %d2, %d6 # add in lsw of other m*l product
addx.w %d4, %d3 # add any carry to m*m product
swap %d6 # %d6 is low 32 bits of final product
clr.w %d5
clr.w %d2 # lsw of two mixed products used,
swap %d5 # now use msws of longwords
swap %d2
add.l %d2, %d5
add.l %d3, %d5 # %d5 now ms 32 bits of final product
rts
##########
dcontrolmodel_s:
movq.l &LONG,%d0
bsr.l _calc_ea # calc <ea>
cmpi.b SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
beq.b dimmed # yes
mov.l %a0,%a2
bsr.l _dmem_read_long # fetch divisor from <ea>
tst.l %d1 # dfetch error?
bne.b div64_err # yes
mov.l %d0, %d7
bra.w dgotsrcl
# we have to split out immediate data here because it must be read using
# imem_read() instead of dmem_read(). this becomes especially important
# if the fetch runs into some deadly fault.
dimmed:
addq.l &0x4,EXC_EXTWPTR(%a6)
bsr.l _imem_read_long # read immediate value
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.l %d0,%d7
bra.w dgotsrcl
##########
# if dmem_read_long() returns a fail message in d1, the package
# must create an access error frame. here, we pass a skeleton fslw
# and the failing address to the routine that creates the new frame.
# also, we call isp_restore in case the effective addressing mode was
# (an)+ or -(an) in which case the previous "an" value must be restored.
# FSLW:
# read = true
# size = longword
# TM = data
# software emulation error = true
div64_err:
bsr.l isp_restore # restore addr reg
mov.l %a2,%a0 # pass failing address
mov.l &0x01010001,%d0 # pass fslw
bra.l isp_dacc
#########################################################################
# XDEF **************************************************************** #
# _mul64(): routine to emulate mul{u,s}.l <ea>,Dh:Dl 32x32->64 #
# #
# XREF **************************************************************** #
# _calc_ea() - calculate effective address #
# isp_iacc() - handle instruction access error exception #
# isp_dacc() - handle data access error exception #
# isp_restore() - restore An on access error w/ -() or ()+ #
# #
# INPUT *************************************************************** #
# none #
# #
# OUTPUT ************************************************************** #
# If exiting through isp_dacc... #
# a0 = failing address #
# d0 = FSLW #
# else #
# none #
# #
# ALGORITHM *********************************************************** #
# First, decode the operand location. If it's in Dn, fetch from #
# the stack. If it's in memory, use _calc_ea() to calculate the #
# effective address. Use _dmem_read_long() to fetch at that address. #
# Unless the operand is immediate data. Then use _imem_read_long(). #
# Send failures to isp_dacc() or isp_iacc() as appropriate. #
# If the operands are signed, make them unsigned and save the #
# sign info for later. Perform the multiplication using 16x16->32 #
# unsigned multiplies and "add" instructions. Store the high and low #
# portions of the result in the appropriate data registers on the #
# stack. Calculate the condition codes, also. #
# #
#########################################################################
#############
# mul(u,s)l #
#############
global _mul64
_mul64:
mov.b EXC_OPWORD+1(%a6), %d0 # extract src {mode,reg}
cmpi.b %d0, &0x7 # is src mode Dn or other?
bgt.w mul64_memop # src is in memory
# multiplier operand in the data register file.
# must extract the register number and fetch the operand from the stack.
mul64_regop:
andi.w &0x7, %d0 # extract Dn
mov.l (EXC_DREGS,%a6,%d0.w*4), %d3 # fetch multiplier
# multiplier is in %d3. now, extract Dl and Dh fields and fetch the
# multiplicand from the data register specified by Dl.
mul64_multiplicand:
mov.w EXC_EXTWORD(%a6), %d2 # fetch ext word
clr.w %d1 # clear Dh reg
mov.b %d2, %d1 # grab Dh
rol.w &0x4, %d2 # align Dl byte
andi.w &0x7, %d2 # extract Dl
mov.l (EXC_DREGS,%a6,%d2.w*4), %d4 # get multiplicand
# check for the case of "zero" result early
tst.l %d4 # test multiplicand
beq.w mul64_zero # handle zero separately
tst.l %d3 # test multiplier
beq.w mul64_zero # handle zero separately
# multiplier is in %d3 and multiplicand is in %d4.
# if the operation is to be signed, then the operands are converted
# to unsigned and the result sign is saved for the end.
clr.b EXC_TEMP(%a6) # clear temp space
btst &0x3, EXC_EXTWORD(%a6) # signed or unsigned?
beq.b mul64_alg # unsigned; skip sgn calc
tst.l %d3 # is multiplier negative?
bge.b mul64_chk_md_sgn # no
neg.l %d3 # make multiplier positive
ori.b &0x1, EXC_TEMP(%a6) # save multiplier sgn
# the result sign is the exclusive or of the operand sign bits.
mul64_chk_md_sgn:
tst.l %d4 # is multiplicand negative?
bge.b mul64_alg # no
neg.l %d4 # make multiplicand positive
eori.b &0x1, EXC_TEMP(%a6) # calculate correct sign
#########################################################################
# 63 32 0 #
# ---------------------------- #
# | hi(mplier) * hi(mplicand)| #
# ---------------------------- #
# ----------------------------- #
# | hi(mplier) * lo(mplicand) | #
# ----------------------------- #
# ----------------------------- #
# | lo(mplier) * hi(mplicand) | #
# ----------------------------- #
# | ----------------------------- #
# --|-- | lo(mplier) * lo(mplicand) | #
# | ----------------------------- #
# ======================================================== #
# -------------------------------------------------------- #
# | hi(result) | lo(result) | #
# -------------------------------------------------------- #
#########################################################################
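# The same figure in C (an illustrative sketch only; not assembled):
#
#    #include <stdint.h>
#
#    static void mulu32x32(uint32_t a, uint32_t b, uint32_t *hi, uint32_t *lo)
#    {
#        uint32_t ll = (a & 0xffff) * (b & 0xffff);    /* lo * lo */
#        uint32_t lh = (a & 0xffff) * (b >> 16);       /* lo * hi */
#        uint32_t hl = (a >> 16)    * (b & 0xffff);    /* hi * lo */
#        uint32_t hh = (a >> 16)    * (b >> 16);       /* hi * hi */
#        uint32_t mid = (ll >> 16) + (lh & 0xffff) + (hl & 0xffff);
#        *lo = (mid << 16) | (ll & 0xffff);
#        *hi = hh + (lh >> 16) + (hl >> 16) + (mid >> 16);
#    }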
mul64_alg:
# load temp registers with operands
mov.l %d3, %d5 # mr in %d5
mov.l %d3, %d6 # mr in %d6
mov.l %d4, %d7 # md in %d7
swap %d6 # hi(mr) in lo %d6
swap %d7 # hi(md) in lo %d7
# complete necessary multiplies:
mulu.w %d4, %d3 # [1] lo(mr) * lo(md)
mulu.w %d6, %d4 # [2] hi(mr) * lo(md)
mulu.w %d7, %d5 # [3] lo(mr) * hi(md)
mulu.w %d7, %d6 # [4] hi(mr) * hi(md)
# add lo portions of [2],[3] to hi portion of [1].
# add carries produced from these adds to [4].
# lo([1]) is the final lo 16 bits of the result.
clr.l %d7 # load %d7 w/ zero value
swap %d3 # hi([1]) <==> lo([1])
add.w %d4, %d3 # hi([1]) + lo([2])
addx.l %d7, %d6 # [4] + carry
add.w %d5, %d3 # hi([1]) + lo([3])
addx.l %d7, %d6 # [4] + carry
swap %d3 # lo([1]) <==> hi([1])
# lo portions of [2],[3] have been added in to final result.
# now, clear lo, put hi in lo reg, and add to [4]
clr.w %d4 # clear lo([2])
clr.w %d5 # clear hi([3])
swap %d4 # hi([2]) in lo %d4
swap %d5 # hi([3]) in lo %d5
add.l %d5, %d4 # [4] + hi([2])
add.l %d6, %d4 # [4] + hi([3])
# unsigned result is now in {%d4,%d3}
tst.b EXC_TEMP(%a6) # should result be signed?
beq.b mul64_done # no
# result should be a signed negative number.
# compute 2's complement of the unsigned number:
# -negate all bits and add 1
mul64_neg:
not.l %d3 # negate lo(result) bits
not.l %d4 # negate hi(result) bits
addq.l &1, %d3 # add 1 to lo(result)
addx.l %d7, %d4 # add carry to hi(result)
# the result is saved to the register file.
# for '040 compatibility, if Dl == Dh then only the hi(result) is
# saved. so, saving hi after lo accomplishes this without need to
# check Dl,Dh equality.
mul64_done:
mov.l %d3, (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
mov.w &0x0, %cc
mov.l %d4, (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
# now, grab the condition codes. only one that can be set is 'N'.
# 'N' CAN be set even if the operation is unsigned, if bit 63 of the result is set.
mov.w %cc, %d7 # fetch %ccr to see if 'N' set
andi.b &0x8, %d7 # extract 'N' bit
mul64_ccode_set:
mov.b EXC_CC+1(%a6), %d6 # fetch previous %ccr
andi.b &0x10, %d6 # all but 'X' bit changes
or.b %d7, %d6 # group 'X' and 'N'
mov.b %d6, EXC_CC+1(%a6) # save new %ccr
rts
# one or both of the operands is zero so the result is also zero.
# save the zero result to the register file and set the 'Z' ccode bit.
mul64_zero:
clr.l (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
clr.l (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
movq.l &0x4, %d7 # set 'Z' ccode bit
bra.b mul64_ccode_set # finish ccode set
##########
# multiplier operand is in memory at the effective address.
# must calculate the <ea> and go fetch the 32-bit operand.
mul64_memop:
movq.l &LONG, %d0 # pass # of bytes
bsr.l _calc_ea # calculate <ea>
cmpi.b SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
beq.b mul64_immed # yes
mov.l %a0,%a2
bsr.l _dmem_read_long # fetch src from addr (%a0)
tst.l %d1 # dfetch error?
bne.w mul64_err # yes
mov.l %d0, %d3 # store multiplier in %d3
bra.w mul64_multiplicand
# we have to split out immediate data here because it must be read using
# imem_read() instead of dmem_read(). this becomes especially important
# if the fetch runs into some deadly fault.
mul64_immed:
addq.l &0x4,EXC_EXTWPTR(%a6)
bsr.l _imem_read_long # read immediate value
tst.l %d1 # ifetch error?
bne.l isp_iacc # yes
mov.l %d0,%d3
bra.w mul64_multiplicand
##########
# if dmem_read_long() returns a fail message in d1, the package
# must create an access error frame. here, we pass a skeleton fslw
# and the failing address to the routine that creates the new frame.
# also, we call isp_restore in case the effective addressing mode was
# (an)+ or -(an) in which case the previous "an" value must be restored.
# FSLW:
# read = true
# size = longword
# TM = data
# software emulation error = true
mul64_err:
bsr.l isp_restore # restore addr reg
mov.l %a2,%a0 # pass failing address
mov.l &0x01010001,%d0 # pass fslw
bra.l isp_dacc
#########################################################################
# XDEF **************************************************************** #
# _compandset2(): routine to emulate cas2() #
# (internal to package) #
# #
# _isp_cas2_finish(): store ccodes, store compare regs #
# (external to package) #
# #
# XREF **************************************************************** #
# _real_lock_page() - "callout" to lock op's page from page-outs #
# _cas_terminate2() - access error exit #
# _real_cas2() - "callout" to core cas2 emulation code #
# _real_unlock_page() - "callout" to unlock page #
# #
# INPUT *************************************************************** #
# _compandset2(): #
# d0 = instruction extension word #
# #
# _isp_cas2_finish(): #
# see cas2 core emulation code #
# #
# OUTPUT ************************************************************** #
# _compandset2(): #
# see cas2 core emulation code #
# #
# _isp_cas2_finish(): #
# None (register file or memory changed as appropriate) #
# #
# ALGORITHM *********************************************************** #
# compandset2(): #
# Decode the instruction and fetch the appropriate Update and #
# Compare operands. Then call the "callout" _real_lock_page() for each #
# memory operand address so that the operating system can keep these #
# pages from being paged out. If either _real_lock_page() fails, exit #
# through _cas_terminate2(). Don't forget to unlock the 1st locked page #
# using _real_unlock_paged() if the 2nd lock-page fails. #
# Finally, branch to the core cas2 emulation code by calling the #
# "callout" _real_cas2(). #
# #
# _isp_cas2_finish(): #
# Re-perform the comparison so we can determine the condition #
# codes which were too much trouble to keep around during the locked #
# emulation. Then unlock each operands page by calling the "callout" #
# _real_unlock_page(). #
# #
#########################################################################
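# The operation being emulated, in C terms (an illustrative sketch only; not
# assembled; the real sequence must perform the reads, compares, and writes
# as one locked bus transaction, which is what the core code provides):
#
#    #include <stdint.h>
#
#    static unsigned cas2_model(uint32_t *addr1, uint32_t *addr2,
#                               uint32_t *dc1, uint32_t *dc2,  /* compare ops */
#                               uint32_t du1, uint32_t du2)    /* update ops  */
#    {
#        uint32_t dst1 = *addr1, dst2 = *addr2;
#        if (dst1 == *dc1 && dst2 == *dc2) {
#            *addr1 = du1;              /* both match: store update operands */
#            *addr2 = du2;
#            return 1;                  /* 'Z' will be set */
#        }
#        *dc1 = dst1;                   /* mismatch: compare regs get the */
#        *dc2 = dst2;                   /* destination values; memory kept */
#        return 0;
#    }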
set ADDR1, EXC_TEMP+0xc
set ADDR2, EXC_TEMP+0x0
set DC2, EXC_TEMP+0xa
set DC1, EXC_TEMP+0x8
global _compandset2
_compandset2:
mov.l %d0,EXC_TEMP+0x4(%a6) # store for possible restart
mov.l %d0,%d1 # extension word in d0
rol.w &0x4,%d0
andi.w &0xf,%d0 # extract Rn2
mov.l (EXC_DREGS,%a6,%d0.w*4),%a1 # fetch ADDR2
mov.l %a1,ADDR2(%a6)
mov.l %d1,%d0
lsr.w &0x6,%d1
andi.w &0x7,%d1 # extract Du2
mov.l (EXC_DREGS,%a6,%d1.w*4),%d5 # fetch Update2 Op
andi.w &0x7,%d0 # extract Dc2
mov.l (EXC_DREGS,%a6,%d0.w*4),%d3 # fetch Compare2 Op
mov.w %d0,DC2(%a6)
mov.w EXC_EXTWORD(%a6),%d0
mov.l %d0,%d1
rol.w &0x4,%d0
andi.w &0xf,%d0 # extract Rn1
mov.l (EXC_DREGS,%a6,%d0.w*4),%a0 # fetch ADDR1
mov.l %a0,ADDR1(%a6)
mov.l %d1,%d0
lsr.w &0x6,%d1
andi.w &0x7,%d1 # extract Du1
mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # fetch Update1 Op
andi.w &0x7,%d0 # extract Dc1
mov.l (EXC_DREGS,%a6,%d0.w*4),%d2 # fetch Compare1 Op
mov.w %d0,DC1(%a6)
btst &0x1,EXC_OPWORD(%a6) # word or long?
sne %d7
btst &0x5,EXC_ISR(%a6) # user or supervisor?
sne %d6
mov.l %a0,%a2
mov.l %a1,%a3
mov.l %d7,%d1 # pass size
mov.l %d6,%d0 # pass mode
bsr.l _real_lock_page # lock page
mov.l %a2,%a0
tst.l %d0 # error?
bne.l _cas_terminate2 # yes
mov.l %d7,%d1 # pass size
mov.l %d6,%d0 # pass mode
mov.l %a3,%a0 # pass addr
bsr.l _real_lock_page # lock page
mov.l %a3,%a0
tst.l %d0 # error?
bne.b cas_preterm # yes
mov.l %a2,%a0
mov.l %a3,%a1
bra.l _real_cas2
# if the 2nd lock attempt fails, then we must still unlock the
# first page(s).
cas_preterm:
mov.l %d0,-(%sp) # save FSLW
mov.l %d7,%d1 # pass size
mov.l %d6,%d0 # pass mode
mov.l %a2,%a0 # pass ADDR1
bsr.l _real_unlock_page # unlock first page(s)
mov.l (%sp)+,%d0 # restore FSLW
mov.l %a3,%a0 # pass failing addr
bra.l _cas_terminate2
#############################################################
global _isp_cas2_finish
_isp_cas2_finish:
btst &0x1,EXC_OPWORD(%a6)
bne.b cas2_finish_l
mov.w EXC_CC(%a6),%cc # load old ccodes
cmp.w %d0,%d2
bne.b cas2_finish_w_save
cmp.w %d1,%d3
cas2_finish_w_save:
mov.w %cc,EXC_CC(%a6) # save new ccodes
tst.b %d4 # update compare reg?
bne.b cas2_finish_w_done # no
mov.w DC2(%a6),%d3 # fetch Dc2
mov.w %d1,(2+EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
mov.w DC1(%a6),%d2 # fetch Dc1
mov.w %d0,(2+EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
cas2_finish_w_done:
btst &0x5,EXC_ISR(%a6)
sne %d2
mov.l %d2,%d0 # pass mode
sf %d1 # pass size
mov.l ADDR1(%a6),%a0 # pass ADDR1
bsr.l _real_unlock_page # unlock page
mov.l %d2,%d0 # pass mode
sf %d1 # pass size
mov.l ADDR2(%a6),%a0 # pass ADDR2
bsr.l _real_unlock_page # unlock page
rts
cas2_finish_l:
mov.w EXC_CC(%a6),%cc # load old ccodes
cmp.l %d0,%d2
bne.b cas2_finish_l_save
cmp.l %d1,%d3
cas2_finish_l_save:
mov.w %cc,EXC_CC(%a6) # save new ccodes
tst.b %d4 # update compare reg?
bne.b cas2_finish_l_done # no
mov.w DC2(%a6),%d3 # fetch Dc2
mov.l %d1,(EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
mov.w DC1(%a6),%d2 # fetch Dc1
mov.l %d0,(EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
cas2_finish_l_done:
btst &0x5,EXC_ISR(%a6)
sne %d2
mov.l %d2,%d0 # pass mode
st %d1 # pass size
mov.l ADDR1(%a6),%a0 # pass ADDR1
bsr.l _real_unlock_page # unlock page
mov.l %d2,%d0 # pass mode
st %d1 # pass size
mov.l ADDR2(%a6),%a0 # pass ADDR2
bsr.l _real_unlock_page # unlock page
rts
########
global cr_cas2
cr_cas2:
mov.l EXC_TEMP+0x4(%a6),%d0
bra.w _compandset2
#########################################################################
# XDEF **************************************************************** #
# _compandset(): routine to emulate cas w/ misaligned <ea> #
# (internal to package) #
# _isp_cas_finish(): routine called when cas emulation completes #
# (external and internal to package) #
# _isp_cas_restart(): restart cas emulation after a fault #
# (external to package) #
# _isp_cas_terminate(): create access error stack frame on fault #
# (external and internal to package) #
# _isp_cas_inrange(): checks whether instr address is within range #
# of core cas/cas2 emulation code #
# (external to package) #
# #
# XREF **************************************************************** #
# _calc_ea(): calculate effective address #
# #
# INPUT *************************************************************** #
# compandset(): #
# none #
# _isp_cas_restart(): #
# d6 = previous sfc/dfc #
# _isp_cas_finish(): #
# _isp_cas_terminate(): #
# a0 = failing address #
# d0 = FSLW #
# d6 = previous sfc/dfc #
# _isp_cas_inrange(): #
# a0 = instruction address to be checked #
# #
# OUTPUT ************************************************************** #
# compandset(): #
# none #
# _isp_cas_restart(): #
# a0 = effective address #
# d7 = word or longword flag #
# _isp_cas_finish(): #
# a0 = effective address #
# _isp_cas_terminate(): #
# initial register set before emulation exception #
# _isp_cas_inrange(): #
# d0 = 0 => in range; -1 => out of range #
# #
# ALGORITHM *********************************************************** #
# #
# compandset(): #
# First, calculate the effective address. Then, decode the #
# instruction word and fetch the "compare" (DC) and "update" (Du) #
# operands. #
# Next, call the external routine _real_lock_page() so that the #
# operating system can keep this page from being paged out while we're #
# in this routine. If this call fails, jump to _cas_terminate2(). #
# The routine then branches to _real_cas(). This external routine #
# that actually emulates cas can be supplied by the external os or #
# made to point directly back into the 060ISP which has a routine for #
# this purpose. #
# #
# _isp_cas_finish(): #
# Either way, after emulation, the package is re-entered at #
# _isp_cas_finish(). This routine re-compares the operands in order to #
# set the condition codes. Finally, these routines will call #
# _real_unlock_page() in order to unlock the pages that were previously #
# locked. #
# #
# _isp_cas_restart(): #
# This routine can be entered from an access error handler where #
# the emulation sequence should be re-started from the beginning. #
# #
# _isp_cas_terminate(): #
# This routine can be entered from an access error handler where #
# an emulation operand access failed and the operating system would #
# like an access error stack frame created instead of the current #
# unimplemented integer instruction frame. #
# Also, the package enters here if a call to _real_lock_page() #
# fails. #
# #
# _isp_cas_inrange(): #
# Checks to see whether the instruction address passed to it in #
# a0 is within the software package cas/cas2 emulation routines. This #
# can be helpful for an operating system to determine whether an access #
# error during emulation was due to a cas/cas2 emulation access. #
# #
#########################################################################
set DC, EXC_TEMP+0x8
set ADDR, EXC_TEMP+0x4
global _compandset
_compandset:
btst &0x1,EXC_OPWORD(%a6) # word or long operation?
bne.b compandsetl # long
compandsetw:
movq.l &0x2,%d0 # size = 2 bytes
bsr.l _calc_ea # a0 = calculated <ea>
mov.l %a0,ADDR(%a6) # save <ea> for possible restart
sf %d7 # clear d7 for word size
bra.b compandsetfetch
compandsetl:
movq.l &0x4,%d0 # size = 4 bytes
bsr.l _calc_ea # a0 = calculated <ea>
mov.l %a0,ADDR(%a6) # save <ea> for possible restart
st %d7 # set d7 for longword size
compandsetfetch:
mov.w EXC_EXTWORD(%a6),%d0 # fetch cas extension word
mov.l %d0,%d1 # make a copy
lsr.w &0x6,%d0
andi.w &0x7,%d0 # extract Du
mov.l (EXC_DREGS,%a6,%d0.w*4),%d2 # get update operand
andi.w &0x7,%d1 # extract Dc
mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # get compare operand
mov.w %d1,DC(%a6) # save Dc
btst &0x5,EXC_ISR(%a6) # which mode for exception?
sne %d6 # set on supervisor mode
mov.l %a0,%a2 # save temporarily
mov.l %d7,%d1 # pass size
mov.l %d6,%d0 # pass mode
bsr.l _real_lock_page # lock page
tst.l %d0 # did error occur?
bne.w _cas_terminate2 # yes, clean up the mess
mov.l %a2,%a0 # pass addr in a0
bra.l _real_cas
########
global _isp_cas_finish
_isp_cas_finish:
btst &0x1,EXC_OPWORD(%a6)
bne.b cas_finish_l
# just do the compare again since it's faster than saving the ccodes
# from the locked routine...
cas_finish_w:
mov.w EXC_CC(%a6),%cc # restore cc
cmp.w %d0,%d4 # do word compare
mov.w %cc,EXC_CC(%a6) # save cc
tst.b %d1 # update compare reg?
bne.b cas_finish_w_done # no
mov.w DC(%a6),%d3
mov.w %d0,(EXC_DREGS+2,%a6,%d3.w*4) # Dc = destination
cas_finish_w_done:
mov.l ADDR(%a6),%a0 # pass addr
sf %d1 # pass size
btst &0x5,EXC_ISR(%a6)
sne %d0 # pass mode
bsr.l _real_unlock_page # unlock page
rts
# just do the compare again since it's faster than saving the ccodes
# from the locked routine...
cas_finish_l:
mov.w EXC_CC(%a6),%cc # restore cc
cmp.l %d0,%d4 # do longword compare
mov.w %cc,EXC_CC(%a6) # save cc
tst.b %d1 # update compare reg?
bne.b cas_finish_l_done # no
mov.w DC(%a6),%d3
mov.l %d0,(EXC_DREGS,%a6,%d3.w*4) # Dc = destination
cas_finish_l_done:
mov.l ADDR(%a6),%a0 # pass addr
st %d1 # pass size
btst &0x5,EXC_ISR(%a6)
sne %d0 # pass mode
bsr.l _real_unlock_page # unlock page
rts
########
global _isp_cas_restart
_isp_cas_restart:
mov.l %d6,%sfc # restore previous sfc
mov.l %d6,%dfc # restore previous dfc
cmpi.b EXC_OPWORD+1(%a6),&0xfc # cas or cas2?
beq.l cr_cas2 # cas2
cr_cas:
mov.l ADDR(%a6),%a0 # load <ea>
btst &0x1,EXC_OPWORD(%a6) # word or long operation?
sne %d7 # set d7 accordingly
bra.w compandsetfetch
########
# At this stage, it would be nice if d0 held the FSLW.
global _isp_cas_terminate
_isp_cas_terminate:
mov.l %d6,%sfc # restore previous sfc
mov.l %d6,%dfc # restore previous dfc
global _cas_terminate2
_cas_terminate2:
mov.l %a0,%a2 # copy failing addr to a2
mov.l %d0,-(%sp)
bsr.l isp_restore # restore An (if ()+ or -())
mov.l (%sp)+,%d0
addq.l &0x4,%sp # remove sub return addr
subq.l &0x8,%sp # make room for bigger stack
subq.l &0x8,%a6 # shift frame ptr down, too
mov.l &26,%d1 # want to move 27 longwords
lea 0x8(%sp),%a0 # get address of old stack
lea 0x0(%sp),%a1 # get address of new stack
cas_term_cont:
mov.l (%a0)+,(%a1)+ # move a longword
dbra.w %d1,cas_term_cont # keep going
mov.w &0x4008,EXC_IVOFF(%a6) # put new stk fmt, voff
mov.l %a2,EXC_IVOFF+0x2(%a6) # put faulting addr on stack
mov.l %d0,EXC_IVOFF+0x6(%a6) # put FSLW on stack
movm.l EXC_DREGS(%a6),&0x3fff # restore user regs
unlk %a6 # unlink stack frame
bra.l _real_access
########
global _isp_cas_inrange
_isp_cas_inrange:
clr.l %d0 # clear return result
lea _CASHI(%pc),%a1 # load end of CAS core code
cmp.l %a1,%a0 # is PC in range?
blt.b cin_no # no
lea _CASLO(%pc),%a1 # load begin of CAS core code
cmp.l %a0,%a1 # is PC in range?
blt.b cin_no # no
rts # yes; return d0 = 0
cin_no:
mov.l &-0x1,%d0 # out of range; return d0 = -1
rts
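# The routine above simply brackets the faulting PC against the _CASLO/_CASHI
# labels that surround the core code below. A rough C equivalent (illustrative
# sketch only; not part of the package):
#
#	extern char _CASLO[], _CASHI[];
#	int isp_cas_inrange(char *pc)
#	{
#		return (pc >= _CASLO && pc <= _CASHI) ? 0 : -1;
#	}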
#################################################################
#################################################################
#################################################################
# This is the start of the cas and cas2 "core" emulation code. #
# This is the section that may need to be replaced by the host #
# OS if it is too operating system-specific. #
# Please refer to the package documentation to see how to #
# "replace" this section, if necessary. #
#################################################################
#################################################################
#################################################################
# ###### ## ###### ####
# # # # # # #
# # ###### ###### #
# # # # # #
# ###### # # ###### ######
#########################################################################
# XDEF **************************************************************** #
# _isp_cas2(): "core" emulation code for the cas2 instruction #
# #
# XREF **************************************************************** #
# _isp_cas2_finish() - only exit point for this emulation code; #
# do clean-up; calculate ccodes; store #
# Compare Ops if appropriate. #
# #
# INPUT *************************************************************** #
# *see chart below* #
# #
# OUTPUT ************************************************************** #
# *see chart below* #
# #
# ALGORITHM *********************************************************** #
# (1) Make several copies of the effective address. #
# (2) Save current SR; Then mask off all maskable interrupts. #
# (3) Save current SFC/DFC (ASSUMED TO BE EQUAL!!!); Then set #
# according to whether exception occurred in user or #
# supervisor mode. #
# (4) Use "plpaw" instruction to pre-load ATC with effective #
# address pages(s). THIS SHOULD NOT FAULT!!! The relevant #
# page(s) should have already been made resident prior to #
# entering this routine. #
# (5) Push the operand lines from the cache w/ "cpushl". #
# In the 68040, this was done within the locked region. In #
# the 68060, it is done outside of the locked region. #
# (6) Use "plpar" instruction to do a re-load of ATC entries for #
# ADDR1 since ADDR2 entries may have pushed ADDR1 out of the #
# ATC. #
# (7) Pre-fetch the core emulation instructions by executing #
# one branch within each physical line (16 bytes) of the code #
# before actually executing the code. #
# (8) Load the BUSCR w/ the bus lock value. #
# (9) Fetch the source operands using "moves". #
# (10)Do the compares. If both equal, go to step (13). #
# (11)Unequal. No update occurs. But, we do write the DST1 op #
# back to itself (as w/ the '040) so we can gracefully unlock #
# the bus (and assert LOCKE*) using BUSCR and the final move. #
# (12)Exit. #
# (13)Write update operand to the DST locations. Use BUSCR to #
# assert LOCKE* for the final write operation. #
# (14)Exit. #
# #
# The algorithm is actually implemented slightly differently #
# depending on the size of the operation and the misalignment of the #
# operands. A misaligned operand must be written in aligned chunks or #
# else the BUSCR register control gets confused. #
# #
#########################################################################
#################################################################
# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON #
# ENTERING _isp_cas2(). #
# #
# D0 = xxxxxxxx #
# D1 = xxxxxxxx #
# D2 = cmp operand 1 #
# D3 = cmp operand 2 #
# D4 = update oper 1 #
# D5 = update oper 2 #
# D6 = 'xxxxxxff if supervisor mode; 'xxxxxx00 if user mode #
# D7 = 'xxxxxxff if longword operation; 'xxxxxx00 if word #
# A0 = ADDR1 #
# A1 = ADDR2 #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#################################################################
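# For reference, the locked region below implements the architectural cas2
# semantics sketched here in C (illustrative only; on a miss the compare
# registers are actually reloaded later by _isp_cas2_finish(), not inside
# the locked region):
#
#	int cas2(long *addr1, long *addr2, long *cmp1, long *cmp2,
#		 long upd1, long upd2)
#	{
#		/* everything below happens atomically (bus locked via BUSCR) */
#		if (*addr1 == *cmp1 && *addr2 == *cmp2) {
#			*addr1 = upd1;
#			*addr2 = upd2;
#			return 1;		/* update performed */
#		}
#		*cmp1 = *addr1;			/* miss: compare regs get the */
#		*cmp2 = *addr2;			/* current memory values      */
#		return 0;
#	}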
# align 0x1000
# beginning label used by _isp_cas_inrange()
global _CASLO
_CASLO:
global _isp_cas2
_isp_cas2:
tst.b %d6 # user or supervisor mode?
bne.b cas2_supervisor # supervisor
cas2_user:
movq.l &0x1,%d0 # load user data fc
bra.b cas2_cont
cas2_supervisor:
movq.l &0x5,%d0 # load supervisor data fc
cas2_cont:
tst.b %d7 # word or longword?
beq.w cas2w # word
####
cas2l:
mov.l %a0,%a2 # copy ADDR1
mov.l %a1,%a3 # copy ADDR2
mov.l %a0,%a4 # copy ADDR1
mov.l %a1,%a5 # copy ADDR2
addq.l &0x3,%a4 # ADDR1+3
addq.l &0x3,%a5 # ADDR2+3
mov.l %a2,%d1 # ADDR1
# mask interrupts levels 0-6. save old mask value.
mov.w %sr,%d7 # save current SR
ori.w &0x0700,%sr # inhibit interrupts
# load the SFC and DFC with the appropriate mode.
movc %sfc,%d6 # save old SFC/DFC
movc %d0,%sfc # store new SFC
movc %d0,%dfc # store new DFC
# pre-load the operand ATC. no page faults should occur here because
# _real_lock_page() should have taken care of this.
plpaw (%a2) # load atc for ADDR1
plpaw (%a4) # load atc for ADDR1+3
plpaw (%a3) # load atc for ADDR2
plpaw (%a5) # load atc for ADDR2+3
# push the operand lines from the cache if they exist.
cpushl %dc,(%a2) # push line for ADDR1
cpushl %dc,(%a4) # push line for ADDR1+3
cpushl %dc,(%a3) # push line for ADDR2
cpushl %dc,(%a5) # push line for ADDR2+3
mov.l %d1,%a2 # ADDR1
addq.l &0x3,%d1
mov.l %d1,%a4 # ADDR1+3
# if ADDR1 was ATC resident before the above "plpaw" and was executed
# and it was the next entry scheduled for replacement and ADDR2
# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
# entries from the ATC. so, we do a second set of "plpa"s.
plpar (%a2) # load atc for ADDR1
plpar (%a4) # load atc for ADDR1+3
# load the BUSCR values.
mov.l &0x80000000,%a2 # assert LOCK* buscr value
mov.l &0xa0000000,%a3 # assert LOCKE* buscr value
mov.l &0x00000000,%a4 # buscr unlock value
# there are three possible mis-aligned cases for longword cas. they
# are separated because the final write which asserts LOCKE* must
# be aligned.
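# Sketch of how the final locked write is carved up by alignment (this just
# restates the three paths below; write32/write16/write8 are hypothetical
# helpers standing in for the "movs" sequences):
#
#	unsigned long a = (unsigned long)addr1;
#	switch (a & 0x3) {
#	case 0:					/* CAS2L:  aligned          */
#		write32(a, val);
#		break;
#	case 2:					/* CAS2L2: word misaligned  */
#		write16(a + 0, val >> 16);
#		write16(a + 2, val);
#		break;
#	default:				/* CAS2L3: byte misaligned  */
#		write8 (a + 0, val >> 24);
#		write16(a + 1, val >> 8);
#		write8 (a + 3, val);
#		break;
#	}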
mov.l %a0,%d0 # is ADDR1 misaligned?
andi.b &0x3,%d0
beq.b CAS2L_ENTER # no
cmpi.b %d0,&0x2
beq.w CAS2L2_ENTER # yes; word misaligned
bra.w CAS2L3_ENTER # yes; byte misaligned
#
# D0 = dst operand 1 <-
# D1 = dst operand 2 <-
# D2 = cmp operand 1
# D3 = cmp operand 2
# D4 = update oper 1
# D5 = update oper 2
# D6 = old SFC/DFC
# D7 = old SR
# A0 = ADDR1
# A1 = ADDR2
# A2 = bus LOCK* value
# A3 = bus LOCKE* value
# A4 = bus unlock value
# A5 = xxxxxxxx
#
align 0x10
CAS2L_START:
movc %a2,%buscr # assert LOCK*
movs.l (%a1),%d1 # fetch Dest2[31:0]
movs.l (%a0),%d0 # fetch Dest1[31:0]
bra.b CAS2L_CONT
CAS2L_ENTER:
bra.b ~+16
CAS2L_CONT:
cmp.l %d0,%d2 # Dest1 - Compare1
bne.b CAS2L_NOUPDATE
cmp.l %d1,%d3 # Dest2 - Compare2
bne.b CAS2L_NOUPDATE
movs.l %d5,(%a1) # Update2[31:0] -> DEST2
bra.b CAS2L_UPDATE
bra.b ~+16
CAS2L_UPDATE:
movc %a3,%buscr # assert LOCKE*
movs.l %d4,(%a0) # Update1[31:0] -> DEST1
movc %a4,%buscr # unlock the bus
bra.b cas2l_update_done
bra.b ~+16
CAS2L_NOUPDATE:
movc %a3,%buscr # assert LOCKE*
movs.l %d0,(%a0) # Dest1[31:0] -> DEST1
movc %a4,%buscr # unlock the bus
bra.b cas2l_noupdate_done
bra.b ~+16
CAS2L_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CAS2L_START
####
#################################################################
# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
# CALLING _isp_cas2_finish(). #
# #
# D0 = destination[31:0] operand 1 #
# D1 = destination[31:0] operand 2 #
# D2 = cmp[31:0] operand 1 #
# D3 = cmp[31:0] operand 2 #
# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
# D5 = xxxxxxxx #
# D6 = xxxxxxxx #
# D7 = xxxxxxxx #
# A0 = xxxxxxxx #
# A1 = xxxxxxxx #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#################################################################
cas2l_noupdate_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
sf %d4 # indicate no update was done
bra.l _isp_cas2_finish
cas2l_update_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
st %d4 # indicate update was done
bra.l _isp_cas2_finish
####
align 0x10
CAS2L2_START:
movc %a2,%buscr # assert LOCK*
movs.l (%a1),%d1 # fetch Dest2[31:0]
movs.l (%a0),%d0 # fetch Dest1[31:0]
bra.b CAS2L2_CONT
CAS2L2_ENTER:
bra.b ~+16
CAS2L2_CONT:
cmp.l %d0,%d2 # Dest1 - Compare1
bne.b CAS2L2_NOUPDATE
cmp.l %d1,%d3 # Dest2 - Compare2
bne.b CAS2L2_NOUPDATE
movs.l %d5,(%a1) # Update2[31:0] -> Dest2
bra.b CAS2L2_UPDATE
bra.b ~+16
CAS2L2_UPDATE:
swap %d4 # get Update1[31:16]
movs.w %d4,(%a0)+ # Update1[31:16] -> DEST1
movc %a3,%buscr # assert LOCKE*
swap %d4 # get Update1[15:0]
bra.b CAS2L2_UPDATE2
bra.b ~+16
CAS2L2_UPDATE2:
movs.w %d4,(%a0) # Update1[15:0] -> DEST1+0x2
movc %a4,%buscr # unlock the bus
bra.w cas2l_update_done
nop
bra.b ~+16
CAS2L2_NOUPDATE:
swap %d0 # get Dest1[31:16]
movs.w %d0,(%a0)+ # Dest1[31:16] -> DEST1
movc %a3,%buscr # assert LOCKE*
swap %d0 # get Dest1[15:0]
bra.b CAS2L2_NOUPDATE2
bra.b ~+16
CAS2L2_NOUPDATE2:
movs.w %d0,(%a0) # Dest1[15:0] -> DEST1+0x2
movc %a4,%buscr # unlock the bus
bra.w cas2l_noupdate_done
nop
bra.b ~+16
CAS2L2_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CAS2L2_START
#################################
align 0x10
CAS2L3_START:
movc %a2,%buscr # assert LOCK*
movs.l (%a1),%d1 # fetch Dest2[31:0]
movs.l (%a0),%d0 # fetch Dest1[31:0]
bra.b CAS2L3_CONT
CAS2L3_ENTER:
bra.b ~+16
CAS2L3_CONT:
cmp.l %d0,%d2 # Dest1 - Compare1
bne.b CAS2L3_NOUPDATE
cmp.l %d1,%d3 # Dest2 - Compare2
bne.b CAS2L3_NOUPDATE
movs.l %d5,(%a1) # Update2[31:0] -> DEST2
bra.b CAS2L3_UPDATE
bra.b ~+16
CAS2L3_UPDATE:
rol.l &0x8,%d4 # get Update1[31:24]
movs.b %d4,(%a0)+ # Update1[31:24] -> DEST1
swap %d4 # get Update1[23:8]
movs.w %d4,(%a0)+ # Update1[23:8] -> DEST1+0x1
bra.b CAS2L3_UPDATE2
bra.b ~+16
CAS2L3_UPDATE2:
rol.l &0x8,%d4 # get Update1[7:0]
movc %a3,%buscr # assert LOCKE*
movs.b %d4,(%a0) # Update1[7:0] -> DEST1+0x3
bra.b CAS2L3_UPDATE3
nop
bra.b ~+16
CAS2L3_UPDATE3:
movc %a4,%buscr # unlock the bus
bra.w cas2l_update_done
nop
nop
nop
bra.b ~+16
CAS2L3_NOUPDATE:
rol.l &0x8,%d0 # get Dest1[31:24]
movs.b %d0,(%a0)+ # Dest1[31:24] -> DEST1
swap %d0 # get Dest1[23:8]
movs.w %d0,(%a0)+ # Dest1[23:8] -> DEST1+0x1
bra.b CAS2L3_NOUPDATE2
bra.b ~+16
CAS2L3_NOUPDATE2:
rol.l &0x8,%d0 # get Dest1[7:0]
movc %a3,%buscr # assert LOCKE*
movs.b %d0,(%a0) # Dest1[7:0] -> DEST1+0x3
bra.b CAS2L3_NOUPDATE3
nop
bra.b ~+16
CAS2L3_NOUPDATE3:
movc %a4,%buscr # unlock the bus
bra.w cas2l_noupdate_done
nop
nop
nop
bra.b ~+14
CAS2L3_FILLER:
nop
nop
nop
nop
nop
nop
bra.w CAS2L3_START
#############################################################
#############################################################
cas2w:
mov.l %a0,%a2 # copy ADDR1
mov.l %a1,%a3 # copy ADDR2
mov.l %a0,%a4 # copy ADDR1
mov.l %a1,%a5 # copy ADDR2
addq.l &0x1,%a4 # ADDR1+1
addq.l &0x1,%a5 # ADDR2+1
mov.l %a2,%d1 # ADDR1
# mask interrupt levels 0-6. save old mask value.
mov.w %sr,%d7 # save current SR
ori.w &0x0700,%sr # inhibit interrupts
# load the SFC and DFC with the appropriate mode.
movc %sfc,%d6 # save old SFC/DFC
movc %d0,%sfc # store new SFC
movc %d0,%dfc # store new DFC
# pre-load the operand ATC. no page faults should occur because
# _real_lock_page() should have taken care of this.
plpaw (%a2) # load atc for ADDR1
plpaw (%a4) # load atc for ADDR1+1
plpaw (%a3) # load atc for ADDR2
plpaw (%a5) # load atc for ADDR2+1
# push the operand cache lines from the cache if they exist.
cpushl %dc,(%a2) # push line for ADDR1
cpushl %dc,(%a4) # push line for ADDR1+1
cpushl %dc,(%a3) # push line for ADDR2
cpushl %dc,(%a5) # push line for ADDR2+1
mov.l %d1,%a2 # ADDR1
addq.l &0x3,%d1
mov.l %d1,%a4 # ADDR1+3
# if ADDR1 was ATC resident before the above "plpaw" and was executed
# and it was the next entry scheduled for replacement and ADDR2
# shares the same set, then the "plpaw" for ADDR2 can push the ADDR1
# entries from the ATC. so, we do a second set of "plpa"s.
plpar (%a2) # load atc for ADDR1
plpar (%a4) # load atc for ADDR1+3
# load the BUSCR values.
mov.l &0x80000000,%a2 # assert LOCK* buscr value
mov.l &0xa0000000,%a3 # assert LOCKE* buscr value
mov.l &0x00000000,%a4 # buscr unlock value
# there are two possible mis-aligned cases for word cas. they
# are separated because the final write which asserts LOCKE* must
# be aligned.
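# Sketch of the split for the word-sized case (hypothetical write16/write8
# helpers again; illustrative only):
#
#	unsigned long a = (unsigned long)addr1;
#	if ((a & 0x1) == 0)
#		write16(a, val);		/* CAS2W:  aligned     */
#	else {
#		write8(a + 0, val >> 8);	/* CAS2W2: odd address */
#		write8(a + 1, val);
#	}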
mov.l %a0,%d0 # is ADDR1 misaligned?
btst &0x0,%d0
bne.w CAS2W2_ENTER # yes
bra.b CAS2W_ENTER # no
#
# D0 = dst operand 1 <-
# D1 = dst operand 2 <-
# D2 = cmp operand 1
# D3 = cmp operand 2
# D4 = update oper 1
# D5 = update oper 2
# D6 = old SFC/DFC
# D7 = old SR
# A0 = ADDR1
# A1 = ADDR2
# A2 = bus LOCK* value
# A3 = bus LOCKE* value
# A4 = bus unlock value
# A5 = xxxxxxxx
#
align 0x10
CAS2W_START:
movc %a2,%buscr # assert LOCK*
movs.w (%a1),%d1 # fetch Dest2[15:0]
movs.w (%a0),%d0 # fetch Dest1[15:0]
bra.b CAS2W_CONT2
CAS2W_ENTER:
bra.b ~+16
CAS2W_CONT2:
cmp.w %d0,%d2 # Dest1 - Compare1
bne.b CAS2W_NOUPDATE
cmp.w %d1,%d3 # Dest2 - Compare2
bne.b CAS2W_NOUPDATE
movs.w %d5,(%a1) # Update2[15:0] -> DEST2
bra.b CAS2W_UPDATE
bra.b ~+16
CAS2W_UPDATE:
movc %a3,%buscr # assert LOCKE*
movs.w %d4,(%a0) # Update1[15:0] -> DEST1
movc %a4,%buscr # unlock the bus
bra.b cas2w_update_done
bra.b ~+16
CAS2W_NOUPDATE:
movc %a3,%buscr # assert LOCKE*
movs.w %d0,(%a0) # Dest1[15:0] -> DEST1
movc %a4,%buscr # unlock the bus
bra.b cas2w_noupdate_done
bra.b ~+16
CAS2W_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CAS2W_START
####
#################################################################
# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
# CALLING _isp_cas2_finish(). #
# #
# D0 = destination[15:0] operand 1 #
# D1 = destination[15:0] operand 2 #
# D2 = cmp[15:0] operand 1 #
# D3 = cmp[15:0] operand 2 #
# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
# D5 = xxxxxxxx #
# D6 = xxxxxxxx #
# D7 = xxxxxxxx #
# A0 = xxxxxxxx #
# A1 = xxxxxxxx #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#################################################################
cas2w_noupdate_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
sf %d4 # indicate no update was done
bra.l _isp_cas2_finish
cas2w_update_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
st %d4 # indicate update was done
bra.l _isp_cas2_finish
####
align 0x10
CAS2W2_START:
movc %a2,%buscr # assert LOCK*
movs.w (%a1),%d1 # fetch Dest2[15:0]
movs.w (%a0),%d0 # fetch Dest1[15:0]
bra.b CAS2W2_CONT2
CAS2W2_ENTER:
bra.b ~+16
CAS2W2_CONT2:
cmp.w %d0,%d2 # Dest1 - Compare1
bne.b CAS2W2_NOUPDATE
cmp.w %d1,%d3 # Dest2 - Compare2
bne.b CAS2W2_NOUPDATE
movs.w %d5,(%a1) # Update2[15:0] -> DEST2
bra.b CAS2W2_UPDATE
bra.b ~+16
CAS2W2_UPDATE:
ror.l &0x8,%d4 # get Update1[15:8]
movs.b %d4,(%a0)+ # Update1[15:8] -> DEST1
movc %a3,%buscr # assert LOCKE*
rol.l &0x8,%d4 # get Update1[7:0]
bra.b CAS2W2_UPDATE2
bra.b ~+16
CAS2W2_UPDATE2:
movs.b %d4,(%a0) # Update1[7:0] -> DEST1+0x1
movc %a4,%buscr # unlock the bus
bra.w cas2w_update_done
nop
bra.b ~+16
CAS2W2_NOUPDATE:
ror.l &0x8,%d0 # get Dest1[15:8]
movs.b %d0,(%a0)+ # Dest1[15:8] -> DEST1
movc %a3,%buscr # assert LOCKE*
rol.l &0x8,%d0 # get Dest1[7:0]
bra.b CAS2W2_NOUPDATE2
bra.b ~+16
CAS2W2_NOUPDATE2:
movs.b %d0,(%a0) # Dest1[7:0] -> DEST1+0x1
movc %a4,%buscr # unlock the bus
bra.w cas2w_noupdate_done
nop
bra.b ~+16
CAS2W2_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CAS2W2_START
# ###### ## ######
# # # # #
# # ###### ######
# # # # #
# ###### # # ######
#########################################################################
# XDEF **************************************************************** #
# _isp_cas(): "core" emulation code for the cas instruction #
# #
# XREF **************************************************************** #
# _isp_cas_finish() - only exit point for this emulation code; #
# do clean-up #
# #
# INPUT *************************************************************** #
# *see entry chart below* #
# #
# OUTPUT ************************************************************** #
# *see exit chart below* #
# #
# ALGORITHM *********************************************************** #
# (1) Make several copies of the effective address. #
# (2) Save current SR; Then mask off all maskable interrupts. #
# (3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set #
# SFC/DFC according to whether exception occurred in user or #
# supervisor mode. #
# (4) Use "plpaw" instruction to pre-load ATC with effective #
# address page(s). THIS SHOULD NOT FAULT!!! The relevant #
# page(s) should have been made resident prior to entering #
# this routine. #
# (5) Push the operand lines from the cache w/ "cpushl". #
# In the 68040, this was done within the locked region. In #
# the 68060, it is done outside of the locked region. #
# (6) Pre-fetch the core emulation instructions by executing one #
# branch within each physical line (16 bytes) of the code #
# before actually executing the code. #
# (7) Load the BUSCR with the bus lock value. #
# (8) Fetch the source operand. #
# (9) Do the compare. If equal, go to step (12). #
# (10)Unequal. No update occurs. But, we do write the DST op back #
# to itself (as w/ the '040) so we can gracefully unlock #
# the bus (and assert LOCKE*) using BUSCR and the final move. #
# (11)Exit. #
# (12)Write update operand to the DST location. Use BUSCR to #
# assert LOCKE* for the final write operation. #
# (13)Exit. #
# #
# The algorithm is actually implemented slightly differently #
# depending on the size of the operation and the misalignment of the #
# operand. A misaligned operand must be written in aligned chunks or #
# else the BUSCR register control gets confused. #
# #
#########################################################################
#########################################################
# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON #
# ENTERING _isp_cas(). #
# #
# D0 = xxxxxxxx #
# D1 = xxxxxxxx #
# D2 = update operand #
# D3 = xxxxxxxx #
# D4 = compare operand #
# D5 = xxxxxxxx #
# D6 = supervisor ('xxxxxxff) or user mode ('xxxxxx00) #
# D7 = longword ('xxxxxxff) or word size ('xxxxxx00) #
# A0 = ADDR #
# A1 = xxxxxxxx #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#########################################################
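# As with cas2 above, the locked region below implements the cas semantics
# sketched here in C (illustrative only; the compare register Dc is reloaded
# by _isp_cas_finish() on a miss, outside the locked region):
#
#	int cas(long *addr, long *cmp, long upd)
#	{
#		/* everything below happens atomically (bus locked via BUSCR) */
#		if (*addr == *cmp) {
#			*addr = upd;
#			return 1;		/* update performed */
#		}
#		*cmp = *addr;			/* miss: Dc gets the memory value */
#		return 0;
#	}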
global _isp_cas
_isp_cas:
tst.b %d6 # user or supervisor mode?
bne.b cas_super # supervisor
cas_user:
movq.l &0x1,%d0 # load user data fc
bra.b cas_cont
cas_super:
movq.l &0x5,%d0 # load supervisor data fc
cas_cont:
tst.b %d7 # word or longword?
bne.w casl # longword
####
casw:
mov.l %a0,%a1 # make copy for plpaw1
mov.l %a0,%a2 # make copy for plpaw2
addq.l &0x1,%a2 # plpaw2 points to end of word
mov.l %d2,%d3 # d3 = update[7:0]
lsr.w &0x8,%d2 # d2 = update[15:8]
# mask interrupt levels 0-6. save old mask value.
mov.w %sr,%d7 # save current SR
ori.w &0x0700,%sr # inhibit interrupts
# load the SFC and DFC with the appropriate mode.
movc %sfc,%d6 # save old SFC/DFC
movc %d0,%sfc # load new sfc
movc %d0,%dfc # load new dfc
# pre-load the operand ATC. no page faults should occur here because
# _real_lock_page() should have taken care of this.
plpaw (%a1) # load atc for ADDR
plpaw (%a2) # load atc for ADDR+1
# push the operand lines from the cache if they exist.
cpushl %dc,(%a1) # push dirty data
cpushl %dc,(%a2) # push dirty data
# load the BUSCR values.
mov.l &0x80000000,%a1 # assert LOCK* buscr value
mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
mov.l &0x00000000,%a3 # buscr unlock value
# pre-load the instruction cache for the following algorithm.
# this will minimize the number of cycles that LOCK* will be asserted.
bra.b CASW_ENTER # start pre-loading icache
#
# D0 = dst operand <-
# D1 = xxxxxxxx
# D2 = update[15:8] operand
# D3 = update[7:0] operand
# D4 = compare[15:0] operand
# D5 = xxxxxxxx
# D6 = old SFC/DFC
# D7 = old SR
# A0 = ADDR
# A1 = bus LOCK* value
# A2 = bus LOCKE* value
# A3 = bus unlock value
# A4 = xxxxxxxx
# A5 = xxxxxxxx
#
align 0x10
CASW_START:
movc %a1,%buscr # assert LOCK*
movs.w (%a0),%d0 # fetch Dest[15:0]
cmp.w %d0,%d4 # Dest - Compare
bne.b CASW_NOUPDATE
bra.b CASW_UPDATE
CASW_ENTER:
bra.b ~+16
CASW_UPDATE:
movs.b %d2,(%a0)+ # Update[15:8] -> DEST
movc %a2,%buscr # assert LOCKE*
movs.b %d3,(%a0) # Update[7:0] -> DEST+0x1
bra.b CASW_UPDATE2
bra.b ~+16
CASW_UPDATE2:
movc %a3,%buscr # unlock the bus
bra.b casw_update_done
nop
nop
nop
nop
bra.b ~+16
CASW_NOUPDATE:
ror.l &0x8,%d0 # get Dest[15:8]
movs.b %d0,(%a0)+ # Dest[15:8] -> DEST
movc %a2,%buscr # assert LOCKE*
rol.l &0x8,%d0 # get Dest[7:0]
bra.b CASW_NOUPDATE2
bra.b ~+16
CASW_NOUPDATE2:
movs.b %d0,(%a0) # Dest[7:0] -> DEST+0x1
movc %a3,%buscr # unlock the bus
bra.b casw_noupdate_done
nop
nop
bra.b ~+16
CASW_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CASW_START
#################################################################
# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
# CALLING _isp_cas_finish(). #
# #
# D0 = destination[15:0] operand #
# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
# D2 = xxxxxxxx #
# D3 = xxxxxxxx #
# D4 = compare[15:0] operand #
# D5 = xxxxxxxx #
# D6 = xxxxxxxx #
# D7 = xxxxxxxx #
# A0 = xxxxxxxx #
# A1 = xxxxxxxx #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#################################################################
casw_noupdate_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
sf %d1 # indicate no update was done
bra.l _isp_cas_finish
casw_update_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
st %d1 # indicate update was done
bra.l _isp_cas_finish
################
# there are two possible mis-aligned cases for longword cas. they
# are separated because the final write which asserts LOCKE* must
# be an aligned write.
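# Sketch of the split used here (even addresses take the two-word path,
# odd addresses the byte/word/byte path; hypothetical helpers):
#
#	unsigned long a = (unsigned long)addr;
#	if ((a & 0x1) == 0) {			/* CASL:  even address */
#		write16(a + 0, val >> 16);
#		write16(a + 2, val);
#	} else {				/* CASL2: odd address  */
#		write8 (a + 0, val >> 24);
#		write16(a + 1, val >> 8);
#		write8 (a + 3, val);
#	}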
casl:
mov.l %a0,%a1 # make copy for plpaw1
mov.l %a0,%a2 # make copy for plpaw2
addq.l &0x3,%a2 # plpaw2 points to end of longword
mov.l %a0,%d1 # byte or word misaligned?
btst &0x0,%d1
bne.w casl2 # byte misaligned
mov.l %d2,%d3 # d3 = update[15:0]
swap %d2 # d2 = update[31:16]
# mask interrupts levels 0-6. save old mask value.
mov.w %sr,%d7 # save current SR
ori.w &0x0700,%sr # inhibit interrupts
# load the SFC and DFC with the appropriate mode.
movc %sfc,%d6 # save old SFC/DFC
movc %d0,%sfc # load new sfc
movc %d0,%dfc # load new dfc
# pre-load the operand ATC. no page faults should occur here because
# _real_lock_page() should have taken care of this.
plpaw (%a1) # load atc for ADDR
plpaw (%a2) # load atc for ADDR+3
# push the operand lines from the cache if they exist.
cpushl %dc,(%a1) # push dirty data
cpushl %dc,(%a2) # push dirty data
# load the BUSCR values.
mov.l &0x80000000,%a1 # assert LOCK* buscr value
mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
mov.l &0x00000000,%a3 # buscr unlock value
bra.b CASL_ENTER # start pre-loading icache
#
# D0 = dst operand <-
# D1 = xxxxxxxx
# D2 = update[31:16] operand
# D3 = update[15:0] operand
# D4 = compare[31:0] operand
# D5 = xxxxxxxx
# D6 = old SFC/DFC
# D7 = old SR
# A0 = ADDR
# A1 = bus LOCK* value
# A2 = bus LOCKE* value
# A3 = bus unlock value
# A4 = xxxxxxxx
# A5 = xxxxxxxx
#
align 0x10
CASL_START:
movc %a1,%buscr # assert LOCK*
movs.l (%a0),%d0 # fetch Dest[31:0]
cmp.l %d0,%d4 # Dest - Compare
bne.b CASL_NOUPDATE
bra.b CASL_UPDATE
CASL_ENTER:
bra.b ~+16
CASL_UPDATE:
movs.w %d2,(%a0)+ # Update[31:16] -> DEST
movc %a2,%buscr # assert LOCKE*
movs.w %d3,(%a0) # Update[15:0] -> DEST+0x2
bra.b CASL_UPDATE2
bra.b ~+16
CASL_UPDATE2:
movc %a3,%buscr # unlock the bus
bra.b casl_update_done
nop
nop
nop
nop
bra.b ~+16
CASL_NOUPDATE:
swap %d0 # get Dest[31:16]
movs.w %d0,(%a0)+ # Dest[31:16] -> DEST
swap %d0 # get Dest[15:0]
movc %a2,%buscr # assert LOCKE*
bra.b CASL_NOUPDATE2
bra.b ~+16
CASL_NOUPDATE2:
movs.w %d0,(%a0) # Dest[15:0] -> DEST+0x2
movc %a3,%buscr # unlock the bus
bra.b casl_noupdate_done
nop
nop
bra.b ~+16
CASL_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CASL_START
#################################################################
# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
# CALLING _isp_cas_finish(). #
# #
# D0 = destination[31:0] operand #
# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
# D2 = xxxxxxxx #
# D3 = xxxxxxxx #
# D4 = compare[31:0] operand #
# D5 = xxxxxxxx #
# D6 = xxxxxxxx #
# D7 = xxxxxxxx #
# A0 = xxxxxxxx #
# A1 = xxxxxxxx #
# A2 = xxxxxxxx #
# A3 = xxxxxxxx #
# A4 = xxxxxxxx #
# A5 = xxxxxxxx #
# A6 = frame pointer #
# A7 = stack pointer #
#################################################################
casl_noupdate_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupt mask level.
mov.w %d7,%sr # restore old SR
sf %d1 # indicate no update was done
bra.l _isp_cas_finish
casl_update_done:
# restore previous SFC/DFC value.
movc %d6,%sfc # restore old SFC
movc %d6,%dfc # restore old DFC
# restore previous interrupts mask level.
mov.w %d7,%sr # restore old SR
st %d1 # indicate update was done
bra.l _isp_cas_finish
#######################################
casl2:
mov.l %d2,%d5 # d5 = Update[7:0]
lsr.l &0x8,%d2
mov.l %d2,%d3 # d3 = Update[23:8]
swap %d2 # d2 = Update[31:24]
# mask interrupts levels 0-6. save old mask value.
mov.w %sr,%d7 # save current SR
ori.w &0x0700,%sr # inhibit interrupts
# load the SFC and DFC with the appropriate mode.
movc %sfc,%d6 # save old SFC/DFC
movc %d0,%sfc # load new sfc
movc %d0,%dfc # load new dfc
# pre-load the operand ATC. no page faults should occur here because
# _real_lock_page() should have taken care of this already.
plpaw (%a1) # load atc for ADDR
plpaw (%a2) # load atc for ADDR+3
# push the operand lines from the cache if they exist.
cpushl %dc,(%a1) # push dirty data
cpushl %dc,(%a2) # push dirty data
# load the BUSCR values.
mov.l &0x80000000,%a1 # assert LOCK* buscr value
mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
mov.l &0x00000000,%a3 # buscr unlock value
# pre-load the instruction cache for the following algorithm.
# this will minimize the number of cycles that LOCK* will be asserted.
bra.b CASL2_ENTER # start pre-loading icache
#
# D0 = dst operand <-
# D1 = xxxxxxxx
# D2 = update[31:24] operand
# D3 = update[23:8] operand
# D4 = compare[31:0] operand
# D5 = update[7:0] operand
# D6 = old SFC/DFC
# D7 = old SR
# A0 = ADDR
# A1 = bus LOCK* value
# A2 = bus LOCKE* value
# A3 = bus unlock value
# A4 = xxxxxxxx
# A5 = xxxxxxxx
#
align 0x10
CASL2_START:
movc %a1,%buscr # assert LOCK*
movs.l (%a0),%d0 # fetch Dest[31:0]
cmp.l %d0,%d4 # Dest - Compare
bne.b CASL2_NOUPDATE
bra.b CASL2_UPDATE
CASL2_ENTER:
bra.b ~+16
CASL2_UPDATE:
movs.b %d2,(%a0)+ # Update[31:24] -> DEST
movs.w %d3,(%a0)+ # Update[23:8] -> DEST+0x1
movc %a2,%buscr # assert LOCKE*
bra.b CASL2_UPDATE2
bra.b ~+16
CASL2_UPDATE2:
movs.b %d5,(%a0) # Update[7:0] -> DEST+0x3
movc %a3,%buscr # unlock the bus
bra.w casl_update_done
nop
bra.b ~+16
CASL2_NOUPDATE:
rol.l &0x8,%d0 # get Dest[31:24]
movs.b %d0,(%a0)+ # Dest[31:24] -> DEST
swap %d0 # get Dest[23:8]
movs.w %d0,(%a0)+ # Dest[23:8] -> DEST+0x1
bra.b CASL2_NOUPDATE2
bra.b ~+16
CASL2_NOUPDATE2:
rol.l &0x8,%d0 # get Dest[7:0]
movc %a2,%buscr # assert LOCKE*
movs.b %d0,(%a0) # Dest[7:0] -> DEST+0x3
bra.b CASL2_NOUPDATE3
nop
bra.b ~+16
CASL2_NOUPDATE3:
movc %a3,%buscr # unlock the bus
bra.w casl_noupdate_done
nop
nop
nop
bra.b ~+16
CASL2_FILLER:
nop
nop
nop
nop
nop
nop
nop
bra.b CASL2_START
####
####
# end label used by _isp_cas_inrange()
global _CASHI
_CASHI:
#############################################
# arch/m68k/ifpsp060/src/itest.S
#############################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
# M68000 Hi-Performance Microprocessor Division
# M68060 Software Package
# Production Release P1.00 -- October 10, 1994
#
# M68060 Software Package Copyright © 1993, 1994 Motorola Inc.  All rights reserved.
#
# THE SOFTWARE is provided on an "AS IS" basis and without warranty.
# To the maximum extent permitted by applicable law,
# MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
# INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
# and any warranty against infringement with regard to the SOFTWARE
# (INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
#
# To the maximum extent permitted by applicable law,
# IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
# (INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
# BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
# ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
# Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
#
# You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
# so long as this entire notice is retained without alteration in any modified and/or
# redistributed versions, and that such modified versions are clearly identified as such.
# No licenses are granted by implication, estoppel or otherwise under any patents
# or trademarks of Motorola, Inc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#############################################
set SREGS, -64
set IREGS, -128
set SCCR, -130
set ICCR, -132
set TESTCTR, -136
set EAMEM, -140
set EASTORE, -144
set DATA, -160
#############################################
TESTTOP:
bra.l _060TESTS_
start_str:
string "Testing 68060 ISP started:\n"
pass_str:
string "passed\n"
fail_str:
string " failed\n"
align 0x4
chk_test:
tst.l %d0
bne.b test_fail
test_pass:
pea pass_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
rts
test_fail:
mov.l %d1,-(%sp)
bsr.l _print_num
addq.l &0x4,%sp
pea fail_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
rts
#############################################
_060TESTS_:
link %a6,&-160
movm.l &0x3f3c,-(%sp)
pea start_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
### mul
clr.l TESTCTR(%a6)
pea mulul_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l mulul_0
bsr.l chk_test
### div
clr.l TESTCTR(%a6)
pea divul_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l divul_0
bsr.l chk_test
### cmp2
clr.l TESTCTR(%a6)
pea cmp2_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l cmp2_1
bsr.l chk_test
### movp
clr.l TESTCTR(%a6)
pea movp_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l movp_0
bsr.l chk_test
### ea
clr.l TESTCTR(%a6)
pea ea_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
mov.l &0x2,EAMEM(%a6)
bsr.l ea_0
bsr.l chk_test
### cas
clr.l TESTCTR(%a6)
pea cas_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l cas0
bsr.l chk_test
### cas2
clr.l TESTCTR(%a6)
pea cas2_str(%pc)
bsr.l _print_str
addq.l &0x4,%sp
bsr.l cas20
bsr.l chk_test
###
movm.l (%sp)+,&0x3cfc
unlk %a6
rts
#############################################
#############################################
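# Every test below follows the same pattern: load a known register image,
# record the expected condition codes in ICCR and the expected registers in
# IREGS, execute the instruction under test, capture the resulting %cc into
# SCCR and the registers into SREGS, then let chkregs (defined later in this
# file) compare the two images. A rough C sketch of that comparison
# (illustrative only; the exact layout checked by chkregs is an assumption):
#
#	int chkregs(long *iregs, long *sregs, short iccr, short sccr)
#	{
#		int i;
#		if (iccr != sccr)
#			return -1;		/* condition codes differ */
#		for (i = 0; i < 15; i++)	/* d0-d7 / a0-a6          */
#			if (iregs[i] != sregs[i])
#				return -1;	/* a register differs     */
#		return 0;			/* everything matched     */
#	}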
mulul_str:
string "\t64-bit multiply..."
align 0x4
mulul_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d1
mov.l &0x99999999,%d2
mov.l &0x88888888,%d3
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
clr.l IREGS+0x8(%a6)
clr.l IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x77777777,%d1
mov.l &0x99999999,%d2
mov.l &0x00000000,%d3
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
clr.l IREGS+0x8(%a6)
clr.l IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x00000010,%d1
mov.l &0x66666666,%d2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d2
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000006,IREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x55555555,%d1
mov.l &0x00000000,%d2
mov.l &0x00000003,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000000,IREGS+0x8(%a6)
mov.l &0xffffffff,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x40000000,%d1
mov.l &0x00000000,%d2
mov.l &0x00000004,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000001,IREGS+0x8(%a6)
mov.l &0x00000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xffffffff,%d1
mov.l &0x00000000,%d2
mov.l &0xffffffff,%d3
mov.w &0x0008,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xfffffffe,IREGS+0x8(%a6)
mov.l &0x00000001,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_6:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x80000000,%d1
mov.l &0x00000000,%d2
mov.l &0xffffffff,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
muls.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000000,IREGS+0x8(%a6)
mov.l &0x80000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_7:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x80000000,%d1
mov.l &0x00000000,%d2
mov.l &0x00000001,%d3
mov.w &0x0008,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
muls.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xffffffff,IREGS+0x8(%a6)
mov.l &0x80000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mulul_8:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x00000001,%d1
mov.l &0x00000000,%d2
mov.l &0x80000000,%d3
mov.w &0x0008,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
muls.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xffffffff,IREGS+0x8(%a6)
mov.l &0x80000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
#############################################
movp_str:
string "\tmovep..."
align 0x4
###############################
# movep.w %d0,(0x0,%a0) #
###############################
movp_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.w &0xaaaa,%d0
clr.b 0x0(%a0)
clr.b 0x2(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0(%a0),%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w %d0,(0x0,%a0) #
###############################
movp_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x4(%a6),%a0
mov.w &0xaaaa,%d0
clr.l -0x4(%a0)
clr.l (%a0)
clr.l 0x4(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
tst.l -0x4(%a0)
bne.l error
tst.l 0x4(%a0)
bne.l error
cmpi.l (%a0),&0xaa00aa00
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
#####################################################
# movep.w %d0,(0x0,%a0) #
# - this test has %cc initially equal to zero #
#####################################################
movp_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.w &0xaaaa,%d0
clr.b 0x0(%a0)
clr.b 0x2(%a0)
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0(%a0),%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w (0x0,%a0),%d0 #
###############################
movp_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.b &0xaa,0x0(%a0)
mov.b &0xaa,0x2(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w (0x0,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.w &0xaaaa,IREGS+0x2(%a6)
mov.w &0xaaaa,%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.l %d0,(0x0,%a0) #
###############################
movp_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.l &0xaaaaaaaa,%d0
clr.b 0x0(%a0)
clr.b 0x2(%a0)
clr.b 0x4(%a0)
clr.b 0x6(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x6(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x4(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x2(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x0(%a0),%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.l %d0,(0x0,%a0) #
###############################
movp_5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x4(%a6),%a0
mov.l &0xaaaaaaaa,%d0
clr.l -0x4(%a0)
clr.l (%a0)
clr.l 0x4(%a0)
clr.l 0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
tst.l -0x4(%a0)
bne.l error
tst.l 0x8(%a0)
bne.l error
cmpi.l (%a0),&0xaa00aa00
bne.l error
cmpi.l 0x4(%a0),&0xaa00aa00
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.l (0x0,%a0),%d0 #
###############################
movp_6:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.b &0xaa,0x0(%a0)
mov.b &0xaa,0x2(%a0)
mov.b &0xaa,0x4(%a0)
mov.b &0xaa,0x6(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l (0x0,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xaaaaaaaa,IREGS(%a6)
mov.l &0xaaaaaaaa,%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w %d7,(0x0,%a0) #
###############################
movp_7:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.w &0xaaaa,%d7
clr.b 0x0(%a0)
clr.b 0x2(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d7,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0(%a0),%d1
cmp.w %d7,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w (0x0,%a0),%d7 #
###############################
movp_8:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.b &0xaa,0x0(%a0)
mov.b &0xaa,0x2(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w (0x0,%a0),%d7
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.w &0xaaaa,IREGS+30(%a6)
mov.w &0xaaaa,%d1
cmp.w %d7,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w %d0,(0x0,%a0) #
###############################
movp_9:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.w &0xaaaa,%d0
clr.b 0x0(%a0)
clr.b 0x2(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(0x0,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0(%a0),%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w %d0,(0x8,%a0) #
###############################
movp_10:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.w &0xaaaa,%d0
clr.b 0x0+0x8(%a0)
clr.b 0x2+0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(0x8,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2+0x8(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0+0x8(%a0),%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.w (0x8,%a0),%d0 #
###############################
movp_11:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.b &0xaa,0x0+0x8(%a0)
mov.b &0xaa,0x2+0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w (0x8,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.w &0xaaaa,IREGS+0x2(%a6)
mov.w &0xaaaa,%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.l %d0,(0x8,%a0) #
###############################
movp_12:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.l &0xaaaaaaaa,%d0
clr.b 0x0+0x8(%a0)
clr.b 0x2+0x8(%a0)
clr.b 0x4+0x8(%a0)
clr.b 0x6+0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l %d0,(0x8,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x6+0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x4+0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x2+0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x0+0x8(%a0),%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
###############################
# movep.l (0x8,%a0),%d0 #
###############################
movp_13:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA(%a6),%a0
mov.b &0xaa,0x0+0x8(%a0)
mov.b &0xaa,0x2+0x8(%a0)
mov.b &0xaa,0x4+0x8(%a0)
mov.b &0xaa,0x6+0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l (0x8,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xaaaaaaaa,IREGS(%a6)
mov.l &0xaaaaaaaa,%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
################################
# movep.w %d0,(-0x8,%a0) #
################################
movp_14:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x8(%a6),%a0
mov.w &0xaaaa,%d0
clr.b 0x0-0x8(%a0)
clr.b 0x2-0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w %d0,(-0x8,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x2-0x8(%a0),%d1
lsl.w &0x8,%d1
mov.b 0x0-0x8(%a0),%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
################################
# movep.w (-0x8,%a0),%d0 #
################################
movp_15:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x8(%a6),%a0
mov.b &0xaa,0x0-0x8(%a0)
mov.b &0xaa,0x2-0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.w (-0x8,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.w &0xaaaa,IREGS+0x2(%a6)
mov.w &0xaaaa,%d1
cmp.w %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
################################
# movep.l %d0,(-0x8,%a0) #
################################
movp_16:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x8(%a6),%a0
mov.l &0xaaaaaaaa,%d0
clr.b 0x0-0x8(%a0)
clr.b 0x2-0x8(%a0)
clr.b 0x4-0x8(%a0)
clr.b 0x6-0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l %d0,(-0x8,%a0)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.b 0x6-0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x4-0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x2-0x8(%a0),%d1
lsl.l &0x8,%d1
mov.b 0x0-0x8(%a0),%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
################################
# movep.l (-0x8,%a0),%d0 #
################################
movp_17:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x8(%a6),%a0
mov.b &0xaa,0x0-0x8(%a0)
mov.b &0xaa,0x2-0x8(%a0)
mov.b &0xaa,0x4-0x8(%a0)
mov.b &0xaa,0x6-0x8(%a0)
mov.w &0x001f,ICCR(%a6)
mov.w &0x1f,%cc
movm.l &0x7fff,IREGS(%a6)
movp.l (-0x8,%a0),%d0
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xaaaaaaaa,IREGS(%a6)
mov.l &0xaaaaaaaa,%d1
cmp.l %d0,%d1
bne.l error
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
###########################################################
divul_str:
string "\t64-bit divide..."
align 0x4
divul_0:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d1
# mov.l &0x99999999,%d2
# mov.l &0x88888888,%d3
# mov.w &0x001e,ICCR(%a6)
# mov.w &0x001f,%cc
# movm.l &0x7fff,IREGS(%a6)
# divu.l %d1,%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0x7fff,SREGS(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
divul_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x00000001,%d1
mov.l &0x00000000,%d2
mov.l &0x00000000,%d3
mov.w &0x0014,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x44444444,%d1
mov.l &0x00000000,%d2
mov.l &0x55555555,%d3
mov.w &0x0010,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x11111111,IREGS+0x8(%a6)
mov.l &0x00000001,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x55555555,%d1
mov.l &0x00000000,%d2
mov.l &0x44444444,%d3
mov.w &0x0014,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x44444444,IREGS+0x8(%a6)
mov.l &0x00000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x11111111,%d1
mov.l &0x44444444,%d2
mov.l &0x44444444,%d3
mov.w &0x001e,ICCR(%a6)
mov.w &0x001d,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xfffffffe,%d1
mov.l &0x00000001,%d2
mov.l &0x00000002,%d3
mov.w &0x001e,ICCR(%a6)
mov.w &0x001d,%cc
movm.l &0x7fff,IREGS(%a6)
divs.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_6:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xfffffffe,%d1
mov.l &0x00000001,%d2
mov.l &0x00000000,%d3
mov.w &0x0018,ICCR(%a6)
mov.w &0x001d,%cc
movm.l &0x7fff,IREGS(%a6)
divs.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000000,IREGS+0x8(%a6)
mov.l &0x80000000,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_7:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x00000002,%d1
mov.l &0x00000001,%d2
mov.l &0x00000000,%d3
mov.w &0x001e,ICCR(%a6)
mov.w &0x001d,%cc
movm.l &0x7fff,IREGS(%a6)
divs.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_8:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xffffffff,%d1
mov.l &0xfffffffe,%d2
mov.l &0xffffffff,%d3
mov.w &0x0008,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_9:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xffffffff,%d1
mov.l &0xfffffffe,%d2
mov.l &0xffffffff,%d3
mov.w &0x0008,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l &0xffffffff,%d2:%d2
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0xffffffff,IREGS+0x8(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
divul_10:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x0000ffff,%d1
mov.l &0x00000001,%d2
mov.l &0x55555555,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
divu.l %d1,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x0000aaab,IREGS+0x8(%a6)
mov.l &0x00015556,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
###########################################################
cas_str:
string "\tcas..."
align 0x4
cas0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
mov.w &0xaaaa,(%a0)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0x0014,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas.w %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d3
mov.w &0xbbbb,IREGS+0xc+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
mov.w &0xeeee,(%a0)
mov.w &0x0000aaaa,%d1
mov.w &0x0000bbbb,%d2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas.w %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d3
mov.w &0xeeee,IREGS+0x4+0x2(%a6)
mov.w &0xeeee,IREGS+0xc+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x2(%a6),%a0
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d3
mov.l &0xbbbbbbbb,IREGS+0xc(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x2(%a6),%a0
mov.l &0xeeeeeeee,(%a0)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d3
mov.l &0xeeeeeeee,IREGS+0x4(%a6)
mov.l &0xeeeeeeee,IREGS+0xc(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d3
mov.l &0xbbbbbbbb,IREGS+0xc(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
mov.l &0x7fffffff,(%a0)
mov.l &0x80000000,%d1
mov.l &0xbbbbbbbb,%d2
mov.w &0x001b,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d3
mov.l &0x7fffffff,IREGS+0x4(%a6)
mov.l &0x7fffffff,IREGS+0xc(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
###########################################################
cas2_str:
string "\tcas2..."
align 0x4
cas20:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0014,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xcccccccc,IREGS+0x14(%a6)
mov.l &0xdddddddd,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas21:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0014,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xcccccccc,IREGS+0x14(%a6)
mov.l &0xdddddddd,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas22:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x2(%a6),%a0
lea DATA+0x6(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0014,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xcccccccc,IREGS+0x14(%a6)
mov.l &0xdddddddd,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas23:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.l &0xeeeeeeee,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xeeeeeeee,IREGS+0x4(%a6)
mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
mov.l &0xeeeeeeee,IREGS+0x14(%a6)
mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas24:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.l &0xeeeeeeee,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xeeeeeeee,IREGS+0x4(%a6)
mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
mov.l &0xeeeeeeee,IREGS+0x14(%a6)
mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas25:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x2(%a6),%a0
lea DATA+0x6(%a6),%a1
mov.l &0xeeeeeeee,(%a0)
mov.l &0xbbbbbbbb,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xeeeeeeee,IREGS+0x4(%a6)
mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
mov.l &0xeeeeeeee,IREGS+0x14(%a6)
mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas26:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xeeeeeeee,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
mov.l &0xeeeeeeee,IREGS+0x8(%a6)
mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
mov.l &0xeeeeeeee,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas27:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0xeeeeeeee,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0xbbbbbbbb,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
mov.l &0xeeeeeeee,IREGS+0x8(%a6)
mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
mov.l &0xeeeeeeee,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas28:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x2(%a6),%a0
lea DATA+0x6(%a6),%a1
mov.l &0xaaaaaaaa,(%a0)
mov.l &0x7fffffff,(%a1)
mov.l &0xaaaaaaaa,%d1
mov.l &0x80000000,%d2
mov.l &0xcccccccc,%d3
mov.l &0xdddddddd,%d4
mov.w &0x000b,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.l (%a0),%d5
mov.l (%a1),%d6
mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
mov.l &0x7fffffff,IREGS+0x8(%a6)
mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
mov.l &0x7fffffff,IREGS+0x18(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
##################################
cas29:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.w &0xaaaa,(%a0)
mov.w &0xbbbb,(%a1)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x0014,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xcccc,IREGS+0x14+0x2(%a6)
mov.w &0xdddd,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas210:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.w &0xaaaa,(%a0)
mov.w &0xbbbb,(%a1)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xcccc,IREGS+0x14+0x2(%a6)
mov.w &0xdddd,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas211:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.w &0xeeee,(%a0)
mov.w &0xbbbb,(%a1)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xeeee,IREGS+0x4+0x2(%a6)
mov.w &0xbbbb,IREGS+0x8+0x2(%a6)
mov.w &0xeeee,IREGS+0x14+0x2(%a6)
mov.w &0xbbbb,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas212:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.w &0xeeee,(%a0)
mov.w &0xbbbb,(%a1)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xeeee,IREGS+0x4+0x2(%a6)
mov.w &0xbbbb,IREGS+0x8+0x2(%a6)
mov.w &0xeeee,IREGS+0x14+0x2(%a6)
mov.w &0xbbbb,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas213:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x0(%a6),%a0
lea DATA+0x4(%a6),%a1
mov.w &0xaaaa,(%a0)
mov.w &0xeeee,(%a1)
mov.w &0xaaaa,%d1
mov.w &0xbbbb,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xaaaa,IREGS+0x4+0x2(%a6)
mov.w &0xeeee,IREGS+0x8+0x2(%a6)
mov.w &0xaaaa,IREGS+0x14+0x2(%a6)
mov.w &0xeeee,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cas214:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
lea DATA+0x1(%a6),%a0
lea DATA+0x5(%a6),%a1
mov.w &0xaaaa,(%a0)
mov.w &0x7fff,(%a1)
mov.w &0xaaaa,%d1
mov.w &0x8000,%d2
mov.w &0xcccc,%d3
mov.w &0xdddd,%d4
mov.w &0x001b,ICCR(%a6)
mov.w &0x0010,%cc
movm.l &0x7fff,IREGS(%a6)
cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
mov.w %cc,SCCR(%a6)
mov.w (%a0),%d5
mov.w (%a1),%d6
mov.w &0xaaaa,IREGS+0x4+0x2(%a6)
mov.w &0x7fff,IREGS+0x8+0x2(%a6)
mov.w &0xaaaa,IREGS+0x14+0x2(%a6)
mov.w &0x7fff,IREGS+0x18+0x2(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
###########################################################
cmp2_str:
string "\tcmp2,chk2..."
align 0x4
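# Each cmp2/chk2 case below stores a bounds pair at DATA(%a6), loads the compare
# value into a data or address register, records the expected condition codes in
# ICCR(%a6) and the initial register image in IREGS(%a6), executes the instruction,
# then captures the resulting CCR and registers in SCCR/SREGS and calls chkregs.
# The chk2 operands appear to be chosen in range here, so no exception is taken.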
# unsigned - small,small
cmp2_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x11111120,%d1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x00000040,%a1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x11111130,%d1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
chk2.b DATA(%a6),%d1
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x00000010,%a1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_5:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x11111150,%d1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_6:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0x2040,DATA(%a6)
mov.l &0x00000090,%a1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
# unsigned - small,large
cmp2_7:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0x11112000,%d1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.w %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_8:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0xffffa000,%a1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.w %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_9:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0x11113000,%d1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
chk2.w DATA(%a6),%d1
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_10:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0xffff9000,%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.w %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_11:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0x11111000,%d1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.w %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_12:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0x2000a000,DATA(%a6)
mov.l &0xffffb000,%a1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.w %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
# unsigned - large,large
cmp2_13:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0xa0000000,%d1
mov.w &0x000c,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.l %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_14:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0xc0000000,%a1
mov.w &0x000c,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.l %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_15:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0xb0000000,%d1
mov.w &0x0008,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
chk2.l DATA(%a6),%d1
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_16:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0x10000000,%a1
mov.w &0x0009,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.l %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_17:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0x90000000,%d1
mov.w &0x0009,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.l %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_18:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l &0xa0000000,DATA(%a6)
mov.l &0xc0000000,DATA+0x4(%a6)
mov.l &0xd0000000,%a1
mov.w &0x0009,ICCR(%a6)
mov.w &0x0008,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.l %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
# signed - negative,positive
cmp2_19:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x111111a0,%d1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_20:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x00000040,%a1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
chk2.b DATA(%a6),%a1
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_21:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x111111b0,%d1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_22:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x00000010,%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_23:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x11111190,%d1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_24:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa040,DATA(%a6)
mov.l &0x00000050,%a1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
# signed - negative,negative
cmp2_25:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0x111111a0,%d1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_26:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0xffffffc0,%a1
mov.w &0x0004,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_27:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0x111111b0,%d1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
chk2.b DATA(%a6),%d1
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_28:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0x11111190,%a1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_29:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0x111111d0,%d1
mov.w &0x0001,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %d1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
cmp2_30:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.w &0xa0c0,DATA(%a6)
mov.l &0x00000050,%a1
mov.w &0x001b,ICCR(%a6)
mov.w &0x001f,%cc
movm.l &0x7fff,IREGS(%a6)
cmp2.b %a1,DATA(%a6)
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
###########################################################
ea_str:
string "\tEffective addresses..."
align 0x4
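# The effective address cases below fetch a long word operand of 0x2 through each
# addressing mode with "mulu.l <ea>,%d2:%d3" (initial %d3 = 0x2), so a correct
# fetch leaves 0x4 in %d3 (checked through IREGS+0xc). The post-increment and
# pre-decrement forms also verify the updated address register value.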
ea_0:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a0),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_1:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a0)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x20(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_2:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a0),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x20(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_3:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x1000(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x1000,%a0),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_4:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a0),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_5:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d2
# mov.l &0x00000002,%d3
# mov.w &0x0000,ICCR(%a6)
# mov.w &0x0000,%cc
# movm.l &0xffff,IREGS(%a6)
# mulu.l EAMEM.w,%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0xffff,SREGS(%a6)
# mov.l &0x00000004,IREGS+0xc(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
ea_6:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d2
# mov.l &0x00000002,%d3
# mov.w &0x0000,ICCR(%a6)
# mov.w &0x0000,%cc
# movm.l &0xffff,IREGS(%a6)
# mulu.l EAMEM.l,%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0xffff,SREGS(%a6)
# mov.l &0x00000004,IREGS+0xc(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
ea_7:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l &0x00000002,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_8:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_8_next
ea_8_mem:
long 0x00000002
ea_8_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_8_mem.w,%pc),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_9:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x24(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_10:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x28(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_11:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a3),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x2c(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_12:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x30(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_13:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a6),%a5
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a5),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a0
mov.l %a0,IREGS+0x34(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_14:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x4(%a1),%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l -(%a6),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
lea EAMEM(%a1),%a0
mov.l %a0,IREGS+0x38(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_15:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.l %a7,%a0
lea EAMEM+0x4(%a6),%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l -(%a7),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM(%a6),%a1
mov.l %a1,IREGS+0x3c(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
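# The cases below use the indexed form with an 8-bit displacement,
# (d8,An,Xn.SIZE*SCALE), stepping through word and long index sizes, scale
# factors of 1/2/4/8, and both data and address register indexes; the
# displacement and index are chosen so the effective address is always EAMEM.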
ea_16:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_17:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.w*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_18:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.w*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_19:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.w*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_20:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_21:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.l*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_22:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.l*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_23:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_24:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a0,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_25:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x10.b,%a0,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_26:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a1
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a1,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_27:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a2
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a2,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_28:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a3,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_29:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a4
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a4,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_30:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a5
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a5,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_31:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a1),%a6
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l (0x10.b,%a6,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_32:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.l %a7,%a0
lea EAMEM(%a6),%a7
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.b,%a7,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
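# The cases below repeat the (An), (An)+, and (d16,An) forms across the remaining
# address registers (with %a6 and %a7 saved and restored around each test), and
# add a (d16,PC) case.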
ea_33:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_34:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_35:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a3),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_36:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_37:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a5
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a5),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_38:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a1),%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l (%a6),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_39:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.l %a7,%a0
lea EAMEM(%a6),%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a7),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
ea_40:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a1)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x24(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_41:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a2)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x28(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_42:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a3)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x2c(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_43:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a4)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x30(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_44:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a5
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a5)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a0
mov.l %a0,IREGS+0x34(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_45:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a1),%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l (%a6)+,%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
lea EAMEM+0x4(%a1),%a0
mov.l %a0,IREGS+0x38(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_46:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.l %a7,%a0
lea EAMEM(%a6),%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (%a7)+,%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
lea EAMEM+0x4(%a6),%a1
mov.l %a1,IREGS+0x3c(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
ea_47:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a1
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_48:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a2
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_49:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a3),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_50:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_51:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a5
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a5),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_52:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a1),%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l (0x1000,%a6),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_53:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.l %a7,%a0
lea EAMEM-0x1000(%a6),%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x1000,%a7),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
ea_54:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x1000(%a6),%a0
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x1000,%a0),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_55:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_55_next
ea_55_data:
long 0x00000002
ea_55_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_55_data.w,%pc),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
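# The cases below use the full extension word format with 16-bit (and one 32-bit)
# base displacements, including a suppressed index register (%za4) and a
# suppressed base register (%za3).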
ea_56:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_57:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.w*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_58:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.w*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_59:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.w*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_60:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_61:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.l*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_62:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.l*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_63:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x10.w,%a3,%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_64:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x10.w,%a3,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_65:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (0x00.w,%a3,%za4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_66:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l %a3,%a4
add.l &0x10,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x10.w,%za3,%a4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_67:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (-0x10.l,%a3,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
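# The cases below use PC-relative indexed forms; each test embeds its own long
# word operand (ea_NN_mem) near the instruction so that the fixed displacement
# plus the index register reaches it.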
ea_68:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_68_next
ea_68_mem:
long 0x00000002
ea_68_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_68_mem+0x10.w,%pc,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_69:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_69_next
ea_69_mem:
long 0x00000002
ea_69_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_69_mem+0x10.w,%pc,%d4.w*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_70:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_70_next
ea_70_mem:
long 0x00000002
ea_70_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_70_mem+0x10.w,%pc,%d4.w*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_71:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_71_next
ea_71_mem:
long 0x00000002
ea_71_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_71_mem+0x10.w,%pc,%d4.w*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_72:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_72_next
ea_72_mem:
long 0x00000002
ea_72_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_72_mem+0x10.w,%pc,%d4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_73:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_73_next
ea_73_mem:
long 0x00000002
ea_73_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_73_mem+0x10.w,%pc,%d4.l*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_74:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_74_next
ea_74_mem:
long 0x00000002
ea_74_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_74_mem+0x10.w,%pc,%d4.l*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_75:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_75_next
ea_75_mem:
long 0x00000002
ea_75_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0x7fff,IREGS(%a6)
mulu.l (ea_75_mem+0x10.w,%pc,%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0x7fff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_76:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_76_next
ea_76_mem:
long 0x00000002
ea_76_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &-0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_76_mem+0x10.w,%pc,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_77:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_77_next
ea_77_mem:
long 0x00000002
ea_77_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a3
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_77_mem+0x00.w,%pc,%za4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_78:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d2
# mov.l &0x00000002,%d3
# lea EAMEM,%a3
# mov.l %a3,%a4
# add.l &0x10,%a4
# mov.w &0x0000,ICCR(%a6)
# mov.w &0x0000,%cc
# movm.l &0xffff,IREGS(%a6)
# mulu.l (EAMEM-0x10.w,%zpc,%a4.l*1),%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0xffff,SREGS(%a6)
# mov.l &0x00000004,IREGS+0xc(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
ea_79:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM,%a3
mov.l &0x2,%a4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_79_mem-0x10.l,%pc,%a4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bra.b ea_79_next
ea_79_mem:
long 0x00000002
ea_79_next:
bsr.l chkregs
tst.b %d0
bne.l error
ea_80:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_80_next
ea_80_mem:
long 0x00000002
ea_80_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a1
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_80_mem+0x10.b,%pc,%d4.w*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_81:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_81_next
ea_81_mem:
long 0x00000002
ea_81_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_81_mem+0x10.b,%pc,%d4.w*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_82:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_82_next
ea_82_mem:
long 0x00000002
ea_82_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_82_mem+0x10.b,%pc,%d4.w*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_83:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_83_next
ea_83_mem:
long 0x00000002
ea_83_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_83_mem+0x10.b,%pc,%d4.w*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_84:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_84_next
ea_84_mem:
long 0x00000002
ea_84_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_84_mem+0x10.b,%pc,%d4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_85:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_85_next
ea_85_mem:
long 0x00000002
ea_85_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_85_mem+0x10.b,%pc,%d4.l*2),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_86:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_86_next
ea_86_mem:
long 0x00000002
ea_86_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_86_mem+0x10.b,%pc,%d4.l*4),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_87:
addq.l &0x1,TESTCTR(%a6)
bra.b ea_87_next
ea_87_mem:
long 0x00000002
ea_87_next:
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_87_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_88:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a6),%a0
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l (ea_88_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bra.b ea_88_next
ea_88_mem:
long 0x00000002
ea_88_next:
bsr.l chkregs
tst.b %d0
bne.l error
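# The cases below use memory indirect pre-indexed forms: EASTORE(%a6) holds an
# intermediate pointer, the bracketed part of the operand fetches it, and the
# outer displacement brings the final address back to EAMEM.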
ea_89:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.w*1],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_90:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.w*2],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_91:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.w*4],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_92:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.w*8],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_93:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.l*1],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_94:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.l*2],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_95:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.l*4],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_96:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4,%d4.l*8],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_97:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.l,%a4,%d4.l*8],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_98:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x00.l,%a4,%zd4.l*8],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_99:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([%a4,%zd4.l*8],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_100:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
add.l %a4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.l,%za4,%d4.l*1],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_101:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d2
# mov.l &0x00000002,%d3
# lea EAMEM(%a6),%a3
# lea EASTORE(%a6),%a4
# mov.l %a3,(%a4)
# mov.l &-0x10,%d4
# mov.w &0x0000,ICCR(%a6)
# mov.w &0x0000,%cc
# movm.l &0xffff,IREGS(%a6)
# mulu.l ([EASTORE.l,%za4,%zd4.l*1]),%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0xffff,SREGS(%a6)
# mov.l &0x00000004,IREGS+0xc(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
ea_102:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x1000(%a1),%a3
lea EASTORE(%a1),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l ([0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
ea_103:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x1000(%a1),%a3
lea EASTORE(%a1),%a4
mov.l %a3,(%a4)
mov.l &0x2,%a6
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l ([-0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
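# The cases below switch to memory indirect post-indexed forms, where the index
# register and the outer displacement are added after the indirect fetch.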
ea_104:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.w*1,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_105:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_106:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.w*4,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_107:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.w*8,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_108:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.l*1,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_109:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
	mulu.l		([0x10.w,%a4],%d4.l*2,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_110:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.l*4,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_111:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.w,%a4],%d4.l*8,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_112:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.l,%a4],%d4.l*8,0x10.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_113:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a4
mov.l %a3,(%a4)
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x00.l,%a4],%zd4.l*8,0x20.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_114:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a7,%a0
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%a6),%a3
lea EASTORE(%a6),%a7
mov.l %a3,(%a7)
mov.l &0x20,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([%a7],%d4.l*1),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
ea_115:
addq.l &0x1,TESTCTR(%a6)
# movm.l DEF_REGS(%pc),&0x3fff
# clr.l %d2
# mov.l &0x00000002,%d3
# lea EAMEM-0x20(%pc),%a3
# lea EASTORE(%pc),%a4
# mov.l %a3,(%a4)
# mov.l &0x2,%d4
# mov.w &0x0000,ICCR(%a6)
# mov.w &0x0000,%cc
# movm.l &0xffff,IREGS(%a6)
# mulu.l ([EASTORE.l,%za4],%zd4.l*8,0x20.l),%d2:%d3
# mov.w %cc,SCCR(%a6)
# movm.l &0xffff,SREGS(%a6)
# mov.l &0x00000004,IREGS+0xc(%a6)
# bsr.l chkregs
# tst.b %d0
# bne.l error
ea_116:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a6,%a1
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%a1),%a3
lea EASTORE(%a1),%a6
mov.l %a3,(%a6)
add.l &0x10,%a6
mov.l &-0x2,%a5
mov.w &0x0000,ICCR(%a1)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a1)
mulu.l ([-0x10.w,%a6],%a5.l*8,0x10.l),%d2:%d3
mov.w %cc,SCCR(%a1)
movm.l &0xffff,SREGS(%a1)
mov.l &0x00000004,IREGS+0xc(%a1)
mov.l %a1,%a6
bsr.l chkregs
tst.b %d0
bne.l error
mov.l TESTCTR(%a6),%d1
clr.l %d0
rts
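# Note: the PC-relative memory indirect cases below follow the rts above, so they
# are not reached by straight-line execution from the preceding tests.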
ea_117:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.w*1],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_118:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.w*2],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_119:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.w*4],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_120:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.w*8],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_121:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.l*1],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_122:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.l*2],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_123:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.l*4],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_124:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.w,%pc,%d4.l*8],0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_125:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x10.l,%pc,%d4.l*8],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_126:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE+0x00.l,%pc,%zd4.l*8],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_127:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l %a4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([%zpc,%d4.l*1],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_128:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
add.l %a4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([0x10.l,%zpc,%d4.l*1],0x1000.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_129:
addq.l &0x1,TESTCTR(%a6)
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &-0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.l,%zpc,%zd4.l*1]),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_130:
	addq.l		&0x1,TESTCTR(%a6)
	movm.l		DEF_REGS(%pc),&0x3fff
	mov.l		%a6,%a1
	clr.l		%d2
	mov.l		&0x00000002,%d3
	lea		EAMEM+0x1000(%pc),%a3
	lea		EASTORE(%pc),%a4
	mov.l		%a3,(%a4)
	mov.l		&0x2,%a6
	mov.w		&0x0000,ICCR(%a1)
	mov.w		&0x0000,%cc
	movm.l		&0xffff,IREGS(%a1)
	mulu.l		([EASTORE-0x10.w,%pc,%a6.l*8],-0x1000.w),%d2:%d3
	mov.w		%cc,SCCR(%a1)
	movm.l		&0xffff,SREGS(%a1)
	mov.l		&0x00000004,IREGS+0xc(%a1)
	mov.l		%a1,%a6
	bsr.l		chkregs
	tst.b		%d0
	bne.l		error
ea_131:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a7,%a0
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM+0x1000(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &0x2,%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE-0x10.w,%pc,%a7.l*8],-0x1000.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
ea_132:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.w*1,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_133:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_134:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.w*4,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_135:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.w*8,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_136:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x10,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.l*1,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_137:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x8,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_138:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.l*4,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_139:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%d4.l*8,0x10.w),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_140:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
sub.l &0x10,%a4
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.l,%pc],%d4.l*8,0x10.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_141:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &0x2,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.l,%pc],%zd4.l*8,0x20.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_142:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM-0x20(%pc),%a3
lea EASTORE(%pc),%a4
mov.l %a3,(%a4)
mov.l &0x4,%d4
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.l,%zpc],%d4.l*8),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
bsr.l chkregs
tst.b %d0
bne.l error
ea_143:
addq.l &0x1,TESTCTR(%a6)
movm.l DEF_REGS(%pc),&0x3fff
mov.l %a7,%a0
clr.l %d2
mov.l &0x00000002,%d3
lea EAMEM(%pc),%a3
lea EASTORE(%pc),%a6
mov.l %a3,(%a6)
add.l &0x10,%a6
mov.l &-0x2,%a7
mov.w &0x0000,ICCR(%a6)
mov.w &0x0000,%cc
movm.l &0xffff,IREGS(%a6)
mulu.l ([EASTORE.w,%pc],%a7.l*8,0x10.l),%d2:%d3
mov.w %cc,SCCR(%a6)
movm.l &0xffff,SREGS(%a6)
mov.l &0x00000004,IREGS+0xc(%a6)
mov.l %a0,%a7
bsr.l chkregs
tst.b %d0
bne.l error
clr.l %d0
rts
###########################################################
###########################################################
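#
# chkregs: compare the register image captured before the test (IREGS)
# with the one captured after it (SREGS) -- 15 longwords, d0-d7/a0-a6 --
# then compare the initial (ICCR) and resulting (SCCR) condition codes.
# Returns d0 = 0 if everything matches, d0 = 1 on any mismatch.
#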
chkregs:
lea IREGS(%a6),%a0
lea SREGS(%a6),%a1
mov.l &14,%d0
chkregs_loop:
cmp.l (%a0)+,(%a1)+
bne.l chkregs_error
dbra.w %d0,chkregs_loop
mov.w ICCR(%a6),%d0
mov.w SCCR(%a6),%d1
cmp.w %d0,%d1
bne.l chkregs_error
clr.l %d0
rts
chkregs_error:
movq.l &0x1,%d0
rts
error:
mov.l TESTCTR(%a6),%d1
movq.l &0x1,%d0
rts
DEF_REGS:
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
############################################################
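#
# _print_str/_print_num: these appear to be the glue to the host-supplied
# callout table at TESTTOP-0x80. Each routine saves d0, fetches its callout
# offset from the table, restores d0 and tail-jumps to TESTTOP-0x80+offset
# via pea/rtd, leaving the caller's return address on the stack.
#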
_print_str:
mov.l %d0,-(%sp)
mov.l (TESTTOP-0x80+0x0,%pc),%d0
pea (TESTTOP-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
_print_num:
mov.l %d0,-(%sp)
mov.l (TESTTOP-0x80+0x4,%pc),%d0
pea (TESTTOP-0x80,%pc,%d0)
mov.l 0x4(%sp),%d0
rtd &0x4
############################################################
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,258
|
arch/um/kernel/dyn.lds.S
|
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
PROVIDE (__executable_start = START);
. = START + SIZEOF_HEADERS;
.interp : { *(.interp) }
__binary_start = .;
. = ALIGN(4096); /* Init code and data */
_text = .;
INIT_TEXT_SECTION(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
/* Read-only sections, merged into text segment: */
.hash : { *(.hash) }
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rel.init : { *(.rel.init) }
.rela.init : { *(.rela.init) }
.rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
.rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
.rel.fini : { *(.rel.fini) }
.rela.fini : { *(.rela.fini) }
.rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
.rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
.rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
.rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
.rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
.rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
.rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
.rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
.rel.ctors : { *(.rel.ctors) }
.rela.ctors : { *(.rela.ctors) }
.rel.dtors : { *(.rel.dtors) }
.rela.dtors : { *(.rela.dtors) }
.rel.got : { *(.rel.got) }
.rela.got : { *(.rela.got) }
.rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
.rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
.rel.plt : {
*(.rel.plt)
PROVIDE_HIDDEN(__rel_iplt_start = .);
*(.rel.iplt)
PROVIDE_HIDDEN(__rel_iplt_end = .);
}
.rela.plt : {
*(.rela.plt)
PROVIDE_HIDDEN(__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN(__rela_iplt_end = .);
}
.init : {
KEEP (*(.init))
} =0x90909090
.plt : { *(.plt) }
.text : {
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
*(.stub .text.* .gnu.linkonce.t.*)
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
. = ALIGN(PAGE_SIZE);
} =0x90909090
. = ALIGN(PAGE_SIZE);
.syscall_stub : {
__syscall_stub_start = .;
*(.__syscall_stub*)
__syscall_stub_end = .;
}
.fini : {
KEEP (*(.fini))
} =0x90909090
.kstrtab : { *(.kstrtab) }
#include <asm/common.lds.S>
__init_begin = .;
init.data : { INIT_DATA }
__init_end = .;
/* Ensure the __preinit_array_start label is properly aligned. We
could instead move the label definition inside the section, but
the linker would then create the section even if it turns out to
be empty, which isn't pretty. */
. = ALIGN(32 / 8);
.preinit_array : { *(.preinit_array) }
.init_array : { *(.init_array) }
.fini_array : { *(.fini_array) }
.data : {
INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
*(.data..init_irqstack)
DATA_DATA
*(.data.* .gnu.linkonce.d.*)
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
.tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
.eh_frame : { KEEP (*(.eh_frame)) }
.gcc_except_table : { *(.gcc_except_table) }
.dynamic : { *(.dynamic) }
.ctors : {
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin.o(.ctors))
    /* We don't want to include the .ctor section from
       the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors : {
KEEP (*crtbegin.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.got : { *(.got.plt) *(.got) }
_edata = .;
PROVIDE (edata = .);
.bss : {
__bss_start = .;
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections. */
. = ALIGN(32 / 8);
. = ALIGN(32 / 8);
}
__bss_stop = .;
_end = .;
PROVIDE (end = .);
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,474
|
arch/um/kernel/uml.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
OUTPUT_FORMAT(ELF_FORMAT)
OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
/* This must contain the right address - not quite the default ELF one.*/
PROVIDE (__executable_start = START);
/* Static binaries stick stuff here, like the sigreturn trampoline,
* invisibly to objdump. So, just make __binary_start equal to the very
* beginning of the executable, and if there are unmapped pages after this,
* they are forever unusable.
*/
__binary_start = START;
. = START + SIZEOF_HEADERS;
_text = .;
INIT_TEXT_SECTION(0)
. = ALIGN(PAGE_SIZE);
.text :
{
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
*(.fixup)
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
*(.gnu.linkonce.t*)
}
. = ALIGN(PAGE_SIZE);
.syscall_stub : {
__syscall_stub_start = .;
*(.__syscall_stub*)
__syscall_stub_end = .;
}
/*
* These are needed even in a static link, even if they wind up being empty.
* Newer glibc needs these __rel{,a}_iplt_{start,end} symbols.
*/
.rel.plt : {
*(.rel.plt)
PROVIDE_HIDDEN(__rel_iplt_start = .);
*(.rel.iplt)
PROVIDE_HIDDEN(__rel_iplt_end = .);
}
.rela.plt : {
*(.rela.plt)
PROVIDE_HIDDEN(__rela_iplt_start = .);
*(.rela.iplt)
PROVIDE_HIDDEN(__rela_iplt_end = .);
}
#include <asm/common.lds.S>
__init_begin = .;
init.data : { INIT_DATA }
__init_end = .;
.data :
{
INIT_TASK_DATA(KERNEL_STACK_SIZE)
. = ALIGN(KERNEL_STACK_SIZE);
*(.data..init_irqstack)
DATA_DATA
*(.gnu.linkonce.d*)
CONSTRUCTORS
}
.data1 : { *(.data1) }
.ctors :
{
*(.ctors)
}
.dtors :
{
*(.dtors)
}
.got : { *(.got.plt) *(.got) }
.dynamic : { *(.dynamic) }
.tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : { *(.sdata) }
_edata = .;
PROVIDE (edata = .);
. = ALIGN(PAGE_SIZE);
__bss_start = .;
PROVIDE(_bss_start = .);
SBSS(0)
BSS(0)
__bss_stop = .;
_end = .;
PROVIDE (end = .);
STABS_DEBUG
DWARF_DEBUG
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,697
|
arch/um/include/asm/common.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm-generic/vmlinux.lds.h>
.fini : { *(.fini) } =0x9090
_etext = .;
PROVIDE (etext = .);
. = ALIGN(4096);
_sdata = .;
PROVIDE (sdata = .);
RODATA
.unprotected : { *(.unprotected) }
. = ALIGN(4096);
PROVIDE (_unprotected_end = .);
. = ALIGN(4096);
NOTES
EXCEPTION_TABLE(0)
BUG_TABLE
.uml.setup.init : {
__uml_setup_start = .;
*(.uml.setup.init)
__uml_setup_end = .;
}
.uml.help.init : {
__uml_help_start = .;
*(.uml.help.init)
__uml_help_end = .;
}
.uml.postsetup.init : {
__uml_postsetup_start = .;
*(.uml.postsetup.init)
__uml_postsetup_end = .;
}
.init.setup : {
INIT_SETUP(0)
}
PERCPU_SECTION(32)
.initcall.init : {
INIT_CALLS
}
.con_initcall.init : {
CON_INITCALL
}
SECURITY_INIT
.exitcall : {
__exitcall_begin = .;
*(.exitcall.exit)
__exitcall_end = .;
}
.uml.exitcall : {
__uml_exitcall_begin = .;
*(.uml.exitcall.exit)
__uml_exitcall_end = .;
}
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
.altinstr_replacement : { *(.altinstr_replacement) }
  /* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { EXIT_TEXT }
.exit.data : { *(.exit.data) }
.preinit_array : {
__preinit_array_start = .;
*(.preinit_array)
__preinit_array_end = .;
}
.init_array : {
__init_array_start = .;
*(.init_array)
__init_array_end = .;
}
.fini_array : {
__fini_array_start = .;
*(.fini_array)
__fini_array_end = .;
}
. = ALIGN(4096);
.init.ramfs : {
INIT_RAM_FS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,028
|
arch/loongarch/power/hibernate_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Hibernation support specific for loongarch - temporary page tables
*
* Licensed under the GPLv2
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <huhb@lemote.com>
* Wu Zhangjin <wuzhangjin@gmail.com>
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/asm.h>
.text
SYM_FUNC_START(swsusp_arch_save)
la.pcrel t0, saved_regs
PTR_S ra, t0, PT_R1
PTR_S tp, t0, PT_R2
PTR_S sp, t0, PT_R3
PTR_S u0, t0, PT_R21
PTR_S fp, t0, PT_R22
PTR_S s0, t0, PT_R23
PTR_S s1, t0, PT_R24
PTR_S s2, t0, PT_R25
PTR_S s3, t0, PT_R26
PTR_S s4, t0, PT_R27
PTR_S s5, t0, PT_R28
PTR_S s6, t0, PT_R29
PTR_S s7, t0, PT_R30
PTR_S s8, t0, PT_R31
b swsusp_save
SYM_FUNC_END(swsusp_arch_save)
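/*
 * restore_image: walk restore_pblist, copying each saved page from its
 * temporary buffer (PBE_ADDRESS) back to its original location
 * (PBE_ORIG_ADDRESS), then reload the registers stashed by
 * swsusp_arch_save and return 0.
 */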
SYM_FUNC_START(restore_image)
la.pcrel t0, restore_pblist
PTR_L t0, t0, 0
0:
PTR_L t1, t0, PBE_ADDRESS /* source */
PTR_L t2, t0, PBE_ORIG_ADDRESS /* destination */
PTR_LI t3, _PAGE_SIZE
PTR_ADD t3, t3, t1
1:
REG_L t8, t1, 0
REG_S t8, t2, 0
PTR_ADDI t1, t1, SZREG
PTR_ADDI t2, t2, SZREG
bne t1, t3, 1b
PTR_L t0, t0, PBE_NEXT
bnez t0, 0b
la.pcrel t0, saved_regs
PTR_L ra, t0, PT_R1
PTR_L tp, t0, PT_R2
PTR_L sp, t0, PT_R3
	PTR_L	u0, t0, PT_R21
PTR_L fp, t0, PT_R22
PTR_L s0, t0, PT_R23
PTR_L s1, t0, PT_R24
PTR_L s2, t0, PT_R25
PTR_L s3, t0, PT_R26
PTR_L s4, t0, PT_R27
PTR_L s5, t0, PT_R28
PTR_L s6, t0, PT_R29
PTR_L s7, t0, PT_R30
PTR_L s8, t0, PT_R31
PTR_LI a0, 0x0
jirl zero, ra, 0
SYM_FUNC_END(restore_image)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,853
|
arch/loongarch/power/suspend_asm.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Sleep helper for Loongson-3 sleep mode.
*
* Copyright (C) 2020 Loongson Technology Co., Ltd.
* Author: Huacai Chen <chenhuacai@loongson.cn>
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
/* preparatory stuff */
.macro SETUP_SLEEP
addi.d sp, sp, -PT_SIZE
st.d $r1, sp, PT_R1
st.d $r2, sp, PT_R2
st.d $r3, sp, PT_R3
st.d $r4, sp, PT_R4
st.d $r21, sp, PT_R21
st.d $r22, sp, PT_R22
st.d $r23, sp, PT_R23
st.d $r24, sp, PT_R24
st.d $r25, sp, PT_R25
st.d $r26, sp, PT_R26
st.d $r27, sp, PT_R27
st.d $r28, sp, PT_R28
st.d $r29, sp, PT_R29
st.d $r30, sp, PT_R30
st.d $r31, sp, PT_R31
la.pcrel t0, acpi_saved_sp
st.d sp, t0, 0
.endm
.macro SETUP_WAKEUP
ld.d $r1, sp, PT_R1
ld.d $r2, sp, PT_R2
ld.d $r3, sp, PT_R3
ld.d $r4, sp, PT_R4
ld.d $r21, sp, PT_R21
ld.d $r22, sp, PT_R22
ld.d $r23, sp, PT_R23
ld.d $r24, sp, PT_R24
ld.d $r25, sp, PT_R25
ld.d $r26, sp, PT_R26
ld.d $r27, sp, PT_R27
ld.d $r28, sp, PT_R28
ld.d $r29, sp, PT_R29
ld.d $r30, sp, PT_R30
ld.d $r31, sp, PT_R31
.endm
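/*
 * loongarch_suspend_enter saves the register context (and sp, via
 * acpi_saved_sp), flushes the caches and calls the BIOS STR routine with
 * the resume entry point in a0. Firmware resumes at loongarch_wakeup_start,
 * which re-establishes the DMW windows and CRMD before restoring the
 * saved context.
 */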
.text
.align 12
/* Sleep/wakeup code for Loongson-3 */
SYM_FUNC_START(loongarch_suspend_enter)
SETUP_SLEEP
bl cpu_flush_caches
/* Pass RA and SP to BIOS */
addi.d a1, sp, 0
la.pcrel a0, loongarch_wakeup_start
la.pcrel t0, loongarch_suspend_addr
ld.d t0, t0, 0
jirl a0, t0, 0 /* Call BIOS's STR sleep routine */
/*
* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
SYM_INNER_LABEL(loongarch_wakeup_start, SYM_L_GLOBAL)
li.d t0, CSR_DMW0_INIT # UC, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
JUMP_CACHE_ADDR t0, t1
li.w t0, 0xb0
csrwr t0, LOONGARCH_CSR_CRMD
la.pcrel t0, acpi_saved_sp
ld.d sp, t0, 0
SETUP_WAKEUP
addi.d sp, sp, PT_SIZE
jr ra
SYM_FUNC_END(loongarch_suspend_enter)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,610
|
arch/loongarch/vdso/vdso.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Loongson Technology Corporation Limited
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
OUTPUT_FORMAT("elf64-loongarch", "elf64-loongarch", "elf64-loongarch")
OUTPUT_ARCH(loongarch)
SECTIONS
{
PROVIDE(_start = .);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
.text : { *(.text*) } :text
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
_end = .;
PROVIDE(end = .);
/DISCARD/ : {
*(.gnu.attributes)
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}
}
PHDRS
{
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}
VERSION
{
LINUX_2.6 {
#ifndef DISABLE_LOONGARCH_VDSO
global:
__vdso_clock_gettime;
__vdso_gettimeofday;
__vdso_getpid;
__vdso_getuid;
__vdso_getcpu;
#endif
local: *;
};
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,024
|
arch/loongarch/kernel/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf Electronics
* Written by Ralf Baechle and Andreas Busse
* Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Further modifications by David S. Miller and Harald Koerfgen
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <linux/init.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KPRANGEx.
*/
.fill 0x400
SYM_ENTRY(_stext, SYM_L_GLOBAL, SYM_A_NONE)
__REF
.align 12
SYM_CODE_START(kernel_entry) # kernel entry point
# Config direct window and set PG
# 0x8000 xxxx xxxx xxxx
li.d t0, CSR_DMW0_INIT # UC, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN0
# 0x9000 xxxx xxxx xxxx
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
# Enable PG
li.w t0, 0xb0 # PLV=0, IE=0, PG
csrwr t0, LOONGARCH_CSR_CRMD
# Clear EUEN
li.w t0, 0xf
csrxchg zero, t0, LOONGARCH_CSR_EUEN
/* We might not get launched at the address the kernel is linked to,
so we jump there. */
JUMP_CACHE_ADDR t0, t1
la t0, __bss_start # clear .bss
st.d zero, t0, 0
la t1, __bss_stop - LONGSIZE
1:
addi.d t0, t0, LONGSIZE
st.d zero, t0, 0
bne t0, t1, 1b
la t0, fw_arg0
st.d a0, t0, 0 # firmware arguments
la t0, fw_arg1
st.d a1, t0, 0
la t0, fw_arg2
st.d a2, t0, 0
#ifdef CONFIG_PAGE_SIZE_4KB
li.d t0, 0
li.d t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
csrwr zero, PERCPU_BASE_KS
	/* gpr21 is used for the percpu base; it should be initialized to 0 */
or $r21, zero, zero
la tp, init_thread_union
/* Set the SP after an empty pt_regs. */
li.d sp, (_THREAD_SIZE - PT_SIZE)
add.d sp, sp, tp
set_saved_sp sp, t0, t1
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_CRASH_DUMP
	/* Handle the case where a3 passed in by the firmware is not 0 */
li.w t1, 0x123
bne t1, a4, 1f
beqz a3, 1f
move a0, a3
/* Apply the relocations for kdump */
bl relocate_kdump_kernel
b 2f
#endif
1:
/* Copy kernel and apply the relocations */
bl relocate_kernel
2:
/* Repoint the sp into the new kernel image */
PTR_LI sp, _THREAD_SIZE - PT_SIZE
PTR_ADD sp, sp, $r2
set_saved_sp sp, t0, t1
/*
* relocate_kernel returns the entry point either
* in the relocated kernel or the original if for
* some reason relocation failed - jump there now
* with instruction hazard barrier because of the
* newly sync'd icache.
*/
jirl zero, v0, 0
#else
#ifdef CONFIG_KASAN
bl kasan_early_init
#endif
bl start_kernel
#endif
ASM_BUG()
SYM_CODE_END(kernel_entry)
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and tp registers.
*/
.macro smp_slave_setup
#ifdef CONFIG_PAGE_SIZE_4KB
li.d t0, 0
li.d t1, CSR_STFILL
csrxchg t0, t1, LOONGARCH_CSR_IMPCTL1
#endif
li.d t0, CSR_DMW0_INIT # UC, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
JUMP_CACHE_ADDR t0, t1
li.w t0, 0xb0 # PLV=0, IE=0, PG
csrwr t0, LOONGARCH_CSR_CRMD
# Clear EUEN
li.w t0, 0xf
csrxchg zero, t0, LOONGARCH_CSR_EUEN
la.abs t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
.endm
SYM_CODE_START(smp_bootstrap)
smp_slave_setup
bl start_secondary
ASM_BUG()
SYM_CODE_END(smp_bootstrap)
SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
#endif /* CONFIG_SMP */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,168
|
arch/loongarch/kernel/scall-trans-mips64.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/loongarchregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unwind_hints.h>
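/*
 * handle_sys_lat_mips64: syscall entry for MIPS64 (n64) binaries running
 * under binary translation. The arch tag in a7 is checked against
 * TRANS_MIPS_N64, the syscall number is rebased by __NR_mips64_Linux and
 * dispatched through mips64_syscall_table; errors are converted with
 * trans_mips_errno and flagged MIPS-style in PT_R7.
 */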
.align 5
SYM_FUNC_START(handle_sys_lat_mips64)
UNWIND_HINT_REGS
li.d t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, tp, TI_FLAGS # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
choose_abi:
li.d t0, TRANS_ARCH_MASK
and t1, t0, a7
li.d t0, SYS_NUM_MASK
and a7, a7, t0
li.d t0, TRANS_MIPS_N64
bne t0, t1, illegal_syscall
syscall_mips64:
li.w t0, __NR_mips64_Linux
sub.d t2, a7, t0
sltui t0, t2, __NR_mips64_Linux_syscalls + 1
beqz t0, illegal_syscall
/* Syscall number held in a7 */
slli.d t0, t2, 3 # offset into table
la t2, mips64_syscall_table
add.d t0, t2, t0
ld.d t2, t0, 0 # syscall routine
beqz t2, illegal_syscall
jalr t2 # Do The Real Thing (TM)
li.w t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
st.d t0, sp, PT_R7 # set error flag
beqz t0, 1f
ld.d t1, sp, PT_R11 # syscall number
addi.d t1, t1, 1 # +1 for handle_signal
st.d t1, sp, PT_R0 # save it for syscall restarting
sub.d v0, zero, v0
bl trans_mips_errno
1: st.d v0, sp, PT_R4 # result
trans_syscall_exit:
NOT_SIBLING_CALL_HINT
b syscall_exit_partial
/* ------------------------------------------------------------------------ */
syscall_trace_entry:
SAVE_STATIC
move a0, sp
move a1, a7
bl syscall_trace_enter
blt v0, zero, 1f # seccomp failed? Skip syscall
RESTORE_STATIC
ld.d a0, sp, PT_R4 # Restore argument registers
ld.d a1, sp, PT_R5
ld.d a2, sp, PT_R6
ld.d a3, sp, PT_R7
ld.d a4, sp, PT_R8
ld.d a5, sp, PT_R9
ld.d a6, sp, PT_R10
ld.d a7, sp, PT_R11 # Restore syscall (maybe modified)
b choose_abi
1: b trans_syscall_exit
/*
* The system call does not exist in this kernel
*/
illegal_syscall:
li.w v0, ENOSYS # error
st.d v0, sp, PT_R4
li.w t0, 1 # set error flag
st.d t0, sp, PT_R7
b trans_syscall_exit
SYM_FUNC_END(handle_sys_lat_mips64)
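/*
 * Translated MIPS64 (n64) syscall table, indexed by syscall number minus
 * __NR_mips64_Linux; entries that are not supported here map to
 * sys_ni_syscall.
 */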
.align 3
SYM_DATA_START(mips64_syscall_table)
PTR sys_read /* 5000 */
PTR sys_write
PTR sys_ni_syscall
PTR sys_close
PTR sys_ni_syscall /* stat */
PTR sys_ni_syscall /* 5005 */
PTR sys_ni_syscall /* lstat */
PTR sys_ni_syscall
PTR sys_lseek
PTR sys_ni_syscall
PTR sys_mprotect /* 5010 */
PTR sys_munmap
PTR sys_brk
PTR sys_ni_syscall
PTR sys_rt_sigprocmask
PTR sys_ioctl /* 5015 */
PTR sys_pread64
PTR sys_pwrite64
PTR sys_readv
PTR sys_writev
PTR sys_access /* 5020 */
PTR sys_pipe
PTR sys_ni_syscall
PTR sys_sched_yield
PTR sys_mremap
PTR sys_msync /* 5025 */
PTR sys_mincore
PTR sys_madvise
PTR sys_shmget
PTR sys_shmat
PTR sys_shmctl /* 5030 */
PTR sys_dup
PTR sys_dup2
PTR sys_ni_syscall /* pause */
PTR sys_nanosleep
PTR sys_getitimer /* 5035 */
PTR sys_setitimer
PTR sys_alarm
PTR sys_getpid
PTR sys_sendfile64
PTR sys_ni_syscall /* 5040 */
PTR sys_connect
PTR sys_accept
PTR sys_sendto
PTR sys_recvfrom
PTR sys_sendmsg /* 5045 */
PTR sys_recvmsg
PTR sys_shutdown
PTR sys_bind
PTR sys_listen
PTR sys_getsockname /* 5050 */
PTR sys_getpeername
PTR sys_socketpair
PTR sys_setsockopt
PTR sys_getsockopt
PTR sys_ni_syscall /* 5055 */
PTR sys_ni_syscall
PTR sys_execve
PTR sys_exit
PTR sys_wait4
PTR sys_kill /* 5060 */
PTR sys_newuname
PTR sys_semget
PTR sys_semop
PTR sys_semctl
PTR sys_shmdt /* 5065 */
PTR sys_msgget
PTR sys_msgsnd
PTR sys_msgrcv
PTR sys_msgctl
PTR sys_fcntl /* 5070 */
PTR sys_flock
PTR sys_fsync
PTR sys_fdatasync
PTR sys_truncate
PTR sys_ftruncate /* 5075 */
PTR sys_ni_syscall
PTR sys_getcwd
PTR sys_chdir
PTR sys_fchdir
PTR sys_rename /* 5080 */
PTR sys_mkdir
PTR sys_rmdir
PTR sys_ni_syscall
PTR sys_link
PTR sys_unlink /* 5085 */
PTR sys_symlink
PTR sys_readlink
PTR sys_chmod
PTR sys_fchmod
PTR sys_chown /* 5090 */
PTR sys_fchown
PTR sys_lchown
PTR sys_umask
PTR sys_gettimeofday
PTR sys_getrlimit /* 5095 */
PTR sys_getrusage
PTR sys_sysinfo
PTR sys_times
PTR sys_ptrace
PTR sys_getuid /* 5100 */
PTR sys_syslog
PTR sys_getgid
PTR sys_setuid
PTR sys_setgid
PTR sys_geteuid /* 5105 */
PTR sys_getegid
PTR sys_setpgid
PTR sys_getppid
PTR sys_getpgrp
PTR sys_setsid /* 5110 */
PTR sys_setreuid
PTR sys_setregid
PTR sys_getgroups
PTR sys_setgroups
PTR sys_setresuid /* 5115 */
PTR sys_getresuid
PTR sys_setresgid
PTR sys_getresgid
PTR sys_getpgid
PTR sys_setfsuid /* 5120 */
PTR sys_setfsgid
PTR sys_getsid
PTR sys_capget
PTR sys_capset
PTR sys_rt_sigpending /* 5125 */
PTR sys_rt_sigtimedwait
PTR sys_rt_sigqueueinfo
PTR sys_rt_sigsuspend
PTR sys_sigaltstack
PTR sys_ni_syscall /* 5130 */
PTR sys_mknod
PTR sys_personality
PTR sys_ni_syscall
PTR sys_ni_syscall /* statfs */
PTR sys_ni_syscall /* 5135 */
PTR sys_ni_syscall
PTR sys_getpriority
PTR sys_setpriority
PTR sys_sched_setparam
PTR sys_sched_getparam /* 5140 */
PTR sys_sched_setscheduler
PTR sys_sched_getscheduler
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
PTR sys_sched_rr_get_interval /* 5145 */
PTR sys_mlock
PTR sys_munlock
PTR sys_mlockall
PTR sys_munlockall
PTR sys_vhangup /* 5150 */
PTR sys_pivot_root
PTR sys_ni_syscall
PTR sys_prctl
PTR sys_adjtimex
PTR sys_setrlimit /* 5155 */
PTR sys_chroot
PTR sys_sync
PTR sys_acct
PTR sys_settimeofday
PTR sys_mount /* 5160 */
PTR sys_umount
PTR sys_swapon
PTR sys_swapoff
PTR sys_reboot
PTR sys_sethostname /* 5165 */
PTR sys_setdomainname
PTR sys_ni_syscall /* was create_module */
PTR sys_init_module
PTR sys_delete_module
PTR sys_ni_syscall /* 5170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
PTR sys_ni_syscall /* 5175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
PTR sys_readahead
PTR sys_setxattr /* 5180 */
PTR sys_lsetxattr
PTR sys_fsetxattr
PTR sys_getxattr
PTR sys_lgetxattr
PTR sys_fgetxattr /* 5185 */
PTR sys_listxattr
PTR sys_llistxattr
PTR sys_flistxattr
PTR sys_removexattr
PTR sys_lremovexattr /* 5190 */
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
PTR sys_futex
PTR sys_sched_setaffinity /* 5195 */
PTR sys_sched_getaffinity
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_ni_syscall
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
PTR sys_io_submit
PTR sys_io_cancel
PTR sys_exit_group /* 5205 */
PTR sys_lookup_dcookie
PTR sys_epoll_create
PTR sys_epoll_ctl
PTR sys_epoll_wait
PTR sys_remap_file_pages /* 5210 */
PTR sys_rt_sigreturn
PTR sys_set_tid_address
PTR sys_restart_syscall
PTR sys_semtimedop
PTR sys_fadvise64_64 /* 5215 */
PTR sys_timer_create
PTR sys_timer_settime
PTR sys_timer_gettime
PTR sys_timer_getoverrun
PTR sys_timer_delete /* 5220 */
PTR sys_clock_settime
PTR sys_clock_gettime
PTR sys_clock_getres
PTR sys_clock_nanosleep
PTR sys_tgkill /* 5225 */
PTR sys_ni_syscall /* utimes */
PTR sys_mbind
PTR sys_get_mempolicy
PTR sys_set_mempolicy
PTR sys_mq_open /* 5230 */
PTR sys_mq_unlink
PTR sys_mq_timedsend
PTR sys_mq_timedreceive
PTR sys_mq_notify
PTR sys_mq_getsetattr /* 5235 */
PTR sys_ni_syscall /* sys_vserver */
PTR sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key /* 5240 */
PTR sys_keyctl
PTR sys_ni_syscall /* set_thread_area */
PTR sys_inotify_init
PTR sys_inotify_add_watch
PTR sys_inotify_rm_watch /* 5245 */
PTR sys_migrate_pages
PTR sys_ni_syscall
PTR sys_mkdirat
PTR sys_mknodat
PTR sys_fchownat /* 5250 */
PTR sys_ni_syscall /* futimesat */
PTR sys_ni_syscall /* newfstatat */
PTR sys_unlinkat
PTR sys_renameat
PTR sys_linkat /* 5255 */
PTR sys_symlinkat
PTR sys_readlinkat
PTR sys_fchmodat
PTR sys_faccessat
PTR sys_pselect6 /* 5260 */
PTR sys_ppoll
PTR sys_unshare
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee /* 5265 */
PTR sys_vmsplice
PTR sys_move_pages
PTR sys_set_robust_list
PTR sys_get_robust_list
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait
PTR sys_ioprio_set
PTR sys_ioprio_get
PTR sys_utimensat /* 5275 */
PTR sys_ni_syscall /* signalfd */
PTR sys_ni_syscall /* was timerfd */
PTR sys_ni_syscall /* eventfd */
PTR sys_fallocate
PTR sys_timerfd_create /* 5280 */
PTR sys_timerfd_gettime
PTR sys_timerfd_settime
PTR sys_signalfd4
PTR sys_eventfd2
PTR sys_epoll_create1 /* 5285 */
PTR sys_dup3
PTR sys_pipe2
PTR sys_inotify_init1
PTR sys_preadv
PTR sys_pwritev /* 5290 */
PTR sys_rt_tgsigqueueinfo
PTR sys_perf_event_open
PTR sys_accept4
PTR sys_recvmmsg
PTR sys_fanotify_init /* 5295 */
PTR sys_fanotify_mark
PTR sys_ni_syscall
PTR sys_name_to_handle_at
PTR sys_open_by_handle_at
PTR sys_clock_adjtime /* 5300 */
PTR sys_syncfs
PTR sys_sendmmsg
PTR sys_setns
PTR sys_process_vm_readv
PTR sys_process_vm_writev /* 5305 */
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
PTR sys_sched_setattr
PTR sys_sched_getattr /* 5310 */
PTR sys_renameat2
PTR sys_seccomp
PTR sys_getrandom
PTR sys_memfd_create
PTR sys_bpf /* 5315 */
PTR sys_execveat
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 5320 */
PTR sys_preadv2
PTR sys_pwritev2
PTR sys_pkey_mprotect
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
PTR sys_statx
PTR sys_rseq
PTR sys_io_pgetevents
PTR sys_set_user_tp
SYM_DATA_END(mips64_syscall_table)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,495
|
arch/loongarch/kernel/scall64-64.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/loongarchregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unwind_hints.h>
.text
.cfi_sections .debug_frame
.align 5
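/*
 * handle_sys: native syscall entry. Switch to the kernel stack from
 * kernelsp, build a pt_regs frame by hand, advance ERA past the syscall
 * instruction, then dispatch through sys_call_table with the syscall
 * number in a7 (binary-translation syscalls are diverted first when
 * CONFIG_CPU_HAS_LBT is enabled).
 */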
SYM_FUNC_START(handle_sys)
csrrd t0, PERCPU_BASE_KS
la.abs t1, kernelsp
LONG_ADD t1, t1, t0
or t2, sp, zero
LONG_L sp, t1, 0
PTR_ADDI sp, sp, -PT_SIZE
cfi_st t2, PT_R3, 1
cfi_rel_offset sp, PT_R3, 1
cfi_st $r4, PT_R4, 1
cfi_st $r5, PT_R5, 1
csrrd t2, LOONGARCH_CSR_PRMD
LONG_S t2, sp, PT_PRMD
cfi_st $r6, PT_R6, 1
cfi_st ra, PT_R1, 1
cfi_st $r7, PT_R7, 1
cfi_st $r8, PT_R8, 1
cfi_st $r9, PT_R9, 1
cfi_st $r10, PT_R10, 1
cfi_st $r11, PT_R11, 1
csrrd ra, LOONGARCH_CSR_ERA
LONG_S ra, sp, PT_ERA
.cfi_rel_offset 1, PT_ERA
cfi_st tp, PT_R2, 1
cfi_st fp, PT_R22, 1
cfi_st $r21, PT_R21, 1
or $r21, t0, zero
SAVE_STATIC
UNWIND_HINT_REGS
/* syscall coming from user mode */
li.d tp, ~_THREAD_MASK
and tp, tp, sp
TRACE_IRQS_ON_RELOAD
STI
/* save the initial A0 value (needed in signal handlers) */
st.d a0, sp, PT_ORIG_A0
ld.d t1, sp, PT_ERA # skip syscall on return
addi.d t1, t1, 4 # skip to next instruction
st.d t1, sp, PT_ERA
#ifdef CONFIG_CPU_HAS_LBT
li.d t1, TRANS_ARCH_MASK
and t1, t1, a7
bnez t1, lbt_scall
#endif
li.d t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, tp, TI_FLAGS # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
syscall_common:
/* Check to make sure we don't jump to a bogus syscall number. */
li.w t0, __NR_syscalls
bgeu a7, t0, illegal_syscall
/* Syscall number held in a7 */
slli.d t0, a7, 3 # offset into table
la t2, sys_call_table
add.d t0, t2, t0
	ld.d	t2, t0, 0		# syscall routine
beqz t2, illegal_syscall
jalr t2 # Do The Real Thing (TM)
ld.d t1, sp, PT_R11 # syscall number
addi.d t1, t1, 1 # +1 for handle_signal
st.d t1, sp, PT_R0 # save it for syscall restarting
st.d v0, sp, PT_R4 # result
n64_syscall_exit:
NOT_SIBLING_CALL_HINT
b syscall_exit_partial
/* ------------------------------------------------------------------------ */
syscall_trace_entry:
SAVE_STATIC
move a0, sp
move a1, a7
bl syscall_trace_enter
blt v0, zero, 1f # seccomp failed? Skip syscall
RESTORE_STATIC
ld.d a0, sp, PT_R4 # Restore argument registers
ld.d a1, sp, PT_R5
ld.d a2, sp, PT_R6
ld.d a3, sp, PT_R7
ld.d a4, sp, PT_R8
ld.d a5, sp, PT_R9
ld.d a6, sp, PT_R10
ld.d a7, sp, PT_R11 # Restore syscall (maybe modified)
b syscall_common
1: b syscall_exit
#ifdef CONFIG_CPU_HAS_LBT
lbt_scall:
li.d t0, TRANS_MIPS_N64
and t0, t0, t1
bnez t0, handle_sys_lat_mips64 # for mips binary translation.
b handle_sys_lat_i386 # for i386 binary translation.
#endif
/*
* The system call does not exist in this kernel
*/
illegal_syscall:
/* This also isn't a valid syscall, throw an error. */
li.w v0, -ENOSYS # error
st.d v0, sp, PT_R4
b n64_syscall_exit
SYM_FUNC_END(handle_sys)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,408
|
arch/loongarch/kernel/scall-trans-i386.S
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/loongarchregs.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unwind_hints.h>
.align 5
SYM_FUNC_START(handle_sys_lat_i386)
UNWIND_HINT_REGS
li.d t1, _TIF_WORK_SYSCALL_ENTRY
LONG_L t0, tp, TI_FLAGS # syscall tracing enabled?
and t0, t1, t0
bnez t0, syscall_trace_entry
choose_abi:
li.d t0, TRANS_ARCH_MASK
	and	t1, t0, a7		# pick the ARCH-specified tag
li.d t0, SYS_NUM_MASK
and a7, a7, t0 # pick the syscall num
li.d t0, TRANS_I386
bne t0, t1, illegal_syscall
syscall_i386:
sltui t0, a7, __NR_i386_Linux_syscalls + 1
beqz t0, illegal_syscall
/* Syscall number held in a7 */
slli.d t0, a7, 3 # offset into table
la t2, i386_syscall_table
add.d t0, t2, t0
ld.d t2, t0, 0 # syscall routine
beqz t2, illegal_syscall
jalr t2 # Do The Real Thing (TM)
li.w t0, -EMAXERRNO - 1 # error?
sltu t0, t0, v0
st.d t0, sp, PT_R7 # set error flag
beqz t0, 1f
ld.d t1, sp, PT_R11 # syscall number
addi.d t1, t1, 1 # +1 for handle_signal
st.d t1, sp, PT_R0 # save it for syscall restarting
1: st.d v0, sp, PT_R4 # result
trans_i386_syscall_exit:
NOT_SIBLING_CALL_HINT
b syscall_exit_partial
/* ------------------------------------------------------------------------ */
syscall_trace_entry:
SAVE_STATIC
move a0, sp
move a1, a7
bl syscall_trace_enter
blt v0, zero, 1f # seccomp failed? Skip syscall
RESTORE_STATIC
ld.d a0, sp, PT_R4 # Restore argument registers
ld.d a1, sp, PT_R5
ld.d a2, sp, PT_R6
ld.d a3, sp, PT_R7
ld.d a4, sp, PT_R8
ld.d a5, sp, PT_R9
ld.d a6, sp, PT_R10
ld.d a7, sp, PT_R11 # Restore syscall (maybe modified)
b choose_abi
1: b trans_i386_syscall_exit
/*
* The system call does not exist in this kernel
*/
illegal_syscall:
li.w v0, ENOSYS # error
st.d v0, sp, PT_R4
li.w t0, 1 # set error flag
st.d t0, sp, PT_R7
b trans_i386_syscall_exit
SYM_FUNC_END(handle_sys_lat_i386)
.align 3
SYM_DATA_START(i386_syscall_table)
PTR sys_restart_syscall /* 0 */
PTR sys_exit /* 1 */
PTR sys_ni_syscall /* 2 sys_fork */
PTR sys_read /* 3 */
PTR sys_write /* 4 */
PTR sys_open /* 5 */
PTR sys_close /* 6 */
PTR sys_ni_syscall /* 7 sys_waitpid */
PTR sys_creat /* 8 */
PTR sys_link /* 9 */
PTR sys_unlink /* 10 */
PTR sys_execve /* 11 */
PTR sys_chdir /* 12 */
PTR sys_ni_syscall /* 13 sys_time */
PTR sys_mknod /* 14 */
PTR sys_chmod /* 15 */
PTR sys_lchown16 /* 16 */
PTR sys_ni_syscall /* 17 break */
PTR sys_ni_syscall /* 18 sys_stat */
PTR sys_lseek /* 19 */
PTR sys_getpid /* 20 */
PTR sys_mount /* 21 */
PTR sys_ni_syscall /* 22 sys_oldumount */
PTR sys_setuid16 /* 23 */
PTR sys_getuid16 /* 24 */
PTR sys_ni_syscall /* 25 sys_stime */
PTR sys_ptrace /* 26 */
PTR sys_alarm /* 27 */
PTR sys_ni_syscall /* 28 sys_fstat */
PTR sys_ni_syscall /* 29 sys_pause */
PTR sys_ni_syscall /* 30 sys_utime */
PTR sys_ni_syscall /* 31 stty */
PTR sys_ni_syscall /* 32 gtty */
PTR sys_access /* 33 */
PTR sys_ni_syscall /* 34 sys_nice */
PTR sys_ni_syscall /* 35 ftime */
PTR sys_sync /* 36 */
PTR sys_kill /* 37 */
PTR sys_rename /* 38 */
PTR sys_mkdir /* 39 */
PTR sys_rmdir /* 40 */
PTR sys_dup /* 41 */
PTR sys_pipe /* 42 */
PTR sys_times /* 43 */
PTR sys_ni_syscall /* 44 prof */
PTR sys_brk /* 45 */
PTR sys_setgid16 /* 46 */
PTR sys_getgid16 /* 47 */
PTR sys_ni_syscall /* 48 sys_signal */
PTR sys_geteuid16 /* 49 */
PTR sys_getegid16 /* 50 */
PTR sys_acct /* 51 */
PTR sys_umount /* 52 */
PTR sys_ni_syscall /* 53 lock */
PTR sys_ioctl /* 54 */
PTR sys_fcntl /* 55 */
	PTR	sys_ni_syscall			/* 56 mpx */
PTR sys_setpgid /* 57 */
PTR sys_ni_syscall /* 58 ulimit */
PTR sys_ni_syscall /* 59 sys_olduname */
PTR sys_umask /* 60 */
PTR sys_chroot /* 61 */
PTR sys_ustat /* 62 */
PTR sys_dup2 /* 63 */
PTR sys_getppid /* 64 */
PTR sys_getpgrp /* 65 */
PTR sys_setsid /* 66 */
PTR sys_ni_syscall /* 67 sys_sigaction */
PTR sys_sgetmask /* 68 */
PTR sys_ssetmask /* 69 */
PTR sys_setreuid16 /* 70 */
PTR sys_setregid16 /* 71 */
PTR sys_ni_syscall /* 72 sys_sigsuspend */
PTR sys_ni_syscall /* 73 sys_sigpending */
PTR sys_sethostname /* 74 */
PTR sys_setrlimit /* 75 */
PTR sys_ni_syscall /* 76 sys_old_getrlimit */
PTR sys_getrusage /* 77 */
PTR sys_gettimeofday /* 78 */
PTR sys_settimeofday /* 79 */
PTR sys_getgroups16 /* 80 */
PTR sys_setgroups16 /* 81 */
PTR sys_ni_syscall /* 82 sys_old_select */
PTR sys_symlink /* 83 */
PTR sys_ni_syscall /* 84 sys_lstat */
PTR sys_readlink /* 85 */
PTR sys_uselib /* 86 */
PTR sys_swapon /* 87 */
PTR sys_reboot /* 88 */
PTR sys_ni_syscall /* 89 sys_old_readdir */
PTR sys_ni_syscall /* 90 sys_old_mmap */
PTR sys_munmap /* 91 */
PTR sys_truncate /* 92 */
PTR sys_ftruncate /* 93 */
PTR sys_fchmod /* 94 */
PTR sys_fchown16 /* 95 */
PTR sys_getpriority /* 96 */
PTR sys_setpriority /* 97 */
PTR sys_ni_syscall /* 98 profil */
PTR sys_statfs /* 99 */
PTR sys_fstatfs /* 100 */
PTR sys_ni_syscall /* 101 sys_ioperm */
PTR sys_socketcall /* 102 */
PTR sys_syslog /* 103 */
PTR sys_setitimer /* 104 */
PTR sys_getitimer /* 105 */
PTR sys_newstat /* 106 */
PTR sys_newlstat /* 107 */
PTR sys_newfstat /* 108 */
PTR sys_ni_syscall /* 109 sys_uname */
PTR sys_ni_syscall /* 110 sys_iopl */
PTR sys_vhangup /* 111 */
	PTR	sys_ni_syscall			/* 112 idle */
PTR sys_vm86old /* 113 */
PTR sys_wait4 /* 114 */
PTR sys_swapoff /* 115 */
PTR sys_sysinfo /* 116 */
PTR sys_ipc /* 117 */
PTR sys_fsync /* 118 */
PTR sys_ni_syscall /* 119 sys_sigreturn */
PTR sys_clone /* 120 */
PTR sys_setdomainname /* 121 */
PTR sys_newuname /* 122 */
PTR sys_modify_ldt /* 123 */
PTR sys_adjtimex /* 124 */
PTR sys_mprotect /* 125 */
PTR sys_ni_syscall /* 126 sys_sigprocmask */
PTR sys_ni_syscall /* 127 create_module */
PTR sys_init_module /* 128 */
PTR sys_delete_module /* 129 */
PTR sys_ni_syscall /* 130 get_kernel_syms */
PTR sys_quotactl /* 131 */
PTR sys_getpgid /* 132 */
PTR sys_fchdir /* 133 */
PTR sys_bdflush /* 134 */
PTR sys_sysfs /* 135 */
PTR sys_personality /* 136 */
PTR sys_ni_syscall /* 137 afs_syscall */
PTR sys_setfsuid16 /* 138 */
PTR sys_setfsgid16 /* 139 */
PTR sys_ni_syscall /* 140 sys_llseek */
PTR sys_getdents /* 141 */
PTR sys_select /* 142 */
PTR sys_flock /* 143 */
PTR sys_msync /* 144 */
PTR sys_readv /* 145 */
PTR sys_writev /* 146 */
PTR sys_getsid /* 147 */
PTR sys_fdatasync /* 148 */
PTR sys_sysctl /* 149 */
PTR sys_mlock /* 150 */
PTR sys_munlock /* 151 */
PTR sys_mlockall /* 152 */
PTR sys_munlockall /* 153 */
PTR sys_sched_setparam /* 154 */
PTR sys_sched_getparam /* 155 */
PTR sys_sched_setscheduler /* 156 */
PTR sys_sched_getscheduler /* 157 */
PTR sys_sched_yield /* 158 */
PTR sys_sched_get_priority_max /* 159 */
PTR sys_sched_get_priority_min /* 160 */
PTR sys_sched_rr_get_interval /* 161 */
PTR sys_nanosleep /* 162 */
PTR sys_mremap /* 163 */
PTR sys_setresuid16 /* 164 */
PTR sys_getresuid16 /* 165 */
PTR sys_vm86 /* 166 */
PTR sys_ni_syscall /* 167 query_module */
PTR sys_poll /* 168 */
PTR sys_ni_syscall /* 169 nfsservctl */
PTR sys_setresgid16 /* 170 */
PTR sys_getresgid16 /* 171 */
PTR sys_prctl /* 172 */
PTR sys_rt_sigreturn /* 173 */
PTR sys_rt_sigaction /* 174 */
PTR sys_latx_rt_sigprocmask /* 175 */
PTR sys_rt_sigpending /* 176 */
PTR sys_rt_sigtimedwait /* 177 */
PTR sys_rt_sigqueueinfo /* 178 */
PTR sys_rt_sigsuspend /* 179 */
PTR sys_pread64 /* 180 */
PTR sys_pwrite64 /* 181 */
PTR sys_chown16 /* 182 */
PTR sys_getcwd /* 183 */
PTR sys_capget /* 184 */
PTR sys_capset /* 185 */
PTR sys_sigaltstack /* 186 */
PTR sys_sendfile /* 187 */
PTR sys_ni_syscall /* 188 getpmsg */
PTR sys_ni_syscall /* 189 putpmsg */
PTR sys_ni_syscall /* 190 sys_vfork */
PTR sys_getrlimit /* 191 */
PTR sys_mmap_pgoff /* 192 */
PTR sys_ni_syscall /* 193 sys_truncate64 */
PTR sys_ni_syscall /* 194 sys_ftruncate64 */
PTR sys_ni_syscall /* 195 sys_stat64 */
PTR sys_ni_syscall /* 196 sys_lstat64 */
PTR sys_ni_syscall /* 197 sys_fstat64 */
PTR sys_lchown /* 198 */
PTR sys_getuid /* 199 */
PTR sys_getgid /* 200 */
PTR sys_geteuid /* 201 */
PTR sys_getegid /* 202 */
PTR sys_setreuid /* 203 */
PTR sys_setregid /* 204 */
PTR sys_getgroups /* 205 */
PTR sys_setgroups /* 206 */
PTR sys_fchown /* 207 */
PTR sys_setresuid /* 208 */
PTR sys_getresuid /* 209 */
PTR sys_setresgid /* 210 */
PTR sys_getresgid /* 211 */
PTR sys_chown /* 212 */
PTR sys_setuid /* 213 */
PTR sys_setgid /* 214 */
PTR sys_setfsuid /* 215 */
PTR sys_setfsgid /* 216 */
PTR sys_pivot_root /* 217 */
PTR sys_mincore /* 218 */
PTR sys_madvise /* 219 */
PTR sys_getdents64 /* 220 */
PTR sys_ni_syscall /* 221 sys_fcntl64 */
PTR sys_ni_syscall /* 222 is unused */
PTR sys_ni_syscall /* 223 is unused */
PTR sys_gettid /* 224 */
PTR sys_readahead /* 225 */
PTR sys_setxattr /* 226 */
PTR sys_lsetxattr /* 227 */
PTR sys_fsetxattr /* 228 */
PTR sys_getxattr /* 229 */
PTR sys_lgetxattr /* 230 */
PTR sys_fgetxattr /* 231 */
PTR sys_listxattr /* 232 */
PTR sys_llistxattr /* 233 */
PTR sys_flistxattr /* 234 */
PTR sys_removexattr /* 235 */
PTR sys_lremovexattr /* 236 */
PTR sys_fremovexattr /* 237 */
PTR sys_tkill /* 238 */
PTR sys_sendfile64 /* 239 */
PTR sys_futex /* 240 */
PTR sys_sched_setaffinity /* 241 */
PTR sys_sched_getaffinity /* 242 */
PTR sys_ni_syscall /* 243 sys_set_thread_area */
PTR sys_ni_syscall /* 244 sys_get_thread_area */
PTR sys_io_setup /* 245 */
PTR sys_io_destroy /* 246 */
PTR sys_io_getevents /* 247 */
PTR sys_io_submit /* 248 */
PTR sys_io_cancel /* 249 */
PTR sys_fadvise64 /* 250 */
PTR sys_ni_syscall /* 251 is available for reuse*/
PTR sys_exit_group /* 252 */
PTR sys_lookup_dcookie /* 253 */
PTR sys_epoll_create /* 254 */
PTR sys_epoll_ctl /* 255 */
PTR sys_epoll_wait /* 256 */
PTR sys_remap_file_pages /* 257 */
PTR sys_set_tid_address /* 258 */
PTR sys_timer_create /* 259 */
PTR sys_timer_settime /* 260 */
PTR sys_timer_gettime /* 261 */
PTR sys_timer_getoverrun /* 262 */
PTR sys_timer_delete /* 263 */
PTR sys_clock_settime /* 264 */
PTR sys_clock_gettime /* 265 */
PTR sys_clock_getres /* 266 */
PTR sys_clock_nanosleep /* 267 */
PTR sys_statfs64 /* 268 */
PTR sys_fstatfs64 /* 269 */
PTR sys_tgkill /* 270 */
PTR sys_utimes /* 271 */
PTR sys_fadvise64_64 /* 272 */
PTR sys_ni_syscall /* 273 vserver */
PTR sys_mbind /* 274 */
PTR sys_get_mempolicy /* 275 */
PTR sys_set_mempolicy /* 276 */
PTR sys_mq_open /* 277 */
PTR sys_mq_unlink /* 278 */
PTR sys_mq_timedsend /* 279 */
PTR sys_mq_timedreceive /* 280 */
PTR sys_mq_notify /* 281 */
PTR sys_mq_getsetattr /* 282 */
PTR sys_kexec_load /* 283 */
PTR sys_waitid /* 284 */
PTR sys_ni_syscall /* 285 sys_setaltroot */
PTR sys_add_key /* 286 */
PTR sys_request_key /* 287 */
PTR sys_keyctl /* 288 */
PTR sys_ioprio_set /* 289 */
PTR sys_ioprio_get /* 290 */
PTR sys_inotify_init /* 291 */
PTR sys_inotify_add_watch /* 292 */
PTR sys_inotify_rm_watch /* 293 */
PTR sys_migrate_pages /* 294 */
PTR sys_openat /* 295 */
PTR sys_mkdirat /* 296 */
PTR sys_mknodat /* 297 */
PTR sys_fchownat /* 298 */
PTR sys_futimesat /* 299 */
PTR sys_ni_syscall /* 300 sys_fstatat64 */
PTR sys_unlinkat /* 301 */
PTR sys_renameat /* 302 */
PTR sys_linkat /* 303 */
PTR sys_symlinkat /* 304 */
PTR sys_readlinkat /* 305 */
PTR sys_fchmodat /* 306 */
PTR sys_faccessat /* 307 */
PTR sys_pselect6 /* 308 */
PTR sys_ppoll /* 309 */
PTR sys_unshare /* 310 */
PTR sys_set_robust_list /* 311 */
PTR sys_get_robust_list /* 312 */
PTR sys_splice /* 313 */
PTR sys_sync_file_range /* 314 */
PTR sys_tee /* 315 */
PTR sys_vmsplice /* 316 */
PTR sys_move_pages /* 317 */
PTR sys_getcpu /* 318 */
PTR sys_epoll_pwait /* 319 */
PTR sys_utimensat /* 320 */
PTR sys_signalfd /* 321 */
PTR sys_timerfd_create /* 322 */
PTR sys_eventfd /* 323 */
PTR sys_fallocate /* 324 */
PTR sys_timerfd_settime /* 325 */
PTR sys_timerfd_gettime /* 326 */
PTR sys_signalfd4 /* 327 */
PTR sys_eventfd2 /* 328 */
PTR sys_epoll_create1 /* 329 */
PTR sys_dup3 /* 330 */
PTR sys_pipe2 /* 331 */
PTR sys_inotify_init1 /* 332 */
PTR sys_preadv /* 333 */
PTR sys_pwritev /* 334 */
PTR sys_rt_tgsigqueueinfo /* 335 */
PTR sys_perf_event_open /* 336 */
PTR sys_recvmmsg /* 337 */
PTR sys_fanotify_init /* 338 */
PTR sys_fanotify_mark /* 339 */
PTR sys_prlimit64 /* 340 */
PTR sys_name_to_handle_at /* 341 */
PTR sys_open_by_handle_at /* 342 */
PTR sys_clock_adjtime /* 343 */
PTR sys_syncfs /* 344 */
PTR sys_sendmmsg /* 345 */
PTR sys_setns /* 346 */
PTR sys_process_vm_readv /* 347 */
PTR sys_process_vm_writev /* 348 */
PTR sys_kcmp /* 349 */
PTR sys_finit_module /* 350 */
PTR sys_sched_setattr /* 351 */
PTR sys_sched_getattr /* 352 */
PTR sys_renameat2 /* 353 */
PTR sys_seccomp /* 354 */
PTR sys_getrandom /* 355 */
PTR sys_memfd_create /* 356 */
PTR sys_bpf /* 357 */
PTR sys_execveat /* 358 */
PTR sys_socket /* 359 */
PTR sys_socketpair /* 360 */
PTR sys_bind /* 361 */
PTR sys_connect /* 362 */
PTR sys_listen /* 363 */
PTR sys_accept4 /* 364 */
PTR sys_getsockopt /* 365 */
PTR sys_setsockopt /* 366 */
PTR sys_getsockname /* 367 */
PTR sys_getpeername /* 368 */
PTR sys_sendto /* 369 */
PTR sys_sendmsg /* 370 */
PTR sys_recvfrom /* 371 */
PTR sys_recvmsg /* 372 */
PTR sys_shutdown /* 373 */
PTR sys_userfaultfd /* 374 */
PTR sys_membarrier /* 375 */
PTR sys_mlock2 /* 376 */
PTR sys_copy_file_range /* 377 */
PTR sys_preadv2 /* 378 */
PTR sys_pwritev2 /* 379 */
PTR sys_pkey_mprotect /* 380 */
PTR sys_pkey_alloc /* 381 */
PTR sys_pkey_free /* 382 */
PTR sys_statx /* 383 */
PTR sys_ni_syscall /* 384 sys_arch_prctl */
PTR sys_io_pgetevents /* 385 */
PTR sys_rseq /* 386 */
SYM_DATA_END(i386_syscall_table)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,440
|
arch/loongarch/kernel/entry.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2001 MIPS Technologies, Inc.
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#else
#define __ret_from_irq ret_from_exception
#endif
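/*
 * Without CONFIG_PREEMPT there is no kernel preemption to consider, so
 * resume_kernel simply aliases restore_all and a small ret_from_exception
 * stub is provided below. With CONFIG_PREEMPT the __ret_from_irq body
 * doubles as ret_from_exception, and the real resume_kernel rechecks
 * TIF_NEED_RESCHED with interrupts disabled before calling
 * preempt_schedule_irq.
 */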
.text
.align 5
#ifndef CONFIG_PREEMPT
SYM_CODE_START(ret_from_exception)
UNWIND_HINT_REGS
local_irq_disable # preempt stop
b __ret_from_irq
SYM_CODE_END(ret_from_exception)
#endif
SYM_CODE_START(ret_from_irq)
UNWIND_HINT_REGS
LONG_S s0, tp, TI_REGS
b __ret_from_irq
SYM_CODE_END(ret_from_irq)
SYM_CODE_START(__ret_from_irq)
UNWIND_HINT_REGS
/*
* We can be coming here from a syscall done in the kernel space,
* e.g. a failed kernel_execve().
*/
resume_userspace_check:
LONG_L t0, sp, PT_PRMD # returning to kernel mode?
andi t0, t0, PLV_MASK
beqz t0, resume_kernel
resume_userspace:
	local_irq_disable	# make sure we don't miss an
# interrupt setting need_resched
# between sampling and return
LONG_L a2, tp, TI_FLAGS # current->work
andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
bnez t0, work_pending
b restore_all
SYM_CODE_END(__ret_from_irq)
#ifdef CONFIG_PREEMPT
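/*
 * Kernel preemption: reschedule only if preempt_count is zero, the
 * TIF_NEED_RESCHED flag is set and interrupts were enabled in the
 * interrupted context (IE bit set in the saved PRMD).
 */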
resume_kernel:
local_irq_disable
ld.w t0, tp, TI_PRE_COUNT
bnez t0, restore_all
need_resched:
LONG_L t0, tp, TI_FLAGS
andi t1, t0, _TIF_NEED_RESCHED
beqz t1, restore_all
LONG_L t0, sp, PT_PRMD # Interrupts off?
andi t0, t0, CSR_CRMD_IE
beqz t0, restore_all
bl preempt_schedule_irq
b need_resched
#endif
SYM_CODE_START(ret_from_kernel_thread)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
move a0, s1
jirl ra, s0, 0
b syscall_exit
SYM_CODE_END(ret_from_kernel_thread)
SYM_CODE_START(ret_from_fork)
UNWIND_HINT_REGS
bl schedule_tail # a0 = struct task_struct *prev
b syscall_exit
SYM_CODE_END(ret_from_fork)
SYM_CODE_START(syscall_exit)
UNWIND_HINT_REGS
#ifdef CONFIG_DEBUG_RSEQ
move a0, sp
bl rseq_syscall
#endif
local_irq_disable # make sure need_resched and
# signals don't change between
# sampling and return
LONG_L a2, tp, TI_FLAGS # current->work
li.w t0, _TIF_ALLWORK_MASK
and t0, a2, t0
bnez t0, syscall_exit_work
restore_all: # restore full frame
RESTORE_TEMP
RESTORE_STATIC
restore_partial: # restore partial frame
#ifdef CONFIG_TRACE_IRQFLAGS
SAVE_STATIC
SAVE_TEMP
LONG_L v0, sp, PT_PRMD
andi v0, v0, CSR_PRMD_PIE
beqz v0, 1f
bl trace_hardirqs_on
b 2f
1: bl trace_hardirqs_off
2:
RESTORE_TEMP
RESTORE_STATIC
#endif
RESTORE_SOME
RESTORE_SP_AND_RET
work_pending:
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
TRACE_IRQS_OFF
bl schedule
local_irq_disable # make sure need_resched and
# signals don't change between
# sampling and return
LONG_L a2, tp, TI_FLAGS
andi t0, a2, _TIF_WORK_MASK # is there any work to be done
# other than syscall tracing?
beqz t0, restore_all
andi t0, a2, _TIF_NEED_RESCHED
bnez t0, work_resched
work_notifysig: # deal with pending signals and
# notify-resume requests
move a0, sp
li.w a1, 0
bl do_notify_resume # a2 already loaded
b resume_userspace_check
SYM_CODE_END(syscall_exit)
SYM_CODE_START(syscall_exit_partial)
UNWIND_HINT_REGS
#ifdef CONFIG_DEBUG_RSEQ
move a0, sp
bl rseq_syscall
#endif
local_irq_disable # make sure need_resched doesn't
# change between sampling and return
LONG_L a2, tp, TI_FLAGS # current->work
li.w t0, _TIF_ALLWORK_MASK
and t0, t0, a2
beqz t0, restore_partial
SAVE_STATIC
syscall_exit_work:
LONG_L t0, sp, PT_PRMD # returning to kernel mode?
andi t0, t0, PLV_MASK
beqz t0, resume_kernel
li.w t0, _TIF_WORK_SYSCALL_EXIT
and t0, t0, a2 # a2 is preloaded with TI_FLAGS
beqz t0, work_pending # trace bit set?
TRACE_IRQS_ON
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
move a0, sp
bl syscall_trace_leave
b resume_userspace
SYM_CODE_END(syscall_exit_partial)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,671
|
arch/loongarch/kernel/relocate_kernel.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* relocate_kernel.S for kexec
*
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
*/
#include <linux/kexec.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
SYM_CODE_START(relocate_new_kernel)
/*
* a0: EFI boot flag for the new kernel
* a1: Command line pointer for the new kernel
* a2: System table pointer for the new kernel
* a3: Start address to jump to after relocation
* a4: Pointer to the current indirection page entry
* a5: Kdump kernel relocate offset
*/
move s0, a4
/*
* In case of a kdump/crash kernel, the indirection page is not
* populated as the kernel is directly copied to a reserved location
*/
beqz s0, done
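/*
 * Walk the kexec indirection list: each entry is a page address whose
 * low bits encode its type (IND_DESTINATION, IND_INDIRECTION, IND_DONE
 * or IND_SOURCE); source pages are copied word by word to the current
 * destination address held in s3.
 */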
process_entry:
PTR_L s1, s0, 0
PTR_ADDI s0, s0, SZREG
/* destination page */
andi s2, s1, IND_DESTINATION
beqz s2, 1f
li.w t0, ~0x1
and s3, s1, t0 /* store destination addr in s3 */
b process_entry
1:
/* indirection page, update s0 */
andi s2, s1, IND_INDIRECTION
beqz s2, 1f
li.w t0, ~0x2
and s0, s1, t0
b process_entry
1:
/* done page */
andi s2, s1, IND_DONE
beqz s2, 1f
b done
1:
/* source page */
andi s2, s1, IND_SOURCE
beqz s2, process_entry
li.w t0, ~0x8
and s1, s1, t0
li.w s5, (1 << _PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s4, s1, 0
REG_S s4, s3, 0
PTR_ADDI s3, s3, SZREG
PTR_ADDI s1, s1, SZREG
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
b process_entry
done:
ibar 0
dbar 0
/*
* Jump to the new kernel,
* make sure the values of a0, a1, a2, a3 and a5 are not changed.
*/
move t0, a3
move a3, a5
/*
 * The new kernel checks whether a4 is 0x123; this avoids problems
 * caused by some firmware (like old PMON) passing a non-zero a3.
*/
li.w a4, 0x123
jr t0
SYM_CODE_END(relocate_new_kernel)
#ifdef CONFIG_SMP
/*
* Other CPUs should wait until code is relocated and
* then start at the entry point from LOONGARCH_IOCSR_MBUF0.
*/
SYM_CODE_START(kexec_smp_wait)
1: li.w t0, 0x100 /* wait for init loop */
2: addi.w t0, t0, -1 /* limit mailbox access */
bnez t0, 2b
li.w t1, LOONGARCH_IOCSR_MBUF0
iocsrrd.w s0, t1 /* check PC as an indicator */
beqz s0, 1b
iocsrrd.d s0, t1 /* get PC via mailbox */
li.d t0, CAC_BASE
or s0, s0, t0 /* s0 = TO_CACHE(s0) */
jr s0 /* jump to initial PC */
SYM_CODE_END(kexec_smp_wait)
#endif
relocate_new_kernel_end:
SYM_DATA_START(relocate_new_kernel_size)
PTR relocate_new_kernel_end - relocate_new_kernel
SYM_DATA_END(relocate_new_kernel_size)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 16,568
|
arch/loongarch/kernel/fpu.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2019 Pei Huang <huangpei@loongson.cn>
* Copyright (C) 2019 Lu Zeng <zenglu@loongson.cn>
* Copyright (C) 2020 Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/asm-extable.h>
#include <asm/errno.h>
#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/loongarchregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#undef v0
#undef v1
#define FPU_SREG_WIDTH 32
#define SC_FPR0 0
#define SC_FPR1 (SC_FPR0 + FPU_SREG_WIDTH)
#define SC_FPR2 (SC_FPR1 + FPU_SREG_WIDTH)
#define SC_FPR3 (SC_FPR2 + FPU_SREG_WIDTH)
#define SC_FPR4 (SC_FPR3 + FPU_SREG_WIDTH)
#define SC_FPR5 (SC_FPR4 + FPU_SREG_WIDTH)
#define SC_FPR6 (SC_FPR5 + FPU_SREG_WIDTH)
#define SC_FPR7 (SC_FPR6 + FPU_SREG_WIDTH)
#define SC_FPR8 (SC_FPR7 + FPU_SREG_WIDTH)
#define SC_FPR9 (SC_FPR8 + FPU_SREG_WIDTH)
#define SC_FPR10 (SC_FPR9 + FPU_SREG_WIDTH)
#define SC_FPR11 (SC_FPR10 + FPU_SREG_WIDTH)
#define SC_FPR12 (SC_FPR11 + FPU_SREG_WIDTH)
#define SC_FPR13 (SC_FPR12 + FPU_SREG_WIDTH)
#define SC_FPR14 (SC_FPR13 + FPU_SREG_WIDTH)
#define SC_FPR15 (SC_FPR14 + FPU_SREG_WIDTH)
#define SC_FPR16 (SC_FPR15 + FPU_SREG_WIDTH)
#define SC_FPR17 (SC_FPR16 + FPU_SREG_WIDTH)
#define SC_FPR18 (SC_FPR17 + FPU_SREG_WIDTH)
#define SC_FPR19 (SC_FPR18 + FPU_SREG_WIDTH)
#define SC_FPR20 (SC_FPR19 + FPU_SREG_WIDTH)
#define SC_FPR21 (SC_FPR20 + FPU_SREG_WIDTH)
#define SC_FPR22 (SC_FPR21 + FPU_SREG_WIDTH)
#define SC_FPR23 (SC_FPR22 + FPU_SREG_WIDTH)
#define SC_FPR24 (SC_FPR23 + FPU_SREG_WIDTH)
#define SC_FPR25 (SC_FPR24 + FPU_SREG_WIDTH)
#define SC_FPR26 (SC_FPR25 + FPU_SREG_WIDTH)
#define SC_FPR27 (SC_FPR26 + FPU_SREG_WIDTH)
#define SC_FPR28 (SC_FPR27 + FPU_SREG_WIDTH)
#define SC_FPR29 (SC_FPR28 + FPU_SREG_WIDTH)
#define SC_FPR30 (SC_FPR29 + FPU_SREG_WIDTH)
#define SC_FPR31 (SC_FPR30 + FPU_SREG_WIDTH)
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
#undef fp
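/*
 * EX wraps a single load/store with an exception table entry so that a
 * faulting access branches to the common 'fault' handler at the end of
 * this file and the caller sees -EFAULT.
 */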
.macro EX insn, reg, src, offs
.ex\@: \insn \reg, \src, \offs
_asm_extable .ex\@, fault
.endm
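/*
 * EX_V/EX_XV hand-encode the LSX/LASX vector load/store instructions
 * via .word (the first argument is the major opcode) so this file
 * assembles even when the toolchain lacks vector support, and attach
 * the same exception table entry.
 */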
.macro EX_V insn, reg, src, offs
parse_v __insn, \insn
parse_v __offs, \offs
parse_r __src, \src
parse_vr __reg, \reg
.ex\@:
.word __insn << 22 | __offs << 10 | __src << 5 | __reg
_asm_extable .ex\@, fault
.endm
.macro EX_XV insn, reg, src, offs
parse_v __insn, \insn
parse_v __offs, \offs
parse_r __src, \src
parse_xr __reg, \reg
.ex\@:
.word __insn << 22 | __offs << 10 | __src << 5 | __reg
_asm_extable .ex\@, fault
.endm
.macro sc_save_fp base
EX fst.d $f0, \base, SC_FPR0
EX fst.d $f1, \base, SC_FPR1
EX fst.d $f2, \base, SC_FPR2
EX fst.d $f3, \base, SC_FPR3
EX fst.d $f4, \base, SC_FPR4
EX fst.d $f5, \base, SC_FPR5
EX fst.d $f6, \base, SC_FPR6
EX fst.d $f7, \base, SC_FPR7
EX fst.d $f8, \base, SC_FPR8
EX fst.d $f9, \base, SC_FPR9
EX fst.d $f10, \base, SC_FPR10
EX fst.d $f11, \base, SC_FPR11
EX fst.d $f12, \base, SC_FPR12
EX fst.d $f13, \base, SC_FPR13
EX fst.d $f14, \base, SC_FPR14
EX fst.d $f15, \base, SC_FPR15
EX fst.d $f16, \base, SC_FPR16
EX fst.d $f17, \base, SC_FPR17
EX fst.d $f18, \base, SC_FPR18
EX fst.d $f19, \base, SC_FPR19
EX fst.d $f20, \base, SC_FPR20
EX fst.d $f21, \base, SC_FPR21
EX fst.d $f22, \base, SC_FPR22
EX fst.d $f23, \base, SC_FPR23
EX fst.d $f24, \base, SC_FPR24
EX fst.d $f25, \base, SC_FPR25
EX fst.d $f26, \base, SC_FPR26
EX fst.d $f27, \base, SC_FPR27
EX fst.d $f28, \base, SC_FPR28
EX fst.d $f29, \base, SC_FPR29
EX fst.d $f30, \base, SC_FPR30
EX fst.d $f31, \base, SC_FPR31
.endm
.macro sc_restore_fp base
EX fld.d $f0, \base, SC_FPR0
EX fld.d $f1, \base, SC_FPR1
EX fld.d $f2, \base, SC_FPR2
EX fld.d $f3, \base, SC_FPR3
EX fld.d $f4, \base, SC_FPR4
EX fld.d $f5, \base, SC_FPR5
EX fld.d $f6, \base, SC_FPR6
EX fld.d $f7, \base, SC_FPR7
EX fld.d $f8, \base, SC_FPR8
EX fld.d $f9, \base, SC_FPR9
EX fld.d $f10, \base, SC_FPR10
EX fld.d $f11, \base, SC_FPR11
EX fld.d $f12, \base, SC_FPR12
EX fld.d $f13, \base, SC_FPR13
EX fld.d $f14, \base, SC_FPR14
EX fld.d $f15, \base, SC_FPR15
EX fld.d $f16, \base, SC_FPR16
EX fld.d $f17, \base, SC_FPR17
EX fld.d $f18, \base, SC_FPR18
EX fld.d $f19, \base, SC_FPR19
EX fld.d $f20, \base, SC_FPR20
EX fld.d $f21, \base, SC_FPR21
EX fld.d $f22, \base, SC_FPR22
EX fld.d $f23, \base, SC_FPR23
EX fld.d $f24, \base, SC_FPR24
EX fld.d $f25, \base, SC_FPR25
EX fld.d $f26, \base, SC_FPR26
EX fld.d $f27, \base, SC_FPR27
EX fld.d $f28, \base, SC_FPR28
EX fld.d $f29, \base, SC_FPR29
EX fld.d $f30, \base, SC_FPR30
EX fld.d $f31, \base, SC_FPR31
.endm
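/*
 * sc_save_fcc packs the eight condition flag registers $fcc0-$fcc7
 * into a single 64-bit word, one byte per flag, and stores it at
 * \base; sc_restore_fcc performs the inverse unpacking.
 */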
.macro sc_save_fcc base, tmp0, tmp1
movcf2gr \tmp0, $fcc0
move \tmp1, \tmp0
movcf2gr \tmp0, $fcc1
bstrins.d \tmp1, \tmp0, 15, 8
movcf2gr \tmp0, $fcc2
bstrins.d \tmp1, \tmp0, 23, 16
movcf2gr \tmp0, $fcc3
bstrins.d \tmp1, \tmp0, 31, 24
movcf2gr \tmp0, $fcc4
bstrins.d \tmp1, \tmp0, 39, 32
movcf2gr \tmp0, $fcc5
bstrins.d \tmp1, \tmp0, 47, 40
movcf2gr \tmp0, $fcc6
bstrins.d \tmp1, \tmp0, 55, 48
movcf2gr \tmp0, $fcc7
bstrins.d \tmp1, \tmp0, 63, 56
EX st.d \tmp1, \base, 0
.endm
.macro sc_restore_fcc base, tmp0, tmp1
EX ld.d \tmp0, \base, 0
bstrpick.d \tmp1, \tmp0, 7, 0
movgr2cf $fcc0, \tmp1
bstrpick.d \tmp1, \tmp0, 15, 8
movgr2cf $fcc1, \tmp1
bstrpick.d \tmp1, \tmp0, 23, 16
movgr2cf $fcc2, \tmp1
bstrpick.d \tmp1, \tmp0, 31, 24
movgr2cf $fcc3, \tmp1
bstrpick.d \tmp1, \tmp0, 39, 32
movgr2cf $fcc4, \tmp1
bstrpick.d \tmp1, \tmp0, 47, 40
movgr2cf $fcc5, \tmp1
bstrpick.d \tmp1, \tmp0, 55, 48
movgr2cf $fcc6, \tmp1
bstrpick.d \tmp1, \tmp0, 63, 56
movgr2cf $fcc7, \tmp1
.endm
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
EX st.w \tmp0, \base, 0
#if defined(CONFIG_CPU_HAS_LBT)
/* TM bit is always 0 if LBT not supported */
andi \tmp0, \tmp0, FPU_CSR_TM
beqz \tmp0, 1f
bstrins.d \tmp0, $r0, FPU_CSR_TM_SHIFT, FPU_CSR_TM_SHIFT
movgr2fcsr fcsr0, \tmp0
1:
#endif
.endm
.macro sc_restore_fcsr base, tmp0
EX ld.w \tmp0, \base, 0
movgr2fcsr fcsr0, \tmp0
.endm
#if defined(CONFIG_CPU_HAS_LBT)
.macro sc_save_scr base, tmp0
parse_r __reg, \tmp0
.word 0x3 << 10 | 0x0 << 5 | __reg
EX st.d \tmp0, \base, 0
.word 0x3 << 10 | 0x1 << 5 | __reg
EX st.d \tmp0, \base, 8
.word 0x3 << 10 | 0x2 << 5 | __reg
EX st.d \tmp0, \base, 16
.word 0x3 << 10 | 0x3 << 5 | __reg
EX st.d \tmp0, \base, 24
.endm
.macro sc_restore_scr base, tmp0
parse_r __reg, \tmp0
EX ld.d \tmp0, \base, 0
.word 0x2 << 10 | __reg << 5 | 0x0
EX ld.d \tmp0, \base, 8
.word 0x2 << 10 | __reg << 5 | 0x1
EX ld.d \tmp0, \base, 16
.word 0x2 << 10 | __reg << 5 | 0x2
EX ld.d \tmp0, \base, 24
.word 0x2 << 10 | __reg << 5 | 0x3
.endm
#endif
.macro sc_save_lsx base
EX_V 0xb1 $vr0, \base, SC_FPR0
EX_V 0xb1 $vr1, \base, SC_FPR1
EX_V 0xb1 $vr2, \base, SC_FPR2
EX_V 0xb1 $vr3, \base, SC_FPR3
EX_V 0xb1 $vr4, \base, SC_FPR4
EX_V 0xb1 $vr5, \base, SC_FPR5
EX_V 0xb1 $vr6, \base, SC_FPR6
EX_V 0xb1 $vr7, \base, SC_FPR7
EX_V 0xb1 $vr8, \base, SC_FPR8
EX_V 0xb1 $vr9, \base, SC_FPR9
EX_V 0xb1 $vr10, \base, SC_FPR10
EX_V 0xb1 $vr11, \base, SC_FPR11
EX_V 0xb1 $vr12, \base, SC_FPR12
EX_V 0xb1 $vr13, \base, SC_FPR13
EX_V 0xb1 $vr14, \base, SC_FPR14
EX_V 0xb1 $vr15, \base, SC_FPR15
EX_V 0xb1 $vr16, \base, SC_FPR16
EX_V 0xb1 $vr17, \base, SC_FPR17
EX_V 0xb1 $vr18, \base, SC_FPR18
EX_V 0xb1 $vr19, \base, SC_FPR19
EX_V 0xb1 $vr20, \base, SC_FPR20
EX_V 0xb1 $vr21, \base, SC_FPR21
EX_V 0xb1 $vr22, \base, SC_FPR22
EX_V 0xb1 $vr23, \base, SC_FPR23
EX_V 0xb1 $vr24, \base, SC_FPR24
EX_V 0xb1 $vr25, \base, SC_FPR25
EX_V 0xb1 $vr26, \base, SC_FPR26
EX_V 0xb1 $vr27, \base, SC_FPR27
EX_V 0xb1 $vr28, \base, SC_FPR28
EX_V 0xb1 $vr29, \base, SC_FPR29
EX_V 0xb1 $vr30, \base, SC_FPR30
EX_V 0xb1 $vr31, \base, SC_FPR31
.endm
.macro sc_restore_lsx base
EX_V 0xb0 $vr0, \base, SC_FPR0
EX_V 0xb0 $vr1, \base, SC_FPR1
EX_V 0xb0 $vr2, \base, SC_FPR2
EX_V 0xb0 $vr3, \base, SC_FPR3
EX_V 0xb0 $vr4, \base, SC_FPR4
EX_V 0xb0 $vr5, \base, SC_FPR5
EX_V 0xb0 $vr6, \base, SC_FPR6
EX_V 0xb0 $vr7, \base, SC_FPR7
EX_V 0xb0 $vr8, \base, SC_FPR8
EX_V 0xb0 $vr9, \base, SC_FPR9
EX_V 0xb0 $vr10, \base, SC_FPR10
EX_V 0xb0 $vr11, \base, SC_FPR11
EX_V 0xb0 $vr12, \base, SC_FPR12
EX_V 0xb0 $vr13, \base, SC_FPR13
EX_V 0xb0 $vr14, \base, SC_FPR14
EX_V 0xb0 $vr15, \base, SC_FPR15
EX_V 0xb0 $vr16, \base, SC_FPR16
EX_V 0xb0 $vr17, \base, SC_FPR17
EX_V 0xb0 $vr18, \base, SC_FPR18
EX_V 0xb0 $vr19, \base, SC_FPR19
EX_V 0xb0 $vr20, \base, SC_FPR20
EX_V 0xb0 $vr21, \base, SC_FPR21
EX_V 0xb0 $vr22, \base, SC_FPR22
EX_V 0xb0 $vr23, \base, SC_FPR23
EX_V 0xb0 $vr24, \base, SC_FPR24
EX_V 0xb0 $vr25, \base, SC_FPR25
EX_V 0xb0 $vr26, \base, SC_FPR26
EX_V 0xb0 $vr27, \base, SC_FPR27
EX_V 0xb0 $vr28, \base, SC_FPR28
EX_V 0xb0 $vr29, \base, SC_FPR29
EX_V 0xb0 $vr30, \base, SC_FPR30
EX_V 0xb0 $vr31, \base, SC_FPR31
.endm
.macro sc_save_lasx base
EX_XV 0xb3 $xr0, \base, SC_FPR0
EX_XV 0xb3 $xr1, \base, SC_FPR1
EX_XV 0xb3 $xr2, \base, SC_FPR2
EX_XV 0xb3 $xr3, \base, SC_FPR3
EX_XV 0xb3 $xr4, \base, SC_FPR4
EX_XV 0xb3 $xr5, \base, SC_FPR5
EX_XV 0xb3 $xr6, \base, SC_FPR6
EX_XV 0xb3 $xr7, \base, SC_FPR7
EX_XV 0xb3 $xr8, \base, SC_FPR8
EX_XV 0xb3 $xr9, \base, SC_FPR9
EX_XV 0xb3 $xr10, \base, SC_FPR10
EX_XV 0xb3 $xr11, \base, SC_FPR11
EX_XV 0xb3 $xr12, \base, SC_FPR12
EX_XV 0xb3 $xr13, \base, SC_FPR13
EX_XV 0xb3 $xr14, \base, SC_FPR14
EX_XV 0xb3 $xr15, \base, SC_FPR15
EX_XV 0xb3 $xr16, \base, SC_FPR16
EX_XV 0xb3 $xr17, \base, SC_FPR17
EX_XV 0xb3 $xr18, \base, SC_FPR18
EX_XV 0xb3 $xr19, \base, SC_FPR19
EX_XV 0xb3 $xr20, \base, SC_FPR20
EX_XV 0xb3 $xr21, \base, SC_FPR21
EX_XV 0xb3 $xr22, \base, SC_FPR22
EX_XV 0xb3 $xr23, \base, SC_FPR23
EX_XV 0xb3 $xr24, \base, SC_FPR24
EX_XV 0xb3 $xr25, \base, SC_FPR25
EX_XV 0xb3 $xr26, \base, SC_FPR26
EX_XV 0xb3 $xr27, \base, SC_FPR27
EX_XV 0xb3 $xr28, \base, SC_FPR28
EX_XV 0xb3 $xr29, \base, SC_FPR29
EX_XV 0xb3 $xr30, \base, SC_FPR30
EX_XV 0xb3 $xr31, \base, SC_FPR31
.endm
.macro sc_restore_lasx base
EX_XV 0xb2 $xr0, \base, SC_FPR0
EX_XV 0xb2 $xr1, \base, SC_FPR1
EX_XV 0xb2 $xr2, \base, SC_FPR2
EX_XV 0xb2 $xr3, \base, SC_FPR3
EX_XV 0xb2 $xr4, \base, SC_FPR4
EX_XV 0xb2 $xr5, \base, SC_FPR5
EX_XV 0xb2 $xr6, \base, SC_FPR6
EX_XV 0xb2 $xr7, \base, SC_FPR7
EX_XV 0xb2 $xr8, \base, SC_FPR8
EX_XV 0xb2 $xr9, \base, SC_FPR9
EX_XV 0xb2 $xr10, \base, SC_FPR10
EX_XV 0xb2 $xr11, \base, SC_FPR11
EX_XV 0xb2 $xr12, \base, SC_FPR12
EX_XV 0xb2 $xr13, \base, SC_FPR13
EX_XV 0xb2 $xr14, \base, SC_FPR14
EX_XV 0xb2 $xr15, \base, SC_FPR15
EX_XV 0xb2 $xr16, \base, SC_FPR16
EX_XV 0xb2 $xr17, \base, SC_FPR17
EX_XV 0xb2 $xr18, \base, SC_FPR18
EX_XV 0xb2 $xr19, \base, SC_FPR19
EX_XV 0xb2 $xr20, \base, SC_FPR20
EX_XV 0xb2 $xr21, \base, SC_FPR21
EX_XV 0xb2 $xr22, \base, SC_FPR22
EX_XV 0xb2 $xr23, \base, SC_FPR23
EX_XV 0xb2 $xr24, \base, SC_FPR24
EX_XV 0xb2 $xr25, \base, SC_FPR25
EX_XV 0xb2 $xr26, \base, SC_FPR26
EX_XV 0xb2 $xr27, \base, SC_FPR27
EX_XV 0xb2 $xr28, \base, SC_FPR28
EX_XV 0xb2 $xr29, \base, SC_FPR29
EX_XV 0xb2 $xr30, \base, SC_FPR30
EX_XV 0xb2 $xr31, \base, SC_FPR31
.endm
/*
* Save a thread's fp context.
*/
SYM_FUNC_START(_save_fp)
/*
 * Since the TM bit of FCSR may affect fpr0-fpr7,
 * the FCSR must be saved before the FPRs.
*/
fpu_save_csr a0 t1
fpu_save_double a0 t1 # clobbers t1
fpu_save_cc a0 t1 t2 # clobbers t1, t2
jirl zero, ra, 0
SYM_FUNC_END(_save_fp)
EXPORT_SYMBOL(_save_fp)
/*
* Restore a thread's fp context.
*/
SYM_FUNC_START(_restore_fp)
fpu_restore_double a0 t1 # clobbers t1
/*
 * Since the TM bit of FCSR may affect fpr0-fpr7,
 * the FCSR must be restored after the FPRs.
*/
fpu_restore_csr a0 t1
fpu_restore_cc a0 t1 t2 # clobbers t1, t2
jirl zero, ra, 0
SYM_FUNC_END(_restore_fp)
#ifdef CONFIG_CPU_HAS_LSX
/*
* Save a thread's LSX vector context.
*/
SYM_FUNC_START(_save_lsx)
lsx_save_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx)
EXPORT_SYMBOL(_save_lsx)
/*
* Restore a thread's LSX vector context.
*/
SYM_FUNC_START(_restore_lsx)
lsx_restore_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx)
SYM_FUNC_START(_save_lsx_upper)
lsx_save_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx_upper)
SYM_FUNC_START(_restore_lsx_upper)
lsx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx_upper)
SYM_FUNC_START(_init_lsx_upper)
lsx_init_all_upper t1
jirl zero, ra, 0
SYM_FUNC_END(_init_lsx_upper)
#endif
#ifdef CONFIG_CPU_HAS_LASX
/*
* Save a thread's LASX vector context.
*/
SYM_FUNC_START(_save_lasx)
lasx_save_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx)
EXPORT_SYMBOL(_save_lasx)
/*
* Restore a thread's LASX vector context.
*/
SYM_FUNC_START(_restore_lasx)
lasx_restore_all a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx)
SYM_FUNC_START(_save_lasx_upper)
lasx_save_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx_upper)
SYM_FUNC_START(_restore_lasx_upper)
lasx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx_upper)
SYM_FUNC_START(_init_lasx_upper)
lasx_init_all_upper t1
jirl zero, ra, 0
SYM_FUNC_END(_init_lasx_upper)
#endif
/*
 * Load the FPU with signalling NaNs. The bit pattern we use has the
 * property that it represents a signalling NaN whether interpreted as
 * single or as double precision.
*
* The value to initialize fcsr0 to comes in $a0.
*/
SYM_FUNC_START(_init_fpu)
li.w t1, CSR_EUEN_FPEN
csrxchg t1, t1, LOONGARCH_CSR_EUEN
movgr2fcsr fcsr0, a0
li.w t1, -1 # SNaN
movgr2fr.d $f0, t1
movgr2fr.d $f1, t1
movgr2fr.d $f2, t1
movgr2fr.d $f3, t1
movgr2fr.d $f4, t1
movgr2fr.d $f5, t1
movgr2fr.d $f6, t1
movgr2fr.d $f7, t1
movgr2fr.d $f8, t1
movgr2fr.d $f9, t1
movgr2fr.d $f10, t1
movgr2fr.d $f11, t1
movgr2fr.d $f12, t1
movgr2fr.d $f13, t1
movgr2fr.d $f14, t1
movgr2fr.d $f15, t1
movgr2fr.d $f16, t1
movgr2fr.d $f17, t1
movgr2fr.d $f18, t1
movgr2fr.d $f19, t1
movgr2fr.d $f20, t1
movgr2fr.d $f21, t1
movgr2fr.d $f22, t1
movgr2fr.d $f23, t1
movgr2fr.d $f24, t1
movgr2fr.d $f25, t1
movgr2fr.d $f26, t1
movgr2fr.d $f27, t1
movgr2fr.d $f28, t1
movgr2fr.d $f29, t1
movgr2fr.d $f30, t1
movgr2fr.d $f31, t1
jirl zero, ra, 0
SYM_FUNC_END(_init_fpu)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_save_fp_context)
sc_save_fcc a1 t1 t2
sc_save_fcsr a2 t1
sc_save_fp a0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_fp_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_restore_fp_context)
sc_restore_fp a0
sc_restore_fcc a1 t1 t2
sc_restore_fcsr a2 t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_fp_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_save_lsx_context)
sc_save_fcc a1, t0, t1
sc_save_fcsr a2, t0
sc_save_lsx a0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_lsx_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_restore_lsx_context)
sc_restore_lsx a0
sc_restore_fcc a1, t1, t2
sc_restore_fcsr a2, t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_lsx_context)
#if defined(CONFIG_CPU_HAS_LBT)
/*
* a0: scr
*/
SYM_FUNC_START(_save_scr_context)
parse_r __reg, t0
/* eflags */
.word 0x17 << 18 | 0x3f << 10 | 0 << 5 | __reg
EX st.w t0, a1, 0
sc_save_scr a0, t0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_scr_context)
/*
* a0: scr
*/
SYM_FUNC_START(_restore_scr_context)
parse_r __reg, t0
/* eflags */
EX ld.w t0, a1, 0
.word 0x17 << 18 | 0x3f << 10 | 1 << 5 | __reg
sc_restore_scr a0, t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_scr_context)
#endif
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_save_lasx_context)
sc_save_fcc a1, t0, t1
sc_save_fcsr a2, t0
sc_save_lasx a0
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_save_lasx_context)
/*
* a0: fpregs
* a1: fcc
* a2: fcsr
*/
SYM_FUNC_START(_restore_lasx_context)
sc_restore_lasx a0
sc_restore_fcc a1, t1, t2
sc_restore_fcsr a2, t1
li.w a0, 0 # success
jirl zero, ra, 0
SYM_FUNC_END(_restore_lasx_context)
.type fault, @function
fault: li.w a0, -EFAULT # failure
jirl zero, ra, 0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,047
|
arch/loongarch/kernel/mcount_dyn.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Based on arch/arm64/kernel/entry-ftrace.S
*
* Copyright (C) 2013 Linaro Limited
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#include <asm/export.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/ftrace.h>
#include <asm/unwind_hints.h>
.text
/*
 * Due to -fpatchable-function-entry=2, the compiler inserts 2 NOPs before the
 * regular C function prologue. By the time the PC arrives here, the last 2
 * instructions executed were:
 *	move	t0, ra
 *	bl	callsite	(for modules, callsite is a trampoline)
 *
 * The module trampoline looks like this:
 *	addu16i.d	t1, zero, callsite[31:16]
 *	lu32i.d		t1, callsite[51:32]
 *	lu52i.d		t1, t1, callsite[63:52]
 *	jirl		zero, t1, callsite[15:0] >> 2
 *
 * See arch/loongarch/kernel/ftrace_dyn.c for details. Note that the T series
 * regs are available and safe to use here because every C function follows
 * the LoongArch psABI.
*/
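/*
 * ftrace_regs_entry builds a struct pt_regs on the stack: the tracer
 * return address goes to PT_ERA, the traced function's parent ra (in
 * t0) goes to PT_R1, and allregs=1 additionally saves the remaining
 * GPRs for DYNAMIC_FTRACE_WITH_REGS handlers.
 */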
.macro ftrace_regs_entry allregs=0
PTR_ADDI sp, sp, -PT_SIZE
/* Save trace function ra at PT_ERA */
PTR_S ra, sp, PT_ERA
/* Save parent ra at PT_R1(RA) */
PTR_S t0, sp, PT_R1
PTR_S a0, sp, PT_R4
PTR_S a1, sp, PT_R5
PTR_S a2, sp, PT_R6
PTR_S a3, sp, PT_R7
PTR_S a4, sp, PT_R8
PTR_S a5, sp, PT_R9
PTR_S a6, sp, PT_R10
PTR_S a7, sp, PT_R11
PTR_S fp, sp, PT_R22
.if \allregs
PTR_S t0, sp, PT_R12
PTR_S t1, sp, PT_R13
PTR_S t2, sp, PT_R14
PTR_S t3, sp, PT_R15
PTR_S t4, sp, PT_R16
PTR_S t5, sp, PT_R17
PTR_S t6, sp, PT_R18
PTR_S t7, sp, PT_R19
PTR_S t8, sp, PT_R20
PTR_S s0, sp, PT_R23
PTR_S s1, sp, PT_R24
PTR_S s2, sp, PT_R25
PTR_S s3, sp, PT_R26
PTR_S s4, sp, PT_R27
PTR_S s5, sp, PT_R28
PTR_S s6, sp, PT_R29
PTR_S s7, sp, PT_R30
PTR_S s8, sp, PT_R31
PTR_S tp, sp, PT_R2
/* Clear it for later use as a flag sometimes. */
PTR_S zero, sp, PT_R0
PTR_S $r21, sp, PT_R21
.endif
PTR_ADDI t8, sp, PT_SIZE
PTR_S t8, sp, PT_R3
UNWIND_HINT_REGS
.endm
SYM_CODE_START(ftrace_caller)
UNWIND_HINT
ftrace_regs_entry allregs=0
b ftrace_common
SYM_CODE_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
SYM_CODE_START(ftrace_regs_caller)
UNWIND_HINT
ftrace_regs_entry allregs=1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
#endif
SYM_CODE_START(ftrace_common)
UNWIND_HINT_REGS
PTR_ADDI a0, ra, -8 /* arg0: ip */
move a1, t0 /* arg1: parent_ip */
la.pcrel t1, function_trace_op
PTR_L a2, t1, 0 /* arg2: op */
move a3, sp /* arg3: regs */
.globl ftrace_call
ftrace_call:
bl ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
nop /* b ftrace_graph_caller */
#endif
/*
 * We do not use any S series regs in this assembly code, and all calls
 * are to C functions which save the S series regs themselves, so there
 * is no need to restore them. The T series regs are available and safe
 * at the callsite, so there is no need to restore them either.
*/
ftrace_common_return:
PTR_L a0, sp, PT_R4
PTR_L a1, sp, PT_R5
PTR_L a2, sp, PT_R6
PTR_L a3, sp, PT_R7
PTR_L a4, sp, PT_R8
PTR_L a5, sp, PT_R9
PTR_L a6, sp, PT_R10
PTR_L a7, sp, PT_R11
PTR_L fp, sp, PT_R22
PTR_L ra, sp, PT_R1
PTR_L t0, sp, PT_ERA
PTR_ADDI sp, sp, PT_SIZE
UNWIND_HINT
jirl zero, t0, 0
SYM_CODE_END(ftrace_common)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(ftrace_graph_caller)
UNWIND_HINT_REGS
PTR_L a0, sp, PT_ERA
PTR_ADDI a0, a0, -8 /* arg0: self_addr */
PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)
SYM_CODE_START(return_to_handler)
UNWIND_HINT
/* save return value regs */
PTR_ADDI sp, sp, -2 * SZREG
PTR_S a0, sp, 0
PTR_S a1, sp, SZREG
move a0, zero /* no frame pointer check for now */
bl ftrace_return_to_handler
move ra, a0 /* parent ra */
/* restore return value regs */
PTR_L a0, sp, 0
PTR_L a1, sp, SZREG
PTR_ADDI sp, sp, 2 * SZREG
jirl zero, ra, 0
SYM_CODE_END(return_to_handler)
#endif
SYM_FUNC_START(ftrace_stub)
jirl zero, ra, 0
SYM_FUNC_END(ftrace_stub)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,474
|
arch/loongarch/kernel/switch.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1994, 1995, 1996, by Andreas Busse
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 MIPS Technologies, Inc.
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#include <asm/asm.h>
#include <asm/loongarchregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/asmmacro.h>
/*
* task_struct *resume(task_struct *prev, task_struct *next,
* struct thread_info *next_ti,
* void *sched_ra, void *sched_cfa)
*/
.align 5
SYM_FUNC_START(resume)
cpu_save_nonscratch a0
stptr.d ra, a0, THREAD_REG01
stptr.d a3, a0, THREAD_SCHED_RA
stptr.d a4, a0, THREAD_SCHED_CFA
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
PTR_LA t8, __stack_chk_guard
LONG_L t9, a1, TASK_STACK_CANARY
LONG_S t9, t8, 0
#endif
/*
* The order of restoring the registers takes care of the race
 * updating tp, sp and kernelsp without disabling interrupts.
*/
move tp, a2
cpu_restore_nonscratch a1
li.w t0, _THREAD_SIZE
PTR_ADD t0, t0, tp
set_saved_sp t0, t1, t2
jr ra
SYM_FUNC_END(resume)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,319
|
arch/loongarch/kernel/mcount.S
|
/*
* LoongArch specific _mcount support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive for
* more details.
*
* Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
* Copyright (C) 2010 DSLab, Lanzhou University, China
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/export.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/ftrace.h>
.text
#define MCOUNT_STACK_SIZE (2 * SZREG)
#define MCOUNT_S0_OFFSET (0)
#define MCOUNT_RA_OFFSET (SZREG)
.macro MCOUNT_SAVE_REGS
PTR_ADDI sp, sp, -MCOUNT_STACK_SIZE
PTR_S s0, sp, MCOUNT_S0_OFFSET
PTR_S ra, sp, MCOUNT_RA_OFFSET
move s0, a0
.endm
.macro MCOUNT_RESTORE_REGS
move a0, s0
PTR_L ra, sp, MCOUNT_RA_OFFSET
PTR_L s0, sp, MCOUNT_S0_OFFSET
PTR_ADDI sp, sp, MCOUNT_STACK_SIZE
.endm
SYM_FUNC_START(_mcount)
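/*
 * If ftrace_trace_function still points at ftrace_stub, no tracer is
 * registered, so skip ahead to the graph tracer checks; otherwise call
 * the registered tracer with the self and parent return addresses.
 */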
la t1, ftrace_stub
la t2, ftrace_trace_function /* Prepare t2 for (1) */
PTR_L t2, t2, 0
beq t1, t2, fgraph_trace
MCOUNT_SAVE_REGS
move a0, ra /* arg0: self return address */
move a1, s0 /* arg1: parent's return address */
jirl ra, t2, 0 /* (1) call *ftrace_trace_function */
MCOUNT_RESTORE_REGS
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t1, ftrace_stub
la t3, ftrace_graph_return
PTR_L t3, t3, 0
bne t1, t3, ftrace_graph_caller
la t1, ftrace_graph_entry_stub
la t3, ftrace_graph_entry
PTR_L t3, t3, 0
bne t1, t3, ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
jirl zero, ra, 0
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_FUNC_START(ftrace_graph_caller)
MCOUNT_SAVE_REGS
PTR_ADDI a0, ra, -4 /* arg0: Callsite self return addr */
PTR_ADDI a1, sp, MCOUNT_STACK_SIZE /* arg1: Callsite sp */
move a2, s0 /* arg2: Callsite parent ra */
bl prepare_ftrace_return
MCOUNT_RESTORE_REGS
jirl zero, ra, 0
SYM_FUNC_END(ftrace_graph_caller)
SYM_FUNC_START(return_to_handler)
PTR_ADDI sp, sp, -2 * SZREG
PTR_S a0, sp, 0
PTR_S a1, sp, SZREG
bl ftrace_return_to_handler
/* restore the real parent address: a0 -> ra */
move ra, a0
PTR_L a0, sp, 0
PTR_L a1, sp, SZREG
PTR_ADDI sp, sp, 2 * SZREG
jirl zero, ra, 0
SYM_FUNC_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,908
|
arch/loongarch/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/orc_lookup.h>
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
* ensure that it has .bss alignment (64K).
*/
#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(loongarch)
ENTRY(kernel_entry)
PHDRS {
text PT_LOAD FLAGS(7); /* RWX */
note PT_NOTE FLAGS(4); /* R__ */
}
jiffies = jiffies_64;
SECTIONS
{
. = VMLINUX_LOAD_ADDRESS;
/* Read-only */
_text = .; /* Text and read-only data */
.text : {
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.*)
*(.fixup)
*(.gnu.warning)
} :text = 0
_etext = .; /* End of text section */
/*
* struct alt_inst entries. From the header (alternative.h):
* "Alternative instructions for different CPU types or capabilities"
* Think locking instructions on spinlocks.
*/
. = ALIGN(4);
.altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
__alt_instructions = .;
*(.altinstructions)
__alt_instructions_end = .;
}
#define NOTES_HEADER :note
NOTES :text NOTES_HEADER
.dummy : { *(.dummy) } :text
_sdata = .; /* Start of data section */
RODATA
/* writeable */
.data : { /* Data */
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(1 << CONFIG_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_L1_CACHE_SHIFT)
DATA_DATA
CONSTRUCTORS
}
BUG_TABLE
ORC_UNWIND_TABLE
_gp = . + 0x8000;
.lit8 : {
*(.lit8)
}
.lit4 : {
*(.lit4)
}
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata : {
*(.sdata)
}
_edata = .; /* End of data section */
/* Will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
. = ALIGN(4);
.loongarch.machines.init : AT(ADDR(.loongarch.machines.init) - LOAD_OFFSET) {
__loongarch_machines_start = .;
*(.loongarch.machines.init)
__loongarch_machines_end = .;
}
/* .exit.text is discarded at runtime, not link time, to deal with
* references from .rodata
*/
.exit.text : {
EXIT_TEXT
}
.exit.data : {
EXIT_DATA
}
#ifdef CONFIG_SMP
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
#ifdef CONFIG_RELOCATABLE
. = ALIGN(4);
.data.reloc : {
_relocation_start = .;
/*
* Space for relocation table
* This needs to be filled so that the
* relocs tool can overwrite the content.
* An invalid value is left at the start of the
* section to abort relocation if the table
* has not been filled in.
*/
LONG(0xFFFFFFFF);
FILL(0);
. += CONFIG_RELOCATION_TABLE_SIZE - 4;
_relocation_end = .;
}
#endif
/*
* Align to 64K in attempt to eliminate holes before the
* .bss..swapper_pg_dir section at the start of .bss. This
* also satisfies PAGE_SIZE alignment as the largest page size
* allowed is 64K.
*/
. = ALIGN(0x10000);
__init_end = .;
/* freed after init ends here */
/*
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
* gets that alignment. .sbss should be empty, so there will be
 * no holes after __init_end.
 */
BSS_SECTION(0, 0x10000, 8)
_end = . ;
/* These mark the ABI of the kernel for debuggers. */
.mdebug.abi32 : {
KEEP(*(.mdebug.abi32))
}
.mdebug.abi64 : {
KEEP(*(.mdebug.abi64))
}
STABS_DEBUG
DWARF_DEBUG
/* These must appear regardless of . */
.gptab.sdata : {
*(.gptab.data)
*(.gptab.sdata)
}
.gptab.sbss : {
*(.gptab.bss)
*(.gptab.sbss)
}
/* Sections to be discarded */
DISCARDS
/DISCARD/ : {
/* ABI crap starts here */
*(.LOONGARCH.options)
*(.options)
*(.eh_frame)
}
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,783
|
arch/loongarch/kernel/genex.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
#include <asm/unwind_hints.h>
SYM_FUNC_START(except_vec_cex)
b cache_parity_error
nop
SYM_FUNC_END(except_vec_cex)
.align 5 /* 32 byte rollback region */
SYM_FUNC_START(__cpu_wait)
/* start of rollback region */
LONG_L t0, tp, TI_FLAGS
nop
andi t0, t0, _TIF_NEED_RESCHED
bnez t0, 1f
nop
nop
nop
idle 0
/* end of rollback region (the region size must be power of two) */
1:
jirl zero, ra, 0
SYM_FUNC_END(__cpu_wait)
/*
* Common Vectored Interrupt code
 * Complete the register saves and invoke do_vi(); the irq number is passed in $v0
*/
SYM_FUNC_START(except_vec_vi_handler)
UNWIND_HINT_REGS
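/*
 * If the interrupt hit inside the 32-byte __cpu_wait rollback region,
 * round ERA down to the start of that region so the need_resched check
 * in __cpu_wait is re-executed on return.
 */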
la t1, __cpu_wait
ld.d t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
xori t0, t0, 0x1f
bne t0, t1, 1f
st.d t0, sp, PT_ERA
1:
LOAD_TEMP_FROM_KSCRATCH
SAVE_TEMP
SAVE_STATIC
CLI
#ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0
TRACE_IRQS_OFF
move v0, s0
#endif
LONG_L s0, tp, TI_REGS
LONG_S sp, tp, TI_REGS
/*
* SAVE_ALL ensures we are using a valid kernel stack for the thread.
* Check if we are already using the IRQ stack.
*/
move s1, sp # Preserve the sp
/* Get IRQ stack for this CPU */
la t1, irq_stack
LONG_ADD t1, t1, $r21
LONG_L t0, t1, 0
# Check if already on IRQ stack
PTR_LI t1, ~(_THREAD_SIZE-1)
and t1, t1, sp
beq t0, t1, 2f
/* Switch to IRQ stack */
li.w t1, _IRQ_STACK_START
PTR_ADD sp, t0, t1
/* Save task's sp on IRQ stack so that unwinding can follow it */
LONG_S s1, sp, 0
2:
/*
 * v0 is equal to a0
* v0: irq number for do_vi
*/
la t0, do_vi
jirl ra, t0, 0
/* Restore sp */
move sp, s1
la t0, ret_from_irq
jirl zero, t0, 0
SYM_FUNC_END(except_vec_vi_handler)
.macro __build_clear_none
.endm
.macro __build_clear_sti
TRACE_IRQS_ON
STI
.endm
.macro __build_clear_cli
CLI
TRACE_IRQS_OFF
.endm
.macro __build_clear_fpe
movfcsr2gr a1, fcsr0
CLI
TRACE_IRQS_OFF
.endm
.macro __build_clear_csti
csrrd t0, LOONGARCH_CSR_CRMD
PTR_S t0, sp, PT_CRMD
csrrd t0, LOONGARCH_CSR_EUEN
PTR_S t0, sp, PT_EUEN
csrrd t0, LOONGARCH_CSR_ECFG
PTR_S t0, sp, PT_ECFG
csrrd t0, LOONGARCH_CSR_ESTAT
PTR_S t0, sp, PT_ESTAT
csrrd t0, LOONGARCH_CSR_BADV
PTR_S t0, sp, PT_BVADDR
STI
.endm
.macro __build_clear_kmode
csrrd t0, LOONGARCH_CSR_CRMD
PTR_S t0, sp, PT_CRMD
csrrd t0, LOONGARCH_CSR_EUEN
PTR_S t0, sp, PT_EUEN
csrrd t0, LOONGARCH_CSR_ECFG
PTR_S t0, sp, PT_ECFG
csrrd t0, LOONGARCH_CSR_ESTAT
PTR_S t0, sp, PT_ESTAT
csrrd t0, LOONGARCH_CSR_BADV
PTR_S t0, sp, PT_BVADDR
KMODE
.endm
.macro __BUILD_HANDLER exception handler clear ext
.align 5
SYM_FUNC_START(handle_\exception)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
SAVE_ALL
SYM_INNER_LABEL(handle_\exception\ext, SYM_L_GLOBAL)
__build_clear_\clear
move a0, sp
la.abs t0, do_\handler
jirl ra, t0, 0
la.abs t0, ret_from_exception
jirl zero, t0, 0
SYM_FUNC_END(handle_\exception)
.endm
.macro BUILD_HANDLER exception handler clear verbose
__BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER ade ade kmode
BUILD_HANDLER bp bp kmode
BUILD_HANDLER ri ri csti
BUILD_HANDLER fpu fpu sti
BUILD_HANDLER fpe fpe fpe
BUILD_HANDLER lsx lsx sti
BUILD_HANDLER lasx lasx sti
BUILD_HANDLER lbt lbt kmode
BUILD_HANDLER ale ale kmode
BUILD_HANDLER bce bce kmode
BUILD_HANDLER reserved reserved csti /* others */
BUILD_HANDLER watch watch none
SYM_FUNC_START(handle_sys_wrap)
la.abs t0, handle_sys
jirl zero, t0, 0
SYM_FUNC_END(handle_sys_wrap)
/*
* Macro helper for vectored interrupt handler.
*/
.macro BUILD_VI_HANDLER num
.align 5
SYM_FUNC_START(handle_vi_\num)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
SAVE_SOME #docfi=1
addi.d v0, zero, \num
la.abs v1, except_vec_vi_handler
jirl zero, v1, 0
SYM_FUNC_END(handle_vi_\num)
.endm
BUILD_VI_HANDLER 0
BUILD_VI_HANDLER 1
BUILD_VI_HANDLER 2
BUILD_VI_HANDLER 3
BUILD_VI_HANDLER 4
BUILD_VI_HANDLER 5
BUILD_VI_HANDLER 6
BUILD_VI_HANDLER 7
BUILD_VI_HANDLER 8
BUILD_VI_HANDLER 9
BUILD_VI_HANDLER 10
BUILD_VI_HANDLER 11
BUILD_VI_HANDLER 12
BUILD_VI_HANDLER 13
.section .data, "aw"
.align 3
.globl vector_table
vector_table:
PTR handle_vi_0
PTR handle_vi_1
PTR handle_vi_2
PTR handle_vi_3
PTR handle_vi_4
PTR handle_vi_5
PTR handle_vi_6
PTR handle_vi_7
PTR handle_vi_8
PTR handle_vi_9
PTR handle_vi_10
PTR handle_vi_11
PTR handle_vi_12
PTR handle_vi_13
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,808
|
arch/loongarch/lib/clear_user.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
#include <asm/asm-extable.h>
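/*
 * Fault fixup handlers, referenced by the exception table entries at
 * the end of this file: they compute and return the number of bytes
 * that were left uncleared when a store to user space faulted.
 */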
.irp to, 0, 1, 2, 3, 4, 5, 6, 7
.L_fixup_handle_\to\():
sub.d v0, a2, a0
addi.d v0, v0, (\to) * (-8)
jr ra
.endr
.irp to, 0, 2, 4
.L_fixup_handle_s\to\():
addi.d v0, a1, -\to
jr ra
.endr
SYM_FUNC_START(__clear_user)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __clear_user_generic", \
"b __clear_user_fast", CPU_FEATURE_UAL
SYM_FUNC_END(__clear_user)
EXPORT_SYMBOL(__clear_user)
/*
* unsigned long __clear_user_generic(void *addr, unsigned long size)
*
* a0: addr
* a1: size
*/
SYM_FUNC_START(__clear_user_generic)
beqz a1, 2f
1: st.b zero, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, -1
bgt a1, zero, 1b
2: move v0, a1
jr ra
_asm_extable 1b, .L_fixup_handle_s0
SYM_FUNC_END(__clear_user_generic)
/*
* unsigned long __clear_user_fast(void *addr, unsigned long size)
*
* a0: addr
* a1: size
*/
SYM_FUNC_START(__clear_user_fast)
sltui t0, a1, 9
bnez t0, .Lsmall
add.d a2, a0, a1
0: st.d zero, a0, 0
/* align up address */
addi.d a0, a0, 8
bstrins.d a0, zero, 2, 0
addi.d a3, a2, -64
bgeu a0, a3, .Llt64
/* set 64 bytes at a time */
.Lloop64:
1: st.d zero, a0, 0
2: st.d zero, a0, 8
3: st.d zero, a0, 16
4: st.d zero, a0, 24
5: st.d zero, a0, 32
6: st.d zero, a0, 40
7: st.d zero, a0, 48
8: st.d zero, a0, 56
addi.d a0, a0, 64
bltu a0, a3, .Lloop64
/* set the remaining bytes */
.Llt64:
addi.d a3, a2, -32
bgeu a0, a3, .Llt32
9: st.d zero, a0, 0
10: st.d zero, a0, 8
11: st.d zero, a0, 16
12: st.d zero, a0, 24
addi.d a0, a0, 32
.Llt32:
addi.d a3, a2, -16
bgeu a0, a3, .Llt16
13: st.d zero, a0, 0
14: st.d zero, a0, 8
addi.d a0, a0, 16
.Llt16:
addi.d a3, a2, -8
bgeu a0, a3, .Llt8
15: st.d zero, a0, 0
.Llt8:
16: st.d zero, a2, -8
/* return */
move v0, zero
jr ra
.align 4
.Lsmall:
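/*
 * Sizes 0..8: jump into a table of 16-byte aligned stubs below;
 * pcaddi yields the table base (4 instructions ahead) and the size
 * scaled by 16 selects the matching stub.
 */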
pcaddi t0, 4
slli.d a2, a1, 4
add.d t0, t0, a2
jr t0
.align 4
move v0, zero
jr ra
.align 4
17: st.b zero, a0, 0
move v0, zero
jr ra
.align 4
18: st.h zero, a0, 0
move v0, zero
jr ra
.align 4
19: st.h zero, a0, 0
20: st.b zero, a0, 2
move v0, zero
jr ra
.align 4
21: st.w zero, a0, 0
move v0, zero
jr ra
.align 4
22: st.w zero, a0, 0
23: st.b zero, a0, 4
move v0, zero
jr ra
.align 4
24: st.w zero, a0, 0
25: st.h zero, a0, 4
move v0, zero
jr ra
.align 4
26: st.w zero, a0, 0
27: st.w zero, a0, 3
move v0, zero
jr ra
.align 4
28: st.d zero, a0, 0
move v0, zero
jr ra
/* fixup and ex_table */
_asm_extable 0b, .L_fixup_handle_0
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_1
_asm_extable 3b, .L_fixup_handle_2
_asm_extable 4b, .L_fixup_handle_3
_asm_extable 5b, .L_fixup_handle_4
_asm_extable 6b, .L_fixup_handle_5
_asm_extable 7b, .L_fixup_handle_6
_asm_extable 8b, .L_fixup_handle_7
_asm_extable 9b, .L_fixup_handle_0
_asm_extable 10b, .L_fixup_handle_1
_asm_extable 11b, .L_fixup_handle_2
_asm_extable 12b, .L_fixup_handle_3
_asm_extable 13b, .L_fixup_handle_0
_asm_extable 14b, .L_fixup_handle_1
_asm_extable 15b, .L_fixup_handle_0
_asm_extable 16b, .L_fixup_handle_1
_asm_extable 17b, .L_fixup_handle_s0
_asm_extable 18b, .L_fixup_handle_s0
_asm_extable 19b, .L_fixup_handle_s0
_asm_extable 20b, .L_fixup_handle_s2
_asm_extable 21b, .L_fixup_handle_s0
_asm_extable 22b, .L_fixup_handle_s0
_asm_extable 23b, .L_fixup_handle_s4
_asm_extable 24b, .L_fixup_handle_s0
_asm_extable 25b, .L_fixup_handle_s4
_asm_extable 26b, .L_fixup_handle_s0
_asm_extable 27b, .L_fixup_handle_s4
_asm_extable 28b, .L_fixup_handle_s0
SYM_FUNC_END(__clear_user_fast)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,946
|
arch/loongarch/lib/memcpy.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START_WEAK(memcpy)
SYM_FUNC_START_ALIAS(__memcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)
/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_generic)
move a3, a0
beqz a2, 2f
1: ld.b t0, a1, 0
st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move v0, a3
jr ra
SYM_FUNC_END(__memcpy_generic)
.align 5
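/*
 * __memcpy_small handles copies of 0..8 bytes by jumping into a table
 * of 32-byte aligned stubs: pcaddi yields the table base (8
 * instructions ahead) and n scaled by 32 selects the matching stub.
 */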
SYM_FUNC_START_NOALIGN(__memcpy_small)
pcaddi t0, 8
slli.d a2, a2, 5
add.d t0, t0, a2
jr t0
.align 5
0: jr ra
.align 5
1: ld.b t0, a1, 0
st.b t0, a0, 0
jr ra
.align 5
2: ld.h t0, a1, 0
st.h t0, a0, 0
jr ra
.align 5
3: ld.h t0, a1, 0
ld.b t1, a1, 2
st.h t0, a0, 0
st.b t1, a0, 2
jr ra
.align 5
4: ld.w t0, a1, 0
st.w t0, a0, 0
jr ra
.align 5
5: ld.w t0, a1, 0
ld.b t1, a1, 4
st.w t0, a0, 0
st.b t1, a0, 4
jr ra
.align 5
6: ld.w t0, a1, 0
ld.h t1, a1, 4
st.w t0, a0, 0
st.h t1, a0, 4
jr ra
.align 5
7: ld.w t0, a1, 0
ld.w t1, a1, 3
st.w t0, a0, 0
st.w t1, a0, 3
jr ra
.align 5
8: ld.d t0, a1, 0
st.d t0, a0, 0
jr ra
SYM_FUNC_END(__memcpy_small)
/*
* void *__memcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__memcpy_fast)
sltui t0, a2, 9
bnez t0, __memcpy_small
add.d a3, a1, a2
add.d a2, a0, a2
ld.d a6, a1, 0
ld.d a7, a3, -8
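/*
 * a6/a7 hold the first and last 8 bytes of the source; they are stored
 * last (at .Llt8) so the possibly unaligned head and tail are covered
 * without extra branching.
 */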
/* align up destination address */
andi t1, a0, 7
sub.d t0, zero, t1
addi.d t0, t0, 8
add.d a1, a1, t0
add.d a5, a0, t0
addi.d a4, a3, -64
bgeu a1, a4, .Llt64
/* copy 64 bytes at a time */
.Lloop64:
ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
ld.d t4, a1, 32
ld.d t5, a1, 40
ld.d t6, a1, 48
ld.d t7, a1, 56
addi.d a1, a1, 64
st.d t0, a5, 0
st.d t1, a5, 8
st.d t2, a5, 16
st.d t3, a5, 24
st.d t4, a5, 32
st.d t5, a5, 40
st.d t6, a5, 48
st.d t7, a5, 56
addi.d a5, a5, 64
bltu a1, a4, .Lloop64
/* copy the remaining bytes */
.Llt64:
addi.d a4, a3, -32
bgeu a1, a4, .Llt32
ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
addi.d a1, a1, 32
st.d t0, a5, 0
st.d t1, a5, 8
st.d t2, a5, 16
st.d t3, a5, 24
addi.d a5, a5, 32
.Llt32:
addi.d a4, a3, -16
bgeu a1, a4, .Llt16
ld.d t0, a1, 0
ld.d t1, a1, 8
addi.d a1, a1, 16
st.d t0, a5, 0
st.d t1, a5, 8
addi.d a5, a5, 16
.Llt16:
addi.d a4, a3, -8
bgeu a1, a4, .Llt8
ld.d t0, a1, 0
st.d t0, a5, 0
.Llt8:
st.d a6, a0, 0
st.d a7, a2, -8
/* return */
jr ra
SYM_FUNC_END(__memcpy_fast)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,477
|
arch/loongarch/lib/unaligned.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
#include <asm/errno.h>
#include <asm/asm-extable.h>
.L_fixup_handle_unaligned:
li.w v0, -EFAULT
jr ra
/*
 * unsigned long unaligned_read(void *addr, void *value, unsigned long n, bool sign)
 *
 * a0: addr
 * a1: value
 * a2: n
 * a3: sign (sign-extend the loaded value when non-zero)
*/
SYM_FUNC_START(unaligned_read)
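/*
 * Read the n bytes one at a time, from the highest address down, and
 * assemble them into t2; a3 selects a sign-extending (ld.b) or
 * zero-extending (ld.bu) load for the most significant byte.
 */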
beqz a2, 5f
li.w t1, 8
li.w t2, 0
addi.d t0, a2, -1
mul.d t1, t0, t1
add.d a0, a0, t0
beq a3, zero, 2f
1: ld.b t3, a0, 0
b 3f
2: ld.bu t3, a0, 0
3: sll.d t3, t3, t1
or t2, t2, t3
addi.d t1, t1, -8
addi.d a0, a0, -1
addi.d a2, a2, -1
bgt a2, zero, 2b
4: st.d t2, a1, 0
move v0, a2
jr ra
5: li.w v0, -EFAULT
jr ra
_asm_extable 1b, .L_fixup_handle_unaligned
_asm_extable 2b, .L_fixup_handle_unaligned
_asm_extable 4b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_read)
/*
* unsigned long unaligned_write(void *addr, unsigned long value, unsigned long n)
*
* a0: addr
* a1: value
* a2: n
*/
SYM_FUNC_START(unaligned_write)
beqz a2, 3f
li.w t0, 0
1: srl.d t1, a1, t0
2: st.b t1, a0, 0
addi.d t0, t0, 8
addi.d a2, a2, -1
addi.d a0, a0, 1
bgt a2, zero, 1b
move v0, a2
jr ra
3: li.w v0, -EFAULT
jr ra
_asm_extable 2b, .L_fixup_handle_unaligned
SYM_FUNC_END(unaligned_write)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,479
|
arch/loongarch/lib/memmove.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
SYM_FUNC_START_WEAK(memmove)
SYM_FUNC_START_ALIAS(__memmove)
blt a0, a1, __memcpy /* dst < src, memcpy */
blt a1, a0, __rmemcpy /* src < dst, rmemcpy */
jr ra /* dst == src, return */
SYM_FUNC_END(memmove)
SYM_FUNC_END_ALIAS(__memmove)
EXPORT_SYMBOL(memmove)
EXPORT_SYMBOL(__memmove)
SYM_FUNC_START(__rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(__rmemcpy)
/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_generic)
move a3, a0
beqz a2, 2f
add.d a0, a0, a2
add.d a1, a1, a2
1: ld.b t0, a1, -1
st.b t0, a0, -1
addi.d a0, a0, -1
addi.d a1, a1, -1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move v0, a3
jr ra
SYM_FUNC_END(__rmemcpy_generic)
/*
* void *__rmemcpy_fast(void *dst, const void *src, size_t n)
*
* a0: dst
* a1: src
* a2: n
*/
SYM_FUNC_START(__rmemcpy_fast)
sltui t0, a2, 9
bnez t0, __memcpy_small
add.d a3, a1, a2
add.d a2, a0, a2
ld.d a6, a1, 0
ld.d a7, a3, -8
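/*
 * a6/a7 hold the first and last 8 bytes of the source; they are stored
 * last (at .Llt8) so the possibly unaligned head and tail are covered
 * without extra branching.
 */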
/* align up destination address */
andi t1, a2, 7
sub.d a3, a3, t1
sub.d a5, a2, t1
addi.d a4, a1, 64
bgeu a4, a3, .Llt64
/* copy 64 bytes at a time */
.Lloop64:
ld.d t0, a3, -8
ld.d t1, a3, -16
ld.d t2, a3, -24
ld.d t3, a3, -32
ld.d t4, a3, -40
ld.d t5, a3, -48
ld.d t6, a3, -56
ld.d t7, a3, -64
addi.d a3, a3, -64
st.d t0, a5, -8
st.d t1, a5, -16
st.d t2, a5, -24
st.d t3, a5, -32
st.d t4, a5, -40
st.d t5, a5, -48
st.d t6, a5, -56
st.d t7, a5, -64
addi.d a5, a5, -64
bltu a4, a3, .Lloop64
/* copy the remaining bytes */
.Llt64:
addi.d a4, a1, 32
bgeu a4, a3, .Llt32
ld.d t0, a3, -8
ld.d t1, a3, -16
ld.d t2, a3, -24
ld.d t3, a3, -32
addi.d a3, a3, -32
st.d t0, a5, -8
st.d t1, a5, -16
st.d t2, a5, -24
st.d t3, a5, -32
addi.d a5, a5, -32
.Llt32:
addi.d a4, a1, 16
bgeu a4, a3, .Llt16
ld.d t0, a3, -8
ld.d t1, a3, -16
addi.d a3, a3, -16
st.d t0, a5, -8
st.d t1, a5, -16
addi.d a5, a5, -16
.Llt16:
addi.d a4, a1, 8
bgeu a4, a3, .Llt8
ld.d t0, a3, -8
st.d t0, a5, -8
.Llt8:
st.d a6, a0, 0
st.d a7, a2, -8
/* return */
jr ra
SYM_FUNC_END(__rmemcpy_fast)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,359
|
arch/loongarch/lib/memset.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
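/*
 * fill_to_64 replicates the low byte of \r0 into all eight bytes of
 * the register so that a single st.d stores eight copies of the fill
 * byte.
 */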
.macro fill_to_64 r0
bstrins.d \r0, \r0, 15, 8
bstrins.d \r0, \r0, 31, 16
bstrins.d \r0, \r0, 63, 32
.endm
SYM_FUNC_START_WEAK(memset)
SYM_FUNC_START_ALIAS(__memset)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
SYM_FUNC_END_ALIAS(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)
/*
* void *__memset_generic(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_generic)
move a3, a0
beqz a2, 2f
1: st.b a1, a0, 0
addi.d a0, a0, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
2: move v0, a3
jr ra
SYM_FUNC_END(__memset_generic)
/*
* void *__memset_fast(void *s, int c, size_t n)
*
* a0: s
* a1: c
* a2: n
*/
SYM_FUNC_START(__memset_fast)
/* fill a1 to 64 bits */
fill_to_64 a1
sltui t0, a2, 9
bnez t0, .Lsmall
add.d a2, a0, a2
st.d a1, a0, 0
/* align up address */
addi.d a3, a0, 8
bstrins.d a3, zero, 2, 0
addi.d a4, a2, -64
bgeu a3, a4, .Llt64
/* set 64 bytes at a time */
.Lloop64:
st.d a1, a3, 0
st.d a1, a3, 8
st.d a1, a3, 16
st.d a1, a3, 24
st.d a1, a3, 32
st.d a1, a3, 40
st.d a1, a3, 48
st.d a1, a3, 56
addi.d a3, a3, 64
bltu a3, a4, .Lloop64
/* set the remaining bytes */
.Llt64:
addi.d a4, a2, -32
bgeu a3, a4, .Llt32
st.d a1, a3, 0
st.d a1, a3, 8
st.d a1, a3, 16
st.d a1, a3, 24
addi.d a3, a3, 32
.Llt32:
addi.d a4, a2, -16
bgeu a3, a4, .Llt16
st.d a1, a3, 0
st.d a1, a3, 8
addi.d a3, a3, 16
.Llt16:
addi.d a4, a2, -8
bgeu a3, a4, .Llt8
st.d a1, a3, 0
.Llt8:
st.d a1, a2, -8
/* return */
jr ra
.align 4
.Lsmall:
pcaddi t0, 4
slli.d a2, a2, 4
add.d t0, t0, a2
jr t0
.align 4
0: jr ra
.align 4
1: st.b a1, a0, 0
jr ra
.align 4
2: st.h a1, a0, 0
jr ra
.align 4
3: st.h a1, a0, 0
st.b a1, a0, 2
jr ra
.align 4
4: st.w a1, a0, 0
jr ra
.align 4
5: st.w a1, a0, 0
st.b a1, a0, 4
jr ra
.align 4
6: st.w a1, a0, 0
st.h a1, a0, 4
jr ra
.align 4
7: st.w a1, a0, 0
st.w a1, a0, 3
jr ra
.align 4
8: st.d a1, a0, 0
jr ra
SYM_FUNC_END(__memset_fast)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,630
|
arch/loongarch/lib/copy_user.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020-2021 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/asmmacro.h>
#include <asm/cpu.h>
#include <asm/export.h>
#include <asm/regdef.h>
#include <asm/asm-extable.h>
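/*
 * Fault fixup handlers, referenced by the exception table entries at
 * the end of this file: they compute and return the number of bytes
 * that were not copied when a user access faulted.
 */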
.irp to, 0, 1, 2, 3, 4, 5, 6, 7
.L_fixup_handle_\to\():
sub.d v0, a2, a0
addi.d v0, v0, (\to) * (-8)
jr ra
.endr
.irp to, 0, 2, 4
.L_fixup_handle_s\to\():
addi.d v0, a2, -\to
jr ra
.endr
SYM_FUNC_START(__copy_user)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __copy_user_generic", \
"b __copy_user_fast", CPU_FEATURE_UAL
SYM_FUNC_END(__copy_user)
EXPORT_SYMBOL(__copy_user)
/*
* unsigned long __copy_user_generic(void *to, const void *from, unsigned long n)
*
* a0: to
* a1: from
* a2: n
*/
SYM_FUNC_START(__copy_user_generic)
beqz a2, 3f
1: ld.b t0, a1, 0
2: st.b t0, a0, 0
addi.d a0, a0, 1
addi.d a1, a1, 1
addi.d a2, a2, -1
bgt a2, zero, 1b
3: move v0, a2
jr ra
_asm_extable 1b, .L_fixup_handle_s0
_asm_extable 2b, .L_fixup_handle_s0
SYM_FUNC_END(__copy_user_generic)
/*
* unsigned long __copy_user_fast(void *to, const void *from, unsigned long n)
*
* a0: to
* a1: from
* a2: n
*/
SYM_FUNC_START(__copy_user_fast)
sltui t0, a2, 9
bnez t0, .Lsmall
add.d a3, a1, a2
add.d a2, a0, a2
0: ld.d t0, a1, 0
1: st.d t0, a0, 0
/* align up destination address */
andi t1, a0, 7
sub.d t0, zero, t1
addi.d t0, t0, 8
add.d a1, a1, t0
add.d a0, a0, t0
addi.d a4, a3, -64
bgeu a1, a4, .Llt64
/* copy 64 bytes at a time */
.Lloop64:
2: ld.d t0, a1, 0
3: ld.d t1, a1, 8
4: ld.d t2, a1, 16
5: ld.d t3, a1, 24
6: ld.d t4, a1, 32
7: ld.d t5, a1, 40
8: ld.d t6, a1, 48
9: ld.d t7, a1, 56
addi.d a1, a1, 64
10: st.d t0, a0, 0
11: st.d t1, a0, 8
12: st.d t2, a0, 16
13: st.d t3, a0, 24
14: st.d t4, a0, 32
15: st.d t5, a0, 40
16: st.d t6, a0, 48
17: st.d t7, a0, 56
addi.d a0, a0, 64
bltu a1, a4, .Lloop64
/* copy the remaining bytes */
.Llt64:
addi.d a4, a3, -32
bgeu a1, a4, .Llt32
18: ld.d t0, a1, 0
19: ld.d t1, a1, 8
20: ld.d t2, a1, 16
21: ld.d t3, a1, 24
addi.d a1, a1, 32
22: st.d t0, a0, 0
23: st.d t1, a0, 8
24: st.d t2, a0, 16
25: st.d t3, a0, 24
addi.d a0, a0, 32
.Llt32:
addi.d a4, a3, -16
bgeu a1, a4, .Llt16
26: ld.d t0, a1, 0
27: ld.d t1, a1, 8
addi.d a1, a1, 16
28: st.d t0, a0, 0
29: st.d t1, a0, 8
addi.d a0, a0, 16
.Llt16:
addi.d a4, a3, -8
bgeu a1, a4, .Llt8
30: ld.d t0, a1, 0
31: st.d t0, a0, 0
.Llt8:
32: ld.d t0, a3, -8
33: st.d t0, a2, -8
/* return */
move v0, zero
jr ra
.align 5
.Lsmall:
pcaddi t0, 8
slli.d a3, a2, 5
add.d t0, t0, a3
jr t0
.align 5
move v0, zero
jr ra
.align 5
34: ld.b t0, a1, 0
35: st.b t0, a0, 0
move v0, zero
jr ra
.align 5
36: ld.h t0, a1, 0
37: st.h t0, a0, 0
move v0, zero
jr ra
.align 5
38: ld.h t0, a1, 0
39: ld.b t1, a1, 2
40: st.h t0, a0, 0
41: st.b t1, a0, 2
move v0, zero
jr ra
.align 5
42: ld.w t0, a1, 0
43: st.w t0, a0, 0
move v0, zero
jr ra
.align 5
44: ld.w t0, a1, 0
45: ld.b t1, a1, 4
46: st.w t0, a0, 0
47: st.b t1, a0, 4
move v0, zero
jr ra
.align 5
48: ld.w t0, a1, 0
49: ld.h t1, a1, 4
50: st.w t0, a0, 0
51: st.h t1, a0, 4
move v0, zero
jr ra
.align 5
52: ld.w t0, a1, 0
53: ld.w t1, a1, 3
54: st.w t0, a0, 0
55: st.w t1, a0, 3
move v0, zero
jr ra
.align 5
56: ld.d t0, a1, 0
57: st.d t0, a0, 0
move v0, zero
jr ra
/* fixup and ex_table */
_asm_extable 0b, .L_fixup_handle_0
_asm_extable 1b, .L_fixup_handle_0
_asm_extable 2b, .L_fixup_handle_0
_asm_extable 3b, .L_fixup_handle_0
_asm_extable 4b, .L_fixup_handle_0
_asm_extable 5b, .L_fixup_handle_0
_asm_extable 6b, .L_fixup_handle_0
_asm_extable 7b, .L_fixup_handle_0
_asm_extable 8b, .L_fixup_handle_0
_asm_extable 9b, .L_fixup_handle_0
_asm_extable 10b, .L_fixup_handle_0
_asm_extable 11b, .L_fixup_handle_1
_asm_extable 12b, .L_fixup_handle_2
_asm_extable 13b, .L_fixup_handle_3
_asm_extable 14b, .L_fixup_handle_4
_asm_extable 15b, .L_fixup_handle_5
_asm_extable 16b, .L_fixup_handle_6
_asm_extable 17b, .L_fixup_handle_7
_asm_extable 18b, .L_fixup_handle_0
_asm_extable 19b, .L_fixup_handle_0
_asm_extable 20b, .L_fixup_handle_0
_asm_extable 21b, .L_fixup_handle_0
_asm_extable 22b, .L_fixup_handle_0
_asm_extable 23b, .L_fixup_handle_1
_asm_extable 24b, .L_fixup_handle_2
_asm_extable 25b, .L_fixup_handle_3
_asm_extable 26b, .L_fixup_handle_0
_asm_extable 27b, .L_fixup_handle_0
_asm_extable 28b, .L_fixup_handle_0
_asm_extable 29b, .L_fixup_handle_1
_asm_extable 30b, .L_fixup_handle_0
_asm_extable 31b, .L_fixup_handle_0
_asm_extable 32b, .L_fixup_handle_0
_asm_extable 33b, .L_fixup_handle_1
_asm_extable 34b, .L_fixup_handle_s0
_asm_extable 35b, .L_fixup_handle_s0
_asm_extable 36b, .L_fixup_handle_s0
_asm_extable 37b, .L_fixup_handle_s0
_asm_extable 38b, .L_fixup_handle_s0
_asm_extable 39b, .L_fixup_handle_s0
_asm_extable 40b, .L_fixup_handle_s0
_asm_extable 41b, .L_fixup_handle_s2
_asm_extable 42b, .L_fixup_handle_s0
_asm_extable 43b, .L_fixup_handle_s0
_asm_extable 44b, .L_fixup_handle_s0
_asm_extable 45b, .L_fixup_handle_s0
_asm_extable 46b, .L_fixup_handle_s0
_asm_extable 47b, .L_fixup_handle_s4
_asm_extable 48b, .L_fixup_handle_s0
_asm_extable 49b, .L_fixup_handle_s0
_asm_extable 50b, .L_fixup_handle_s0
_asm_extable 51b, .L_fixup_handle_s4
_asm_extable 52b, .L_fixup_handle_s0
_asm_extable 53b, .L_fixup_handle_s0
_asm_extable 54b, .L_fixup_handle_s0
_asm_extable 55b, .L_fixup_handle_s4
_asm_extable 56b, .L_fixup_handle_s0
_asm_extable 57b, .L_fixup_handle_s0
SYM_FUNC_END(__copy_user_fast)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,730
|
arch/loongarch/kvm/entry.S
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/stackframe.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include "kvm_compat.h"
#define RESUME_HOST (1 << 1)
#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
#define PT_GPR_OFFSET(x) (PT_R0 + 8*x)
.text
.macro kvm_save_guest_gprs base
.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
KVM_LONG_S $r\n, \base, GGPR_OFFSET(\n)
.endr
.endm
.macro kvm_restore_guest_gprs base
.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
KVM_LONG_L $r\n, \base, GGPR_OFFSET(\n)
.endr
.endm
.macro kvm_save_host_gpr base
.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
KVM_LONG_S $r\n, \base, PT_GPR_OFFSET(\n)
.endr
.endm
.macro kvm_restore_host_gpr base
.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
KVM_LONG_L $r\n, \base, PT_GPR_OFFSET(\n)
.endr
.endm
/*
* prepare switch to guest
* @param:
* KVM_ARCH: kvm_vcpu_arch, don't touch it until 'ertn'
* GPRNUM: KVM_ARCH gpr number
* tmp, tmp1: temp register
*/
.macro kvm_switch_to_guest KVM_ARCH GPRNUM tmp tmp1
/* set host excfg.VS=0, all exceptions share one exception entry */
csrrd \tmp, KVM_CSR_ECFG
bstrins.w \tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), KVM_ECFG_VS_SHIFT
csrwr \tmp, KVM_CSR_ECFG
/* Load up the new EENTRY */
KVM_LONG_L \tmp, \KVM_ARCH, KVM_ARCH_GEENTRY
csrwr \tmp, KVM_CSR_EENTRY
/* Set Guest ERA */
KVM_LONG_L \tmp, \KVM_ARCH, KVM_ARCH_GPC
csrwr \tmp, KVM_CSR_ERA
/* Save host PGDL */
csrrd \tmp, KVM_CSR_PGDL
KVM_LONG_S \tmp, \KVM_ARCH, KVM_ARCH_HPGD
/* Switch to kvm */
KVM_LONG_L \tmp1, \KVM_ARCH, KVM_VCPU_KVM - KVM_VCPU_ARCH
/* Load guest PGDL */
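/*
 * KVM_GPGD is assumed to be a byte offset into struct kvm; the
 * lu12i.w/srli.w pair simply rebuilds that constant as an immediate,
 * and ldx.d then indexes the kvm pointer held in \tmp1 with it.
 */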
lu12i.w \tmp, KVM_GPGD
srli.w \tmp, \tmp, 12
ldx.d \tmp, \tmp1, \tmp
csrwr \tmp, KVM_CSR_PGDL
/* Mix GID and RID */
csrrd \tmp1, KVM_CSR_GSTAT
bstrpick.w \tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), KVM_GSTAT_GID_SHIFT
csrrd \tmp, KVM_CSR_GTLBC
bstrins.w \tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT
csrwr \tmp, KVM_CSR_GTLBC
/*
* Switch to guest:
* GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0
* ertn
*/
/* Prepare to enable interrupts before entering the guest (PRMD.PIE takes effect at ertn) */
ori \tmp, zero, KVM_PRMD_PIE
csrxchg \tmp, \tmp, KVM_CSR_PRMD
/* Set the PVM bit so that ertn switches to guest context */
ori \tmp, zero, KVM_GSTAT_PVM
csrxchg \tmp, \tmp, KVM_CSR_GSTAT
/* Load Guest gprs */
kvm_restore_guest_gprs \KVM_ARCH
/* Load KVM_ARCH register */
KVM_LONG_L \KVM_ARCH, \KVM_ARCH, GGPR_OFFSET(\GPRNUM)
ertn
.endm
#ifndef EXCPTION_ENTRY
#define EXCPTION_ENTRY(name) \
.globl name ASM_NL \
.p2align 12; \
name: \
.cfi_startproc;
#endif
#ifndef EXCPTION_ENDPROC
#define EXCPTION_ENDPROC(name) \
.cfi_endproc; \
SYM_END(name, SYM_T_FUNC)
#endif
/* stash guest a2 in a scratch CSR and load kvm_vcpu into a2 */
EXCPTION_ENTRY(kvm_exception_entry)
csrwr a2, KVM_TEMP_KS
csrrd a2, KVM_VCPU_KS
KVM_LONG_ADDI a2, a2, KVM_VCPU_ARCH
/* After saving the guest GPRs, any GPR is free to use */
kvm_save_guest_gprs a2
/* Save guest a2 */
csrrd t0, KVM_TEMP_KS
KVM_LONG_S t0, a2, GGPR_OFFSET(REG_A2)
b kvm_exit_entry
EXCPTION_ENDPROC(kvm_exception_entry)
/* a2: kvm_vcpu_arch, a1 is free to use */
SYM_FUNC_START(kvm_exit_entry)
csrrd s1, KVM_VCPU_KS
KVM_LONG_L s0, s1, KVM_VCPU_RUN
csrrd t0, KVM_CSR_ESTAT
KVM_LONG_S t0, a2, KVM_ARCH_HESTAT
csrrd t0, KVM_CSR_ERA
KVM_LONG_S t0, a2, KVM_ARCH_GPC
csrrd t0, KVM_CSR_BADV
KVM_LONG_S t0, a2, KVM_ARCH_HBADV
csrrd t0, KVM_CSR_BADI
KVM_LONG_S t0, a2, KVM_ARCH_HBADI
/* Restore host excfg.VS */
csrrd t0, KVM_CSR_ECFG
KVM_LONG_L t1, a2, KVM_ARCH_HECFG
or t0, t0, t1
csrwr t0, KVM_CSR_ECFG
/* Restore host eentry */
KVM_LONG_L t0, a2, KVM_ARCH_HEENTRY
csrwr t0, KVM_CSR_EENTRY
#if defined(CONFIG_CPU_HAS_FPU)
/* Save FPU context */
csrrd t0, KVM_CSR_EUEN
ori t1, zero, KVM_EUEN_FPEN | KVM_EUEN_LSXEN | KVM_EUEN_LASXEN
and t2, t0, t1
beqz t2, 1f
movfcsr2gr t3, fcsr0
INT_S t3, a2, VCPU_FCSR0
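/*
 * Pack the eight condition-flag registers fcc0-fcc7 into a single
 * 64-bit word, one byte per flag, before storing it to VCPU_FCC.
 */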
movcf2gr t3, $fcc0
or t2, t3, zero
movcf2gr t3, $fcc1
bstrins.d t2, t3, 0xf, 0x8
movcf2gr t3, $fcc2
bstrins.d t2, t3, 0x17, 0x10
movcf2gr t3, $fcc3
bstrins.d t2, t3, 0x1f, 0x18
movcf2gr t3, $fcc4
bstrins.d t2, t3, 0x27, 0x20
movcf2gr t3, $fcc5
bstrins.d t2, t3, 0x2f, 0x28
movcf2gr t3, $fcc6
bstrins.d t2, t3, 0x37, 0x30
movcf2gr t3, $fcc7
bstrins.d t2, t3, 0x3f, 0x38
KVM_LONG_S t2, a2, VCPU_FCC
movgr2fcsr fcsr0, zero
1:
#endif
KVM_LONG_L t0, a2, KVM_ARCH_HPGD
csrwr t0, KVM_CSR_PGDL
/* Clear the PVM bit so we do not return to guest context */
ori t0, zero, KVM_GSTAT_PVM
csrxchg zero, t0, KVM_CSR_GSTAT
/* Clear GTLBC.TGID field */
csrrd t0, KVM_CSR_GTLBC
bstrins.w t0, zero, KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1, KVM_GTLBC_TGID_SHIFT
csrwr t0, KVM_CSR_GTLBC
/* Enable Address Map mode */
ori t0, zero, (1 << KVM_CRMD_DACM_SHIFT) | (1 << KVM_CRMD_DACF_SHIFT) | KVM_CRMD_PG | PLV_KERN
csrwr t0, KVM_CSR_CRMD
KVM_LONG_L tp, a2, KVM_ARCH_HGP
KVM_LONG_L sp, a2, KVM_ARCH_HSTACK
/* restore per cpu register */
KVM_LONG_L $r21, a2, KVM_ARCH_HPERCPU
KVM_LONG_ADDI sp, sp, -PT_SIZE
/* Prepare arguments and call the exit handler */
or a0, s0, zero
or a1, s1, zero
KVM_LONG_L t8, a2, KVM_ARCH_HANDLE_EXIT
jirl ra, t8, 0
ori t0, zero, KVM_CRMD_IE
csrxchg zero, t0, KVM_CSR_CRMD
or a2, s1, zero
KVM_LONG_ADDI a2, a2, KVM_VCPU_ARCH
andi t0, v0, RESUME_HOST
bnez t0, ret_to_host
INT_S zero, a2, KVM_ARCH_ISHYPCALL
ret_to_guest:
/* Save the per-cpu register again; we may have switched to another CPU */
KVM_LONG_S $r21, a2, KVM_ARCH_HPERCPU
/* Save kvm_vcpu to kscratch */
csrwr s1, KVM_VCPU_KS
kvm_switch_to_guest a2 REG_A2 t0 t1
ret_to_host:
KVM_LONG_L a2, a2, KVM_ARCH_HSTACK
addi.d a2, a2, -PT_SIZE
srai.w a3, v0, 2
or v0, a3, zero
kvm_restore_host_gpr a2
jirl zero, ra, 0
SYM_FUNC_END(kvm_exit_entry)
/*
* int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
*
* @register_param:
* a0: kvm_run* run
* a1: kvm_vcpu* vcpu
*/
SYM_FUNC_START(kvm_enter_guest)
/* allocate space at the bottom of the stack */
KVM_LONG_ADDI a2, sp, -PT_SIZE
/* save host gprs */
kvm_save_host_gpr a2
/* save host crmd,prmd csr to stack */
csrrd a3, KVM_CSR_CRMD
KVM_LONG_S a3, a2, PT_CRMD
csrrd a3, KVM_CSR_PRMD
KVM_LONG_S a3, a2, PT_PRMD
KVM_LONG_ADDI a2, a1, KVM_VCPU_ARCH
KVM_LONG_S sp, a2, KVM_ARCH_HSTACK
KVM_LONG_S tp, a2, KVM_ARCH_HGP
/* Save per cpu register */
KVM_LONG_S $r21, a2, KVM_ARCH_HPERCPU
/* Save kvm_vcpu to kscratch */
csrwr a1, KVM_VCPU_KS
kvm_switch_to_guest a2 REG_A2 t0 t1
SYM_FUNC_END(kvm_enter_guest)
SYM_FUNC_START(__kvm_save_fpu)
fpu_save_double a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_fpu)
SYM_FUNC_START(__kvm_restore_fpu)
fpu_restore_double a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_fpu)
SYM_FUNC_START(__kvm_restore_fcsr)
fpu_restore_csr a0 t1
fpu_restore_cc a0 t1 t2
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_fcsr)
#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(__kvm_save_lsx)
lsx_save_data a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_lsx)
SYM_FUNC_START(__kvm_restore_lsx)
lsx_restore_data a0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx)
SYM_FUNC_START(__kvm_restore_lsx_upper)
lsx_restore_all_upper a0 t0 t1
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx_upper)
#endif
#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(__kvm_save_lasx)
lasx_save_data a0 t7
jirl zero, ra, 0
SYM_FUNC_END(__kvm_save_lasx)
SYM_FUNC_START(__kvm_restore_lasx)
lasx_restore_data a0 t7
jirl zero, ra, 0
SYM_FUNC_END(__kvm_restore_lasx)
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 14,537
|
arch/loongarch/mm/tlbex.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Loongson Technology Corporation Limited
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/export.h>
#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/loongarchregs.h>
#include <asm/stackframe.h>
#include <asm/pgtable.h>
#define PTE_SHIFT (PAGE_SHIFT + PTE_ORDER)
#define PTRS_PER_PGD_BITS (PAGE_SHIFT + PGD_ORDER - 3)
#define PTRS_PER_PUD_BITS (PAGE_SHIFT + PUD_ORDER - 3)
#define PTRS_PER_PMD_BITS (PAGE_SHIFT + PMD_ORDER - 3)
#define PTRS_PER_PTE_BITS (PAGE_SHIFT + PTE_ORDER - 3)
.align 5
SYM_FUNC_START(clear_page)
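/* t0 = end of the page; each loop iteration below zeroes 128 bytes
 * (eight stores before the pointer bump and eight after it). */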
lu12i.w t0, 1 << (PAGE_SHIFT - 12)
add.d t0, t0, a0
1:
st.d zero, a0, 0
st.d zero, a0, 8
st.d zero, a0, 16
st.d zero, a0, 24
st.d zero, a0, 32
st.d zero, a0, 40
st.d zero, a0, 48
st.d zero, a0, 56
addi.d a0, a0, 128
st.d zero, a0, -64
st.d zero, a0, -56
st.d zero, a0, -48
st.d zero, a0, -40
st.d zero, a0, -32
st.d zero, a0, -24
st.d zero, a0, -16
st.d zero, a0, -8
bne t0, a0, 1b
jirl zero, ra, 0
SYM_FUNC_END(clear_page)
EXPORT_SYMBOL(clear_page)
.align 5
SYM_FUNC_START(copy_page)
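/* t8 = end of the destination page; each iteration copies 128 bytes,
 * with loads issued ahead of the matching stores to hide latency. */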
lu12i.w t8, 1 << (PAGE_SHIFT - 12)
add.d t8, t8, a0
1:
ld.d t0, a1, 0
ld.d t1, a1, 8
ld.d t2, a1, 16
ld.d t3, a1, 24
ld.d t4, a1, 32
ld.d t5, a1, 40
ld.d t6, a1, 48
ld.d t7, a1, 56
st.d t0, a0, 0
st.d t1, a0, 8
ld.d t0, a1, 64
ld.d t1, a1, 72
st.d t2, a0, 16
st.d t3, a0, 24
ld.d t2, a1, 80
ld.d t3, a1, 88
st.d t4, a0, 32
st.d t5, a0, 40
ld.d t4, a1, 96
ld.d t5, a1, 104
st.d t6, a0, 48
st.d t7, a0, 56
ld.d t6, a1, 112
ld.d t7, a1, 120
addi.d a0, a0, 128
addi.d a1, a1, 128
st.d t0, a0, -64
st.d t1, a0, -56
st.d t2, a0, -48
st.d t3, a0, -40
st.d t4, a0, -32
st.d t5, a0, -24
st.d t6, a0, -16
st.d t7, a0, -8
bne t8, a0, 1b
jirl zero, ra, 0
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
SYM_FUNC_START(handle_tlb_modify)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
csrwr ra, LOONGARCH_CSR_KS2
/*
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
blt t0, zero, vmalloc_modify
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_modify:
/* get pgd offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
#endif
ld.d ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
blt ra, zero, tlb_huge_update_modify
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PTE_SHIFT - 1, PTE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
andi ra, t0, _PAGE_WRITE
beqz ra, nopage_tlb_modify
/* Present and writable bits set, set accessed and dirty bits. */
ori t0, t0, _PAGE_VALID | _PAGE_DIRTY
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beqz t0, smp_pgtable_change_modify
#else
st.d t0, t1, 0
#endif
tlbsrch
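/* Clear bit 3 of the PTE pointer so the two loads below fetch the
 * even/odd PTE pair backing the matched TLB entry. */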
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_modify:
la.abs t1, swapper_pg_dir
b vmalloc_done_modify
#endif
/*
* This is the entry point when
* build_tlbchange_handler_head spots a huge page.
*/
tlb_huge_update_modify:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
#endif
andi t0, ra, _PAGE_WRITE
beqz t0, nopage_tlb_modify
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
sc.d t0, t1, 0
beqz t0, tlb_huge_update_modify
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
st.d t0, t1, 0
#endif
tlbsrch
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the size
* of the large TLB entry we intend to use.
* A TLB entry half the size of the configured
* huge page size is configured into entrylo0
* and entrylo1 to cover the contiguous huge PTE
* address space.
*/
/* Huge page: Move Global bit */
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* convert to entrylo1 */
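/* entrylo1 maps the upper half of the huge page, so advance the
 * physical address by half the huge page size. */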
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbwr
/* Reset default page size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
nopage_tlb_modify:
dbar 0x700
csrrd ra, LOONGARCH_CSR_KS2
la.abs t0, tlb_do_page_fault_1
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_modify)
SYM_FUNC_START(handle_tlb_modify_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la.abs t0, tlb_do_page_fault_1
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_modify_ptw)
SYM_FUNC_START(handle_tlb_store)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
csrwr ra, LOONGARCH_CSR_KS2
/*
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
blt t0, zero, vmalloc_store
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_store:
/* get pgd offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
#endif
ld.d ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
blt ra, zero, tlb_huge_update_store
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PTE_SHIFT - 1, PTE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_store:
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
bnez ra, nopage_tlb_store
ori t0, t0, _PAGE_VALID | _PAGE_DIRTY
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beqz t0, smp_pgtable_change_store
#else
st.d t0, t1, 0
#endif
tlbsrch
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_store:
la.abs t1, swapper_pg_dir
b vmalloc_done_store
#endif
/*
* This is the entry point when build_tlbchange_handler_head
* spots a huge page.
*/
tlb_huge_update_store:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
#endif
andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
bnez t0, nopage_tlb_store
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
sc.d t0, t1, 0
beqz t0, tlb_huge_update_store
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, _PAGE_VALID | _PAGE_DIRTY
st.d t0, t1, 0
#endif
tlbsrch
/* The type conversion is to avoid uasm warning */
addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
tlbwr
csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the size
* of the large TLB entry we intend to use.
* A TLB entry half the size of the configured
* huge page size is configured into entrylo0
* and entrylo1 to cover the contiguous huge PTE
* address space.
*/
/* Huge page: Move Global bit */
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* convert to entrylo1 */
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
/* Reset default page size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
nopage_tlb_store:
dbar 0x700
csrrd ra, LOONGARCH_CSR_KS2
la.abs t0, tlb_do_page_fault_1
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_store)
SYM_FUNC_START(handle_tlb_store_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la.abs t0, tlb_do_page_fault_1
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_store_ptw)
SYM_FUNC_START(handle_tlb_load)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
csrwr ra, LOONGARCH_CSR_KS2
/*
* The vmalloc handling is not in the hotpath.
*/
csrrd t0, LOONGARCH_CSR_BADV
bltz t0, vmalloc_load
csrrd t1, LOONGARCH_CSR_PGDL
vmalloc_done_load:
/* get pgd offset in bytes */
bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
alsl.d t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
alsl.d t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
ld.d t1, t1, 0
bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
alsl.d t1, ra, t1, 3
#endif
ld.d ra, t1, 0
/*
* For huge tlb entries, pmde doesn't contain an address but
* instead contains the tlb pte. Check the PAGE_HUGE bit and
* see if we need to jump to huge tlb processing.
*/
rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
blt ra, zero, tlb_huge_update_load
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PTE_SHIFT - 1, PTE_SHIFT
alsl.d t1, t0, ra, _PTE_T_LOG2
#ifdef CONFIG_SMP
smp_pgtable_change_load:
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
andi ra, t0, _PAGE_PRESENT
beqz ra, nopage_tlb_load
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beqz t0, smp_pgtable_change_load
#else
st.d t0, t1, 0
#endif
tlbsrch
bstrins.d t1, zero, 3, 3
ld.d t0, t1, 0
ld.d t1, t1, 8
csrwr t0, LOONGARCH_CSR_TLBELO0
csrwr t1, LOONGARCH_CSR_TLBELO1
tlbwr
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
#ifdef CONFIG_64BIT
vmalloc_load:
la.abs t1, swapper_pg_dir
b vmalloc_done_load
#endif
/*
* This is the entry point when build_tlbchange_handler_head
* spots a huge page.
*/
tlb_huge_update_load:
#ifdef CONFIG_SMP
ll.d ra, t1, 0
#endif
andi t0, ra, _PAGE_PRESENT
beqz t0, nopage_tlb_load
#ifdef CONFIG_SMP
ori t0, ra, _PAGE_VALID
sc.d t0, t1, 0
beqz t0, tlb_huge_update_load
ori t0, ra, _PAGE_VALID
#else
rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
ori t0, ra, _PAGE_VALID
st.d t0, t1, 0
#endif
tlbsrch
/* The type conversion is to avoid uasm warning */
addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
tlbwr
csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the size
* of the large TLB entry we intend to use.
* A TLB entry half the size of the configured
* huge page size is configured into entrylo0
* and entrylo1 to cover the contiguous huge PTE
* address space.
*/
/* Huge page: Move Global bit */
xori t0, t0, _PAGE_HUGE
lu12i.w t1, _PAGE_HGLOBAL >> 12
and t1, t0, t1
srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
or t0, t0, t1
move ra, t0
csrwr ra, LOONGARCH_CSR_TLBELO0
/* convert to entrylo1 */
addi.d t1, zero, 1
slli.d t1, t1, (HPAGE_SHIFT - 1)
add.d t0, t0, t1
csrwr t0, LOONGARCH_CSR_TLBELO1
/* Set huge page tlb entry size */
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
tlbfill
addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
csrrd t0, LOONGARCH_CSR_KS0
csrrd t1, LOONGARCH_CSR_KS1
csrrd ra, LOONGARCH_CSR_KS2
ertn
nopage_tlb_load:
dbar 0x700
csrrd ra, LOONGARCH_CSR_KS2
la.abs t0, tlb_do_page_fault_0
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_load)
SYM_FUNC_START(handle_tlb_load_ptw)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la.abs t0, tlb_do_page_fault_0
jirl zero, t0, 0
SYM_FUNC_END(handle_tlb_load_ptw)
SYM_FUNC_START(handle_tlb_refill)
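/*
 * Hardware-assisted refill: each lddir walks one page-table level for
 * the faulting address, and the two ldpte instructions load the
 * even/odd PTE pair into the TLB refill entry-lo registers before
 * tlbfill writes the entry.
 */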
csrwr t0, LOONGARCH_CSR_TLBRSAVE
csrrd t0, LOONGARCH_CSR_PGD
lddir t0, t0, 3
#if CONFIG_PGTABLE_LEVELS > 3
lddir t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
lddir t0, t0, 1
#endif
ldpte t0, 0
ldpte t0, 1
tlbfill
csrrd t0, LOONGARCH_CSR_TLBRSAVE
ertn
SYM_FUNC_END(handle_tlb_refill)
.macro tlb_do_page_fault, write
SYM_FUNC_START(tlb_do_page_fault_\write)
SAVE_ALL docfi=0
csrrd a2, LOONGARCH_CSR_BADV
KMODE
move a0, sp
REG_S a2, sp, PT_BVADDR
li.w a1, \write
la.abs t0, do_page_fault
jirl ra, t0, 0
la.abs t0, ret_from_exception
jirl zero, t0, 0
SYM_FUNC_END(tlb_do_page_fault_\write)
.endm
tlb_do_page_fault 0
tlb_do_page_fault 1
SYM_FUNC_START(tlb_do_page_fault_protect)
csrwr t0, LOONGARCH_CSR_KS0
csrwr t1, LOONGARCH_CSR_KS1
la.abs t0, tlb_do_page_fault_0
jirl zero, t0, 0
SYM_FUNC_END(tlb_do_page_fault_protect)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,482
|
arch/loongarch/boot/compressed/head.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 1995 Waldorf Electronics
* Written by Ralf Baechle and Andreas Busse
* Copyright (C) 1995 - 1999 Ralf Baechle
* Copyright (C) 1996 Paul M. Antoine
* Modified for DECStation and hence R3000 support by Paul M. Antoine
* Further modifications by David S. Miller and Harald Koerfgen
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2020 Loongson Technology Co., Ltd.
*/
#include <asm/asm.h>
#include <asm/regdef.h>
#include <asm/addrspace.h>
#include <asm/loongarchregs.h>
SYM_CODE_START(start)
/* Save boot rom start args */
move s0, a0
move s1, a1
move s2, a2
move s3, a3 /* for kdump */
# 0x9000 xxxx xxxx xxxx
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
/* Clear BSS */
la a0, _edata
la a2, _end
1: st.d zero, a0, 0
addi.d a0, a0, 8
bne a2, a0, 1b
la sp, .stack + 8192 /* stack address */
la a0, .heap /* heap address */
move a1, a3 /* kdump relocate offset */
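/* a4 == 0x123 is assumed to be the kdump hand-off magic from the
 * primary kernel; without it both the relocate offset and the saved
 * a3 are cleared. */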
li.w t1, 0x123
beq t1, a4, 2f
li.w a1, 0
li.w s3, 0
2:
la ra, 3f
la t4, decompress_kernel
jirl zero, t4, 0
3:
move a0, s0
move a1, s1
move a2, s2
move a3, s3
PTR_LI t4, KERNEL_ENTRY
add.d t4, t4, a3
jirl zero, t4, 0
4:
b 4b
SYM_CODE_END(start)
.comm .heap,BOOT_HEAP_SIZE,4
.comm .stack,4096*2,4
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,055
|
arch/loongarch/boot/compressed/efi-header.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2013 - 2017 Linaro, Ltd.
* Copyright (C) 2013, 2014 Red Hat, Inc.
* Copyright (C) 2020, 2021 Loongson, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/pe.h>
#include <linux/sizes.h>
#include <generated/utsrelease.h>
#include <generated/compile.h>
.macro __EFI_PE_HEADER
.long PE_MAGIC
coff_header:
.short IMAGE_FILE_MACHINE_LOONGARCH /* Machine */
.short section_count /* NumberOfSections */
.long 0 /* TimeDateStamp */
.long 0 /* PointerToSymbolTable */
.long 0 /* NumberOfSymbols */
.short section_table - optional_header /* SizeOfOptionalHeader */
.short IMAGE_FILE_DEBUG_STRIPPED | \
IMAGE_FILE_EXECUTABLE_IMAGE | \
IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
optional_header:
.short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
.byte 0x02 /* MajorLinkerVersion */
.byte 0x14 /* MinorLinkerVersion */
.long _data - efi_header_end /* SizeOfCode */
.long _end - _data /* SizeOfInitializedData */
.long 0 /* SizeOfUninitializedData */
.long efi_entry - _text /* AddressOfEntryPoint */
.long efi_header_end - _text /* BaseOfCode */
extra_header_fields:
.quad 0 /* ImageBase */
.long PECOFF_SEGMENT_ALIGN /* SectionAlignment */
.long PECOFF_FILE_ALIGN /* FileAlignment */
.short 0 /* MajorOperatingSystemVersion */
.short 0 /* MinorOperatingSystemVersion */
.short 0 /* MajorImageVersion */
.short 0 /* MinorImageVersion */
.short 0 /* MajorSubsystemVersion */
.short 0 /* MinorSubsystemVersion */
.long 0 /* Win32VersionValue */
.long _end - _text /* SizeOfImage */
/* Everything before the kernel image is considered part of the header */
.long efi_header_end - _head /* SizeOfHeaders */
.long 0 /* CheckSum */
.short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */
.short 0 /* DllCharacteristics */
.quad 0 /* SizeOfStackReserve */
.quad 0 /* SizeOfStackCommit */
.quad 0 /* SizeOfHeapReserve */
.quad 0 /* SizeOfHeapCommit */
.long 0 /* LoaderFlags */
.long (section_table - .) / 8 /* NumberOfRvaAndSizes */
.quad 0 /* ExportTable */
.quad 0 /* ImportTable */
.quad 0 /* ResourceTable */
.quad 0 /* ExceptionTable */
.quad 0 /* CertificationTable */
.quad 0 /* BaseRelocationTable */
/* Section table */
section_table:
.ascii ".text\0\0\0"
.long _data - efi_header_end /* VirtualSize */
.long efi_header_end - _text /* VirtualAddress */
.long _data - efi_header_end /* SizeOfRawData */
.long efi_header_end - _text /* PointerToRawData */
.long 0 /* PointerToRelocations */
.long 0 /* PointerToLineNumbers */
.short 0 /* NumberOfRelocations */
.short 0 /* NumberOfLineNumbers */
.long IMAGE_SCN_CNT_CODE | \
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_EXECUTE /* Characteristics */
.ascii ".data\0\0\0"
.long _end - _data /* VirtualSize */
.long _data - _text /* VirtualAddress */
.long _edata - _data /* SizeOfRawData */
.long _data - _text /* PointerToRawData */
.long 0 /* PointerToRelocations */
.long 0 /* PointerToLineNumbers */
.short 0 /* NumberOfRelocations */
.short 0 /* NumberOfLineNumbers */
.long IMAGE_SCN_CNT_INITIALIZED_DATA | \
IMAGE_SCN_MEM_READ | \
IMAGE_SCN_MEM_WRITE /* Characteristics */
.org 0x20e
.word kernel_version - 512 - _head
.set section_count, (. - section_table) / 40
efi_header_end:
.endm
.section ".head.text","ax"
_head:
/* "MZ", MS-DOS header */
.byte 0x4d
.byte 0x5a
.org 0x28
.ascii "LoongArch\0"
.org 0x3c
/* Offset to the PE header */
.long pe_header - _head
pe_header:
__EFI_PE_HEADER
start:
.globl start
kernel_entaddr:
.quad KERNEL_ENTRY
.globl kernel_entaddr
kernel_version:
.ascii UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") \
" UTS_VERSION "\0"
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,715
|
arch/c6x/kernel/head.S
|
;
; Port on Texas Instruments TMS320C6x architecture
;
; Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License version 2 as
; published by the Free Software Foundation.
;
#include <linux/linkage.h>
#include <linux/of_fdt.h>
#include <asm/asm-offsets.h>
__HEAD
ENTRY(_c_int00)
;; Save magic and pointer
MV .S1 A4,A10
MV .S2 B4,B10
MVKL .S2 __bss_start,B5
MVKH .S2 __bss_start,B5
MVKL .S2 __bss_stop,B6
MVKH .S2 __bss_stop,B6
SUB .L2 B6,B5,B6 ; bss size
;; Set the stack pointer
MVKL .S2 current_ksp,B0
MVKH .S2 current_ksp,B0
LDW .D2T2 *B0,B15
;; clear bss
SHR .S2 B6,3,B0 ; number of dwords to clear
ZERO .L2 B13
ZERO .L2 B12
bss_loop:
BDEC .S2 bss_loop,B0
NOP 3
CMPLT .L2 B0,0,B1
[!B1] STDW .D2T2 B13:B12,*B5++[1]
NOP 4
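;; align the initial stack pointer to a dword (8-byte) boundary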
AND .D2 ~7,B15,B15
;; Clear GIE and PGIE
MVC .S2 CSR,B2
CLR .S2 B2,0,1,B2
MVC .S2 B2,CSR
MVC .S2 TSR,B2
CLR .S2 B2,0,1,B2
MVC .S2 B2,TSR
MVC .S2 ITSR,B2
CLR .S2 B2,0,1,B2
MVC .S2 B2,ITSR
MVC .S2 NTSR,B2
CLR .S2 B2,0,1,B2
MVC .S2 B2,NTSR
;; pass DTB pointer to machine_init (or zero if none)
MVKL .S1 OF_DT_HEADER,A0
MVKH .S1 OF_DT_HEADER,A0
CMPEQ .L1 A10,A0,A0
[A0] MV .S1X B10,A4
[!A0] MVK .S1 0,A4
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 machine_init,A0
MVKH .S1 machine_init,A0
B .S2X A0
ADDKPC .S2 0f,B3,4
0:
#else
CALLP .S2 machine_init,B3
#endif
;; Jump to Linux init
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 start_kernel,A0
MVKH .S1 start_kernel,A0
B .S2X A0
#else
B .S2 start_kernel
#endif
NOP 5
L1: BNOP .S2 L1,5
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,783
|
arch/c6x/kernel/vectors.S
|
;
; Port on Texas Instruments TMS320C6x architecture
;
; Copyright (C) 2004, 2006, 2009, 2010, 2011 Texas Instruments Incorporated
; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License version 2 as
; published by the Free Software Foundation.
;
; This section handles all the interrupt vector routines.
; At RESET the processor sets up the DRAM timing parameters and
; branches to the label _c_int00 which handles initialization for the C code.
;
#define ALIGNMENT 5
.macro IRQVEC name, handler
.align ALIGNMENT
.hidden \name
.global \name
\name:
#ifdef CONFIG_C6X_BIG_KERNEL
STW .D2T1 A0,*B15--[2]
|| MVKL .S1 \handler,A0
MVKH .S1 \handler,A0
B .S2X A0
LDW .D2T1 *++B15[2],A0
NOP 4
NOP
NOP
.endm
#else /* CONFIG_C6X_BIG_KERNEL */
B .S2 \handler
NOP
NOP
NOP
NOP
NOP
NOP
NOP
.endm
#endif /* CONFIG_C6X_BIG_KERNEL */
.sect ".vectors","ax"
.align ALIGNMENT
.global RESET
.hidden RESET
RESET:
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 _c_int00,A0 ; branch to _c_int00
MVKH .S1 _c_int00,A0
B .S2X A0
#else
B .S2 _c_int00
NOP
NOP
#endif
NOP
NOP
NOP
NOP
NOP
IRQVEC NMI,_nmi_handler ; NMI interrupt
IRQVEC AINT,_bad_interrupt ; reserved
IRQVEC MSGINT,_bad_interrupt ; reserved
IRQVEC INT4,_int4_handler
IRQVEC INT5,_int5_handler
IRQVEC INT6,_int6_handler
IRQVEC INT7,_int7_handler
IRQVEC INT8,_int8_handler
IRQVEC INT9,_int9_handler
IRQVEC INT10,_int10_handler
IRQVEC INT11,_int11_handler
IRQVEC INT12,_int12_handler
IRQVEC INT13,_int13_handler
IRQVEC INT14,_int14_handler
IRQVEC INT15,_int15_handler
|
AirFortressIlikara/LS2K0300-linux-4.19
| 15,959
|
arch/c6x/kernel/entry.S
|
;
; Port on Texas Instruments TMS320C6x architecture
;
; Copyright (C) 2004-2011 Texas Instruments Incorporated
; Author: Aurelien Jacquiot (aurelien.jacquiot@virtuallogix.com)
; Updated for 2.6.34: Mark Salter <msalter@redhat.com>
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License version 2 as
; published by the Free Software Foundation.
;
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/errno.h>
; Registers naming
#define DP B14
#define SP B15
#ifndef CONFIG_PREEMPT
#define resume_kernel restore_all
#endif
.altmacro
.macro MASK_INT reg
MVC .S2 CSR,reg
CLR .S2 reg,0,0,reg
MVC .S2 reg,CSR
.endm
.macro UNMASK_INT reg
MVC .S2 CSR,reg
SET .S2 reg,0,0,reg
MVC .S2 reg,CSR
.endm
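;; thread_info sits at the base of the kernel stack, so clearing the
;; low THREAD_SHIFT bits of SP (shift right, then left) yields its address.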
.macro GET_THREAD_INFO reg
SHR .S1X SP,THREAD_SHIFT,reg
SHL .S1 reg,THREAD_SHIFT,reg
.endm
;;
;; This defines the normal kernel pt_regs layout.
;;
.macro SAVE_ALL __rp __tsr
STW .D2T2 B0,*SP--[2] ; save original B0
MVKL .S2 current_ksp,B0
MVKH .S2 current_ksp,B0
LDW .D2T2 *B0,B1 ; KSP
NOP 3
STW .D2T2 B1,*+SP[1] ; save original B1
XOR .D2 SP,B1,B0 ; (SP ^ KSP)
LDW .D2T2 *+SP[1],B1 ; restore B0/B1
LDW .D2T2 *++SP[2],B0
SHR .S2 B0,THREAD_SHIFT,B0 ; 0 if already using kstack
[B0] STDW .D2T2 SP:DP,*--B1[1] ; user: save user sp/dp kstack
[B0] MV .S2 B1,SP ; and switch to kstack
||[!B0] STDW .D2T2 SP:DP,*--SP[1] ; kernel: save on current stack
SUBAW .D2 SP,2,SP
ADD .D1X SP,-8,A15
|| STDW .D2T1 A15:A14,*SP--[16] ; save A15:A14
STDW .D2T2 B13:B12,*SP--[1]
|| STDW .D1T1 A13:A12,*A15--[1]
|| MVC .S2 __rp,B13
STDW .D2T2 B11:B10,*SP--[1]
|| STDW .D1T1 A11:A10,*A15--[1]
|| MVC .S2 CSR,B12
STDW .D2T2 B9:B8,*SP--[1]
|| STDW .D1T1 A9:A8,*A15--[1]
|| MVC .S2 RILC,B11
STDW .D2T2 B7:B6,*SP--[1]
|| STDW .D1T1 A7:A6,*A15--[1]
|| MVC .S2 ILC,B10
STDW .D2T2 B5:B4,*SP--[1]
|| STDW .D1T1 A5:A4,*A15--[1]
STDW .D2T2 B3:B2,*SP--[1]
|| STDW .D1T1 A3:A2,*A15--[1]
|| MVC .S2 __tsr,B5
STDW .D2T2 B1:B0,*SP--[1]
|| STDW .D1T1 A1:A0,*A15--[1]
|| MV .S1X B5,A5
STDW .D2T2 B31:B30,*SP--[1]
|| STDW .D1T1 A31:A30,*A15--[1]
STDW .D2T2 B29:B28,*SP--[1]
|| STDW .D1T1 A29:A28,*A15--[1]
STDW .D2T2 B27:B26,*SP--[1]
|| STDW .D1T1 A27:A26,*A15--[1]
STDW .D2T2 B25:B24,*SP--[1]
|| STDW .D1T1 A25:A24,*A15--[1]
STDW .D2T2 B23:B22,*SP--[1]
|| STDW .D1T1 A23:A22,*A15--[1]
STDW .D2T2 B21:B20,*SP--[1]
|| STDW .D1T1 A21:A20,*A15--[1]
STDW .D2T2 B19:B18,*SP--[1]
|| STDW .D1T1 A19:A18,*A15--[1]
STDW .D2T2 B17:B16,*SP--[1]
|| STDW .D1T1 A17:A16,*A15--[1]
STDW .D2T2 B13:B12,*SP--[1] ; save PC and CSR
STDW .D2T2 B11:B10,*SP--[1] ; save RILC and ILC
STDW .D2T1 A5:A4,*SP--[1] ; save TSR and orig A4
;; We left an unused word on the stack just above pt_regs.
;; It is used to save whether or not this frame is due to
;; a syscall. It is cleared here, but the syscall handler
;; sets it to a non-zero value.
MVK .L2 0,B1
STW .D2T2 B1,*+SP(REGS__END+8) ; clear syscall flag
.endm
.macro RESTORE_ALL __rp __tsr
LDDW .D2T2 *++SP[1],B9:B8 ; get TSR (B9)
LDDW .D2T2 *++SP[1],B11:B10 ; get RILC (B11) and ILC (B10)
LDDW .D2T2 *++SP[1],B13:B12 ; get PC (B13) and CSR (B12)
ADDAW .D1X SP,30,A15
LDDW .D1T1 *++A15[1],A17:A16
|| LDDW .D2T2 *++SP[1],B17:B16
LDDW .D1T1 *++A15[1],A19:A18
|| LDDW .D2T2 *++SP[1],B19:B18
LDDW .D1T1 *++A15[1],A21:A20
|| LDDW .D2T2 *++SP[1],B21:B20
LDDW .D1T1 *++A15[1],A23:A22
|| LDDW .D2T2 *++SP[1],B23:B22
LDDW .D1T1 *++A15[1],A25:A24
|| LDDW .D2T2 *++SP[1],B25:B24
LDDW .D1T1 *++A15[1],A27:A26
|| LDDW .D2T2 *++SP[1],B27:B26
LDDW .D1T1 *++A15[1],A29:A28
|| LDDW .D2T2 *++SP[1],B29:B28
LDDW .D1T1 *++A15[1],A31:A30
|| LDDW .D2T2 *++SP[1],B31:B30
LDDW .D1T1 *++A15[1],A1:A0
|| LDDW .D2T2 *++SP[1],B1:B0
LDDW .D1T1 *++A15[1],A3:A2
|| LDDW .D2T2 *++SP[1],B3:B2
|| MVC .S2 B9,__tsr
LDDW .D1T1 *++A15[1],A5:A4
|| LDDW .D2T2 *++SP[1],B5:B4
|| MVC .S2 B11,RILC
LDDW .D1T1 *++A15[1],A7:A6
|| LDDW .D2T2 *++SP[1],B7:B6
|| MVC .S2 B10,ILC
LDDW .D1T1 *++A15[1],A9:A8
|| LDDW .D2T2 *++SP[1],B9:B8
|| MVC .S2 B13,__rp
LDDW .D1T1 *++A15[1],A11:A10
|| LDDW .D2T2 *++SP[1],B11:B10
|| MVC .S2 B12,CSR
LDDW .D1T1 *++A15[1],A13:A12
|| LDDW .D2T2 *++SP[1],B13:B12
MV .D2X A15,SP
|| MVKL .S1 current_ksp,A15
MVKH .S1 current_ksp,A15
|| ADDAW .D1X SP,6,A14
STW .D1T1 A14,*A15 ; save kernel stack pointer
LDDW .D2T1 *++SP[1],A15:A14
B .S2 __rp ; return from interruption
LDDW .D2T2 *+SP[1],SP:DP
NOP 4
.endm
.section .text
;;
;; Jump to schedule() then return to ret_from_exception
;;
_reschedule:
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 schedule,A0
MVKH .S1 schedule,A0
B .S2X A0
#else
B .S1 schedule
#endif
ADDKPC .S2 ret_from_exception,B3,4
;;
;; Called before syscall handler when process is being debugged
;;
tracesys_on:
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 syscall_trace_entry,A0
MVKH .S1 syscall_trace_entry,A0
B .S2X A0
#else
B .S1 syscall_trace_entry
#endif
ADDKPC .S2 ret_from_syscall_trace,B3,3
ADD .S1X 8,SP,A4
ret_from_syscall_trace:
;; tracing returns (possibly new) syscall number
MV .D2X A4,B0
|| MVK .S2 __NR_syscalls,B1
CMPLTU .L2 B0,B1,B1
[!B1] BNOP .S2 ret_from_syscall_function,5
|| MVK .S1 -ENOSYS,A4
;; reload syscall args from (possibly modified) stack frame
;; and get syscall handler addr from sys_call_table:
LDW .D2T2 *+SP(REGS_B4+8),B4
|| MVKL .S2 sys_call_table,B1
LDW .D2T1 *+SP(REGS_A6+8),A6
|| MVKH .S2 sys_call_table,B1
LDW .D2T2 *+B1[B0],B0
|| MVKL .S2 ret_from_syscall_function,B3
LDW .D2T2 *+SP(REGS_B6+8),B6
|| MVKH .S2 ret_from_syscall_function,B3
LDW .D2T1 *+SP(REGS_A8+8),A8
LDW .D2T2 *+SP(REGS_B8+8),B8
NOP
; B0 = sys_call_table[__NR_*]
BNOP .S2 B0,5 ; branch to syscall handler
|| LDW .D2T1 *+SP(REGS_ORIG_A4+8),A4
syscall_exit_work:
AND .D1 _TIF_SYSCALL_TRACE,A2,A0
[!A0] BNOP .S1 work_pending,5
[A0] B .S2 syscall_trace_exit
ADDKPC .S2 resume_userspace,B3,1
MVC .S2 CSR,B1
SET .S2 B1,0,0,B1
MVC .S2 B1,CSR ; enable ints
work_pending:
AND .D1 _TIF_NEED_RESCHED,A2,A0
[!A0] BNOP .S1 work_notifysig,5
work_resched:
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 schedule,A1
MVKH .S1 schedule,A1
B .S2X A1
#else
B .S2 schedule
#endif
ADDKPC .S2 work_rescheduled,B3,4
work_rescheduled:
;; make sure we don't miss an interrupt setting need_resched or
;; sigpending between sampling and the rti
MASK_INT B2
GET_THREAD_INFO A12
LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
MVK .S1 _TIF_WORK_MASK,A1
MVK .S1 _TIF_NEED_RESCHED,A3
NOP 2
AND .D1 A1,A2,A0
|| AND .S1 A3,A2,A1
[!A0] BNOP .S1 restore_all,5
[A1] BNOP .S1 work_resched,5
work_notifysig:
;; enable interrupts for do_notify_resume()
UNMASK_INT B2
B .S2 do_notify_resume
LDW .D2T1 *+SP(REGS__END+8),A6 ; syscall flag
ADDKPC .S2 resume_userspace,B3,1
ADD .S1X 8,SP,A4 ; pt_regs pointer is first arg
MV .D2X A2,B4 ; thread_info flags is second arg
;;
;; On C64x+, the return path from exceptions and interrupts
;; is slightly different
;;
ENTRY(ret_from_exception)
#ifdef CONFIG_PREEMPT
MASK_INT B2
#endif
ENTRY(ret_from_interrupt)
;;
;; Check whether we are coming from user mode.
;;
LDW .D2T2 *+SP(REGS_TSR+8),B0
MVK .S2 0x40,B1
NOP 3
AND .D2 B0,B1,B0
[!B0] BNOP .S2 resume_kernel,5
resume_userspace:
;; make sure we don't miss an interrupt setting need_resched or
;; sigpending between sampling and the rti
MASK_INT B2
GET_THREAD_INFO A12
LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
MVK .S1 _TIF_WORK_MASK,A1
MVK .S1 _TIF_NEED_RESCHED,A3
NOP 2
AND .D1 A1,A2,A0
[A0] BNOP .S1 work_pending,5
BNOP .S1 restore_all,5
;;
;; System call handling
;; B0 = syscall number (in sys_call_table)
;; A4,B4,A6,B6,A8,B8 = arguments of the syscall function
;; A4 is the return value register
;;
system_call_saved:
MVK .L2 1,B2
STW .D2T2 B2,*+SP(REGS__END+8) ; set syscall flag
MVC .S2 B2,ECR ; ack the software exception
UNMASK_INT B2 ; re-enable global IT
system_call_saved_noack:
;; Check system call number
MVK .S2 __NR_syscalls,B1
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_ni_syscall,A0
#endif
CMPLTU .L2 B0,B1,B1
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKH .S1 sys_ni_syscall,A0
#endif
;; Check for ptrace
GET_THREAD_INFO A12
#ifdef CONFIG_C6X_BIG_KERNEL
[!B1] B .S2X A0
#else
[!B1] B .S2 sys_ni_syscall
#endif
[!B1] ADDKPC .S2 ret_from_syscall_function,B3,4
;; Get syscall handler addr from sys_call_table
;; call tracesys_on or call syscall handler
LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
|| MVKL .S2 sys_call_table,B1
MVKH .S2 sys_call_table,B1
LDW .D2T2 *+B1[B0],B0
NOP 2
; A2 = thread_info flags
AND .D1 _TIF_SYSCALL_TRACE,A2,A2
[A2] BNOP .S1 tracesys_on,5
;; B0 = _sys_call_table[__NR_*]
B .S2 B0
ADDKPC .S2 ret_from_syscall_function,B3,4
ret_from_syscall_function:
STW .D2T1 A4,*+SP(REGS_A4+8) ; save return value in A4
; original A4 is in orig_A4
syscall_exit:
;; make sure we don't miss an interrupt setting need_resched or
;; sigpending between sampling and the rti
MASK_INT B2
LDW .D1T1 *+A12(THREAD_INFO_FLAGS),A2
MVK .S1 _TIF_ALLWORK_MASK,A1
NOP 3
AND .D1 A1,A2,A2 ; check for work to do
[A2] BNOP .S1 syscall_exit_work,5
restore_all:
RESTORE_ALL NRP,NTSR
;;
;; After a fork we jump here directly from resume,
;; so that A4 contains the previous task structure.
;;
ENTRY(ret_from_fork)
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 schedule_tail,A0
MVKH .S1 schedule_tail,A0
B .S2X A0
#else
B .S2 schedule_tail
#endif
ADDKPC .S2 ret_from_fork_2,B3,4
ret_from_fork_2:
;; return 0 in A4 for child process
GET_THREAD_INFO A12
BNOP .S2 syscall_exit,3
MVK .L2 0,B0
STW .D2T2 B0,*+SP(REGS_A4+8)
ENDPROC(ret_from_fork)
ENTRY(ret_from_kernel_thread)
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 schedule_tail,A0
MVKH .S1 schedule_tail,A0
B .S2X A0
#else
B .S2 schedule_tail
#endif
LDW .D2T2 *+SP(REGS_A0+8),B10 /* get fn */
ADDKPC .S2 0f,B3,3
0:
B .S2 B10 /* call fn */
LDW .D2T1 *+SP(REGS_A1+8),A4 /* get arg */
ADDKPC .S2 ret_from_fork_2,B3,3
ENDPROC(ret_from_kernel_thread)
;;
;; These are the interrupt handlers, responsible for calling c6x_do_IRQ()
;;
.macro SAVE_ALL_INT
SAVE_ALL IRP,ITSR
.endm
.macro CALL_INT int
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 c6x_do_IRQ,A0
MVKH .S1 c6x_do_IRQ,A0
BNOP .S2X A0,1
MVK .S1 int,A4
ADDAW .D2 SP,2,B4
MVKL .S2 ret_from_interrupt,B3
MVKH .S2 ret_from_interrupt,B3
#else
CALLP .S2 c6x_do_IRQ,B3
|| MVK .S1 int,A4
|| ADDAW .D2 SP,2,B4
B .S1 ret_from_interrupt
NOP 5
#endif
.endm
ENTRY(_int4_handler)
SAVE_ALL_INT
CALL_INT 4
ENDPROC(_int4_handler)
ENTRY(_int5_handler)
SAVE_ALL_INT
CALL_INT 5
ENDPROC(_int5_handler)
ENTRY(_int6_handler)
SAVE_ALL_INT
CALL_INT 6
ENDPROC(_int6_handler)
ENTRY(_int7_handler)
SAVE_ALL_INT
CALL_INT 7
ENDPROC(_int7_handler)
ENTRY(_int8_handler)
SAVE_ALL_INT
CALL_INT 8
ENDPROC(_int8_handler)
ENTRY(_int9_handler)
SAVE_ALL_INT
CALL_INT 9
ENDPROC(_int9_handler)
ENTRY(_int10_handler)
SAVE_ALL_INT
CALL_INT 10
ENDPROC(_int10_handler)
ENTRY(_int11_handler)
SAVE_ALL_INT
CALL_INT 11
ENDPROC(_int11_handler)
ENTRY(_int12_handler)
SAVE_ALL_INT
CALL_INT 12
ENDPROC(_int12_handler)
ENTRY(_int13_handler)
SAVE_ALL_INT
CALL_INT 13
ENDPROC(_int13_handler)
ENTRY(_int14_handler)
SAVE_ALL_INT
CALL_INT 14
ENDPROC(_int14_handler)
ENTRY(_int15_handler)
SAVE_ALL_INT
CALL_INT 15
ENDPROC(_int15_handler)
;;
;; Handler for uninitialized and spurious interrupts
;;
ENTRY(_bad_interrupt)
B .S2 IRP
NOP 5
ENDPROC(_bad_interrupt)
;;
;; Entry for NMI/exceptions/syscall
;;
ENTRY(_nmi_handler)
SAVE_ALL NRP,NTSR
MVC .S2 EFR,B2
CMPEQ .L2 1,B2,B2
|| MVC .S2 TSR,B1
CLR .S2 B1,10,10,B1
MVC .S2 B1,TSR
#ifdef CONFIG_C6X_BIG_KERNEL
[!B2] MVKL .S1 process_exception,A0
[!B2] MVKH .S1 process_exception,A0
[!B2] B .S2X A0
#else
[!B2] B .S2 process_exception
#endif
[B2] B .S2 system_call_saved
[!B2] ADDAW .D2 SP,2,B1
[!B2] MV .D1X B1,A4
ADDKPC .S2 ret_from_trap,B3,2
ret_from_trap:
MV .D2X A4,B0
[!B0] BNOP .S2 ret_from_exception,5
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S2 system_call_saved_noack,B3
MVKH .S2 system_call_saved_noack,B3
#endif
LDW .D2T2 *+SP(REGS_B0+8),B0
LDW .D2T1 *+SP(REGS_A4+8),A4
LDW .D2T2 *+SP(REGS_B4+8),B4
LDW .D2T1 *+SP(REGS_A6+8),A6
LDW .D2T2 *+SP(REGS_B6+8),B6
LDW .D2T1 *+SP(REGS_A8+8),A8
#ifdef CONFIG_C6X_BIG_KERNEL
|| B .S2 B3
#else
|| B .S2 system_call_saved_noack
#endif
LDW .D2T2 *+SP(REGS_B8+8),B8
NOP 4
ENDPROC(_nmi_handler)
;;
;; Jump to schedule() then return to ret_from_isr
;;
#ifdef CONFIG_PREEMPT
resume_kernel:
GET_THREAD_INFO A12
LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
NOP 4
[A1] BNOP .S2 restore_all,5
preempt_schedule:
GET_THREAD_INFO A2
LDW .D1T1 *+A2(THREAD_INFO_FLAGS),A1
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S2 preempt_schedule_irq,B0
MVKH .S2 preempt_schedule_irq,B0
NOP 2
#else
NOP 4
#endif
AND .D1 _TIF_NEED_RESCHED,A1,A1
[!A1] BNOP .S2 restore_all,5
#ifdef CONFIG_C6X_BIG_KERNEL
B .S2 B0
#else
B .S2 preempt_schedule_irq
#endif
ADDKPC .S2 preempt_schedule,B3,4
#endif /* CONFIG_PREEMPT */
ENTRY(enable_exception)
DINT
MVC .S2 TSR,B0
MVC .S2 B3,NRP
MVK .L2 0xc,B1
OR .D2 B0,B1,B0
MVC .S2 B0,TSR ; Set GEE and XEN in TSR
B .S2 NRP
NOP 5
ENDPROC(enable_exception)
;;
;; Special system calls
;; return address is in B3
;;
ENTRY(sys_rt_sigreturn)
ADD .D1X SP,8,A4
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 do_rt_sigreturn,A0
MVKH .S1 do_rt_sigreturn,A0
BNOP .S2X A0,5
#else
|| B .S2 do_rt_sigreturn
NOP 5
#endif
ENDPROC(sys_rt_sigreturn)
ENTRY(sys_pread_c6x)
MV .D2X A8,B7
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_pread64,A0
MVKH .S1 sys_pread64,A0
BNOP .S2X A0,5
#else
|| B .S2 sys_pread64
NOP 5
#endif
ENDPROC(sys_pread_c6x)
ENTRY(sys_pwrite_c6x)
MV .D2X A8,B7
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_pwrite64,A0
MVKH .S1 sys_pwrite64,A0
BNOP .S2X A0,5
#else
|| B .S2 sys_pwrite64
NOP 5
#endif
ENDPROC(sys_pwrite_c6x)
;; On Entry
;; A4 - path
;; B4 - offset_lo (LE), offset_hi (BE)
;; A6 - offset_lo (BE), offset_hi (LE)
ENTRY(sys_truncate64_c6x)
#ifdef CONFIG_CPU_BIG_ENDIAN
MV .S2 B4,B5
MV .D2X A6,B4
#else
MV .D2X A6,B5
#endif
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_truncate64,A0
MVKH .S1 sys_truncate64,A0
BNOP .S2X A0,5
#else
|| B .S2 sys_truncate64
NOP 5
#endif
ENDPROC(sys_truncate64_c6x)
;; On Entry
;; A4 - fd
;; B4 - offset_lo (LE), offset_hi (BE)
;; A6 - offset_lo (BE), offset_hi (LE)
ENTRY(sys_ftruncate64_c6x)
#ifdef CONFIG_CPU_BIG_ENDIAN
MV .S2 B4,B5
MV .D2X A6,B4
#else
MV .D2X A6,B5
#endif
#ifdef CONFIG_C6X_BIG_KERNEL
|| MVKL .S1 sys_ftruncate64,A0
MVKH .S1 sys_ftruncate64,A0
BNOP .S2X A0,5
#else
|| B .S2 sys_ftruncate64
NOP 5
#endif
ENDPROC(sys_ftruncate64_c6x)
;; On Entry
;; A4 - fd
;; B4 - offset_lo (LE), offset_hi (BE)
;; A6 - offset_lo (BE), offset_hi (LE)
;; B6 - len_lo (LE), len_hi (BE)
;; A8 - len_lo (BE), len_hi (LE)
;; B8 - advice
ENTRY(sys_fadvise64_64_c6x)
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 sys_fadvise64_64,A0
MVKH .S1 sys_fadvise64_64,A0
BNOP .S2X A0,2
#else
B .S2 sys_fadvise64_64
NOP 2
#endif
#ifdef CONFIG_CPU_BIG_ENDIAN
MV .L2 B4,B5
|| MV .D2X A6,B4
MV .L1 A8,A6
|| MV .D1X B6,A7
#else
MV .D2X A6,B5
MV .L1 A8,A7
|| MV .D1X B6,A6
#endif
MV .L2 B8,B6
ENDPROC(sys_fadvise64_64_c6x)
;; On Entry
;; A4 - fd
;; B4 - mode
;; A6 - offset_hi
;; B6 - offset_lo
;; A8 - len_hi
;; B8 - len_lo
ENTRY(sys_fallocate_c6x)
#ifdef CONFIG_C6X_BIG_KERNEL
MVKL .S1 sys_fallocate,A0
MVKH .S1 sys_fallocate,A0
BNOP .S2X A0,1
#else
B .S2 sys_fallocate
NOP
#endif
MV .D1 A6,A7
MV .D1X B6,A6
MV .D2X A8,B7
MV .D2 B8,B6
ENDPROC(sys_fallocate_c6x)
;; put this in .neardata for faster access when using DSBT mode
.section .neardata,"aw",@progbits
.global current_ksp
.hidden current_ksp
current_ksp:
.word init_thread_union + THREAD_START_SP
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,651
|
arch/c6x/kernel/switch_to.S
|
/*
* Copyright (C) 2011 Texas Instruments Incorporated
* Author: Mark Salter (msalter@redhat.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#define SP B15
/*
* void __switch_to(struct thread_info *prev,
* struct thread_info *next,
* struct task_struct *tsk) ;
*/
ENTRY(__switch_to)
LDDW .D2T2 *+B4(THREAD_B15_14),B7:B6
|| MV .L2X A4,B5 ; prev
|| MV .L1X B4,A5 ; next
|| MVC .S2 RILC,B1
STW .D2T2 B3,*+B5(THREAD_PC)
|| STDW .D1T1 A13:A12,*+A4(THREAD_A13_12)
|| MVC .S2 ILC,B0
LDW .D2T2 *+B4(THREAD_PC),B3
|| LDDW .D1T1 *+A5(THREAD_A13_12),A13:A12
STDW .D1T1 A11:A10,*+A4(THREAD_A11_10)
|| STDW .D2T2 B1:B0,*+B5(THREAD_RICL_ICL)
#ifndef __DSBT__
|| MVKL .S2 current_ksp,B1
#endif
STDW .D2T2 B15:B14,*+B5(THREAD_B15_14)
|| STDW .D1T1 A15:A14,*+A4(THREAD_A15_14)
#ifndef __DSBT__
|| MVKH .S2 current_ksp,B1
#endif
;; Switch to next SP
MV .S2 B7,SP
#ifdef __DSBT__
|| STW .D2T2 B7,*+B14(current_ksp)
#else
|| STW .D2T2 B7,*B1
|| MV .L2 B6,B14
#endif
|| LDDW .D1T1 *+A5(THREAD_RICL_ICL),A1:A0
STDW .D2T2 B11:B10,*+B5(THREAD_B11_10)
|| LDDW .D1T1 *+A5(THREAD_A15_14),A15:A14
STDW .D2T2 B13:B12,*+B5(THREAD_B13_12)
|| LDDW .D1T1 *+A5(THREAD_A11_10),A11:A10
B .S2 B3 ; return in next E1
|| LDDW .D2T2 *+B4(THREAD_B13_12),B13:B12
LDDW .D2T2 *+B4(THREAD_B11_10),B11:B10
NOP
MV .L2X A0,B0
|| MV .S1 A6,A4
MVC .S2 B0,ILC
|| MV .L2X A1,B1
MVC .S2 B1,RILC
ENDPROC(__switch_to)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,532
|
arch/c6x/kernel/vmlinux.lds.S
|
/* SPDX-License-Identifier: GPL-2.0 */
/*
* ld script for the c6x kernel
*
* Copyright (C) 2010, 2011 Texas Instruments Incorporated
* Mark Salter <msalter@redhat.com>
*/
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
ENTRY(_c_int00)
#if defined(CONFIG_CPU_BIG_ENDIAN)
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
#define READONLY_SEGMENT_START \
. = PAGE_OFFSET;
#define READWRITE_SEGMENT_START \
. = ALIGN(128); \
_data_lma = .;
SECTIONS
{
/*
* Start kernel read only segment
*/
READONLY_SEGMENT_START
.vectors :
{
_vectors_start = .;
*(.vectors)
. = ALIGN(0x400);
_vectors_end = .;
}
/*
* This section contains data which may be shared with other
* cores. It needs to be a fixed offset from PAGE_OFFSET
* regardless of kernel configuration.
*/
.virtio_ipc_dev :
{
*(.virtio_ipc_dev)
}
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init :
{
_sinittext = .;
HEAD_TEXT
INIT_TEXT
_einittext = .;
}
INIT_DATA_SECTION(16)
PERCPU_SECTION(128)
. = ALIGN(PAGE_SIZE);
__init_end = .;
.text :
{
_text = .;
_stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
}
EXCEPTION_TABLE(16)
NOTES
RO_DATA_SECTION(PAGE_SIZE)
.const :
{
*(.const .const.* .gnu.linkonce.r.*)
*(.switch)
}
. = ALIGN (8) ;
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
{
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
BYTE(0); /* section always has contents */
. = _fdt_start + 0x4000; /* Pad up to 16kbyte */
_fdt_end = . ;
}
_etext = .;
/*
* Start kernel read-write segment.
*/
READWRITE_SEGMENT_START
_sdata = .;
.fardata : AT(ADDR(.fardata) - LOAD_OFFSET)
{
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(128)
READ_MOSTLY_DATA(128)
DATA_DATA
CONSTRUCTORS
*(.data1)
*(.fardata .fardata.*)
*(.data.debug_bpt)
}
.neardata ALIGN(8) : AT(ADDR(.neardata) - LOAD_OFFSET)
{
*(.neardata2 .neardata2.* .gnu.linkonce.s2.*)
*(.neardata .neardata.* .gnu.linkonce.s.*)
. = ALIGN(8);
}
BUG_TABLE
_edata = .;
__bss_start = .;
SBSS(8)
BSS(8)
.far :
{
. = ALIGN(8);
*(.dynfar)
*(.far .far.* .gnu.linkonce.b.*)
. = ALIGN(8);
}
__bss_stop = .;
_end = .;
DWARF_DEBUG
/DISCARD/ :
{
EXIT_TEXT
EXIT_DATA
EXIT_CALL
*(.discard)
*(.discard.*)
*(.interp)
}
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,866
|
arch/c6x/lib/divi.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu. It stores B3 on the stack.
.text
ENTRY(__c6xabi_divi)
call .s2 __c6xabi_divu
|| mv .d2 B3, B5
|| cmpgt .l1 0, A4, A1
|| cmpgt .l2 0, B4, B1
[A1] neg .l1 A4, A4
|| [B1] neg .l2 B4, B4
|| xor .s1x A1, B1, A1
[A1] addkpc .s2 _divu_ret, B3, 4
_divu_ret:
neg .l1 A4, A4
|| mv .l2 B3,B5
|| ret .s2 B5
nop 5
ENDPROC(__c6xabi_divi)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,126
|
arch/c6x/lib/pop_rts.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_pop_rts)
lddw .d2t2 *++B15, B3:B2
lddw .d2t1 *++B15, A11:A10
lddw .d2t2 *++B15, B11:B10
lddw .d2t1 *++B15, A13:A12
lddw .d2t2 *++B15, B13:B12
lddw .d2t1 *++B15, A15:A14
|| b .s2 B3
ldw .d2t2 *++B15[2], B14
nop 4
ENDPROC(__c6xabi_pop_rts)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,634
|
arch/c6x/lib/remu.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu. It stores B3 on the stack.
.text
ENTRY(__c6xabi_remu)
;; The ABI seems designed to prevent these functions calling each other,
;; so we duplicate most of the divsi3 code here.
mv .s2x A4, B1
lmbd .l2 1, B4, B1
|| [!B1] b .s2 B3 ; RETURN A
|| [!B1] mvk .d2 1, B4
mv .l1x B1, A7
|| shl .s2 B4, B1, B4
cmpltu .l1x A4, B4, A1
[!A1] sub .l1x A4, B4, A4
shru .s2 B4, 1, B4
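;; Restoring shift-and-subtract division: each subc step subtracts the
;; normalized divisor when it fits and shifts the partial remainder in,
;; while B1 counts the remaining steps; A7 keeps the normalization
;; amount for the final extu.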
_remu_loop:
cmpgt .l2 B1, 7, B0
|| [B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
;; RETURN A may happen here (note: must happen before the next branch)
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
|| [B0] b .s1 _remu_loop
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
;; loop backwards branch happens here
ret .s2 B3
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
extu .s1 A4, A7, A4
nop 2
ENDPROC(__c6xabi_remu)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,032
|
arch/c6x/lib/remi.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu. It stores B3 on the stack.
.text
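;; Signed remainder: rem = dividend - (dividend / divisor) * divisor.
;; __c6xabi_divu divides the absolute values; the quotient sign is
;; fixed up at _divu_ret_1 before the mpy32/sub below reconstructs rem.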
ENTRY(__c6xabi_remi)
stw .d2t2 B3, *B15--[2]
|| cmpgt .l1 0, A4, A1
|| cmpgt .l2 0, B4, B2
|| mv .s1 A4, A5
|| call .s2 __c6xabi_divu
[A1] neg .l1 A4, A4
|| [B2] neg .l2 B4, B4
|| xor .s2x B2, A1, B0
|| mv .d2 B4, B2
[B0] addkpc .s2 _divu_ret_1, B3, 1
[!B0] addkpc .s2 _divu_ret_2, B3, 1
nop 2
_divu_ret_1:
neg .l1 A4, A4
_divu_ret_2:
ldw .d2t2 *++B15[2], B3
mpy32 .m1x A4, B2, A6
nop 3
ret .s2 B3
sub .l1 A5, A6, A4
nop 4
ENDPROC(__c6xabi_remi)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,150
|
arch/c6x/lib/strasgi_64plus.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_strasgi_64plus)
shru .s2x a6, 2, b31
|| mv .s1 a4, a30
|| mv .d2 b4, b30
add .s2 -4, b31, b31
sploopd 1
|| mvc .s2 b31, ilc
ldw .d2t2 *b30++, b31
nop 4
mv .s1x b31,a31
spkernel 6, 0
|| stw .d1t1 a31, *a30++
ret .s2 b3
nop 5
ENDPROC(__c6xabi_strasgi_64plus)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,337
|
arch/c6x/lib/divremi.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_divremi)
stw .d2t2 B3, *B15--[2]
|| cmpgt .l1 0, A4, A1
|| cmpgt .l2 0, B4, B2
|| mv .s1 A4, A5
|| call .s2 __c6xabi_divu
[A1] neg .l1 A4, A4
|| [B2] neg .l2 B4, B4
|| xor .s2x B2, A1, B0
|| mv .d2 B4, B2
[B0] addkpc .s2 _divu_ret_1, B3, 1
[!B0] addkpc .s2 _divu_ret_2, B3, 1
nop 2
_divu_ret_1:
neg .l1 A4, A4
_divu_ret_2:
ldw .d2t2 *++B15[2], B3
mpy32 .m1x A4, B2, A6
nop 3
ret .s2 B3
sub .l1 A5, A6, A5
nop 4
ENDPROC(__c6xabi_divremi)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,332
|
arch/c6x/lib/strasgi.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_strasgi)
;; This is essentially memcpy, with alignment known to be at least
;; 4, and the size a multiple of 4 greater than or equal to 28.
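;;
;; A rough C sketch of that contract (not part of the original source;
;; the function name is only for illustration). dst arrives in A4,
;; src in B4 and the byte count in A6:
;;
;; void strasgi_sketch(unsigned int *dst, const unsigned int *src,
;;		unsigned int len)	/* multiple of 4, >= 28 */
;; {
;;	unsigned int i;
;;
;;	for (i = 0; i < len / 4; i++)
;;		dst[i] = src[i];
;; }
;;
;; The code below software-pipelines this copy, keeping six words in
;; flight (A0, A1, A5, A7, A8, A9) so the loads run ahead of the stores.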
ldw .d2t1 *B4++, A0
|| mvk .s2 16, B1
ldw .d2t1 *B4++, A1
|| mvk .s2 20, B2
|| sub .d1 A6, 24, A6
ldw .d2t1 *B4++, A5
ldw .d2t1 *B4++, A7
|| mv .l2x A6, B7
ldw .d2t1 *B4++, A8
ldw .d2t1 *B4++, A9
|| mv .s2x A0, B5
|| cmpltu .l2 B2, B7, B0
_strasgi_loop:
stw .d1t2 B5, *A4++
|| [B0] ldw .d2t1 *B4++, A0
|| mv .s2x A1, B5
|| mv .l2 B7, B6
[B0] sub .d2 B6, 24, B7
|| [B0] b .s2 _strasgi_loop
|| cmpltu .l2 B1, B6, B0
[B0] ldw .d2t1 *B4++, A1
|| stw .d1t2 B5, *A4++
|| mv .s2x A5, B5
|| cmpltu .l2 12, B6, B0
[B0] ldw .d2t1 *B4++, A5
|| stw .d1t2 B5, *A4++
|| mv .s2x A7, B5
|| cmpltu .l2 8, B6, B0
[B0] ldw .d2t1 *B4++, A7
|| stw .d1t2 B5, *A4++
|| mv .s2x A8, B5
|| cmpltu .l2 4, B6, B0
[B0] ldw .d2t1 *B4++, A8
|| stw .d1t2 B5, *A4++
|| mv .s2x A9, B5
|| cmpltu .l2 0, B6, B0
[B0] ldw .d2t1 *B4++, A9
|| stw .d1t2 B5, *A4++
|| mv .s2x A0, B5
|| cmpltu .l2 B2, B7, B0
;; loop back branch happens here
cmpltu .l2 B1, B6, B0
|| ret .s2 b3
[B0] stw .d1t1 A1, *A4++
|| cmpltu .l2 12, B6, B0
[B0] stw .d1t1 A5, *A4++
|| cmpltu .l2 8, B6, B0
[B0] stw .d1t1 A7, *A4++
|| cmpltu .l2 4, B6, B0
[B0] stw .d1t1 A8, *A4++
|| cmpltu .l2 0, B6, B0
[B0] stw .d1t1 A9, *A4++
;; return happens here
ENDPROC(__c6xabi_strasgi)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,066
|
arch/c6x/lib/negll.S
|
;; Copyright (C) 2010 Texas Instruments Incorporated
;; Contributed by Mark Salter <msalter@redhat.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
;; int64_t __c6xabi_negll(int64_t val)
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_negll)
b .s2 B3
mvk .l1 0,A0
subu .l1 A0,A4,A3:A2
sub .l1 A0,A5,A0
|| ext .s1 A3,24,24,A5
add .l1 A5,A0,A5
mv .s1 A2,A4
ENDPROC(__c6xabi_negll)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,283
|
arch/c6x/lib/llshr.S
|
;; Copyright (C) 2010 Texas Instruments Incorporated
;; Contributed by Mark Salter <msalter@redhat.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
;; uint64_t __c6xabi_llshr(uint64_t val, uint shift)
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_llshr)
mv .l1x B4,A1
[!A1] b .s2 B3 ; return if zero shift count
mvk .s1 32,A0
sub .d1 A0,A1,A0
cmplt .l1 0,A0,A2
[A2] shl .s1 A5,A0,A0
nop
[!A2] neg .l1 A0,A4
|| [A2] shru .s1 A4,A1,A4
[!A2] shr .s1 A5,A4,A4
|| [A2] or .d1 A4,A0,A4
[!A2] shr .s1 A5,0x1f,A5
[A2] shr .s1 A5,A1,A5
bnop .s2 B3,5
ENDPROC(__c6xabi_llshr)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,270
|
arch/c6x/lib/llshl.S
|
;; Copyright (C) 2010 Texas Instruments Incorporated
;; Contributed by Mark Salter <msalter@redhat.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
;; uint64_t __c6xabi_llshl(uint64_t val, uint shift)
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_llshl)
mv .l1x B4,A1
[!A1] b .s2 B3 ; just return if zero shift
mvk .s1 32,A0
sub .d1 A0,A1,A0
cmplt .l1 0,A0,A2
[A2] shru .s1 A4,A0,A0
[!A2] neg .l1 A0,A5
|| [A2] shl .s1 A5,A1,A5
[!A2] shl .s1 A4,A5,A5
|| [A2] or .d1 A5,A0,A5
|| [!A2] mvk .l1 0,A4
[A2] shl .s1 A4,A1,A4
bnop .s2 B3,5
ENDPROC(__c6xabi_llshl)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,122
|
arch/c6x/lib/push_rts.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_push_rts)
stw .d2t2 B14, *B15--[2]
stdw .d2t1 A15:A14, *B15--
|| b .s2x A3
stdw .d2t2 B13:B12, *B15--
stdw .d2t1 A13:A12, *B15--
stdw .d2t2 B11:B10, *B15--
stdw .d2t1 A11:A10, *B15--
stdw .d2t2 B3:B2, *B15--
ENDPROC(__c6xabi_push_rts)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,587
|
arch/c6x/lib/mpyll.S
|
;; Copyright (C) 2010 Texas Instruments Incorporated
;; Contributed by Mark Salter <msalter@redhat.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
;; uint64_t __c6xabi_mpyll(uint64_t x, uint64_t y)
;;
;; 64x64 multiply
;; First compute partial results using 32-bit parts of x and y:
;;
;; b63 b32 b31 b0
;; -----------------------------
;; | 1 | 0 |
;; -----------------------------
;;
;; P0 = X0*Y0
;; P1 = X0*Y1 + X1*Y0
;; P2 = X1*Y1
;;
;; result = (P2 << 64) + (P1 << 32) + P0
;;
;; Since the result is also 64-bit, we can skip the P2 term.
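;;
;; A rough C sketch of the same computation (not part of the original
;; source; the function name is only for illustration, and 32-bit int /
;; 64-bit long long widths are assumed):
;;
;; unsigned long long mpyll_sketch(unsigned long long x, unsigned long long y)
;; {
;;	unsigned int x0 = (unsigned int)x, x1 = (unsigned int)(x >> 32);
;;	unsigned int y0 = (unsigned int)y, y1 = (unsigned int)(y >> 32);
;;	unsigned long long p0 = (unsigned long long)x0 * y0;	/* P0 */
;;	unsigned int p1 = x0 * y1 + x1 * y0;	/* P1, low 32 bits only */
;;
;;	return p0 + ((unsigned long long)p1 << 32);
;; }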
.text
ENTRY(__c6xabi_mpyll)
mpy32u .m1x A4,B4,A1:A0 ; X0*Y0
b .s2 B3
|| mpy32u .m2x B5,A4,B1:B0 ; X0*Y1 (don't need upper 32-bits)
|| mpy32u .m1x A5,B4,A3:A2 ; X1*Y0 (don't need upper 32-bits)
nop
nop
mv .s1 A0,A4
add .l1x A2,B0,A5
add .s1 A1,A5,A5
ENDPROC(__c6xabi_mpyll)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 7,639
|
arch/c6x/lib/csum_64plus.S
|
;
; linux/arch/c6x/lib/csum_64plus.s
;
; Port on Texas Instruments TMS320C6x architecture
;
; Copyright (C) 2006, 2009, 2010, 2011 Texas Instruments Incorporated
; Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
;
; This program is free software; you can redistribute it and/or modify
; it under the terms of the GNU General Public License version 2 as
; published by the Free Software Foundation.
;
#include <linux/linkage.h>
;
;unsigned int csum_partial_copy(const char *src, char * dst,
; int len, int sum)
;
; A4: src
; B4: dst
; A6: len
; B6: sum
; return csum in A4
;
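; A rough little-endian C sketch of the operation (not part of the
; original source; the name is only for illustration). The real code
; below works a word at a time and also handles the big-endian and
; unaligned cases, but the net effect is: copy the buffer while
; accumulating a folded 16-bit sum, then add in the caller's csum.
;
;unsigned int csum_partial_copy_sketch(const unsigned char *src,
; unsigned char *dst, int len, unsigned int sum)
;{
; unsigned int acc = 0;
;
; while (len > 1) { /* sum and copy 16-bit chunks */
; acc += src[0] | (src[1] << 8);
; dst[0] = src[0];
; dst[1] = src[1];
; src += 2; dst += 2; len -= 2;
; }
; if (len) { /* trailing byte, if any */
; acc += *src;
; *dst = *src;
; }
; while (acc >> 16) /* fold carries into 16 bits */
; acc = (acc & 0xffff) + (acc >> 16);
; return acc + sum;
;}
;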
.text
ENTRY(csum_partial_copy)
MVC .S2 ILC,B30
MV .D1X B6,A31 ; given csum
ZERO .D1 A9 ; csum (a side)
|| ZERO .D2 B9 ; csum (b side)
|| SHRU .S2X A6,2,B5 ; len / 4
;; Check alignment and size
AND .S1 3,A4,A1
|| AND .S2 3,B4,B0
OR .L2X B0,A1,B0 ; non aligned condition
|| MVC .S2 B5,ILC
|| MVK .D2 1,B2
|| MV .D1X B5,A1 ; words condition
[!A1] B .S1 L8
[B0] BNOP .S1 L6,5
SPLOOP 1
;; Main loop for aligned words
LDW .D1T1 *A4++,A7
NOP 4
MV .S2X A7,B7
|| EXTU .S1 A7,0,16,A16
STW .D2T2 B7,*B4++
|| MPYU .M2 B7,B2,B8
|| ADD .L1 A16,A9,A9
NOP
SPKERNEL 8,0
|| ADD .L2 B8,B9,B9
ZERO .D1 A1
|| ADD .L1X A9,B9,A9 ; add csum from a and b sides
L6:
[!A1] BNOP .S1 L8,5
;; Main loop for non-aligned words
SPLOOP 2
|| MVK .L1 1,A2
LDNW .D1T1 *A4++,A7
NOP 3
NOP
MV .S2X A7,B7
|| EXTU .S1 A7,0,16,A16
|| MPYU .M1 A7,A2,A8
ADD .L1 A16,A9,A9
SPKERNEL 6,0
|| STNW .D2T2 B7,*B4++
|| ADD .L1 A8,A9,A9
L8: AND .S2X 2,A6,B5
CMPGT .L2 B5,0,B0
[!B0] BNOP .S1 L82,4
;; Handle the remaining half-word
ZERO .L1 A7
|| ZERO .D1 A8
#ifdef CONFIG_CPU_BIG_ENDIAN
LDBU .D1T1 *A4++,A7
LDBU .D1T1 *A4++,A8
NOP 3
SHL .S1 A7,8,A0
ADD .S1 A8,A9,A9
STB .D2T1 A7,*B4++
|| ADD .S1 A0,A9,A9
STB .D2T1 A8,*B4++
#else
LDBU .D1T1 *A4++,A7
LDBU .D1T1 *A4++,A8
NOP 3
ADD .S1 A7,A9,A9
SHL .S1 A8,8,A0
STB .D2T1 A7,*B4++
|| ADD .S1 A0,A9,A9
STB .D2T1 A8,*B4++
#endif
;; Handle the last byte, if any
L82: AND .S2X 1,A6,B0
[!B0] BNOP .S1 L9,5
|| ZERO .L1 A7
L83: LDBU .D1T1 *A4++,A7
NOP 4
MV .L2X A7,B7
#ifdef CONFIG_CPU_BIG_ENDIAN
STB .D2T2 B7,*B4++
|| SHL .S1 A7,8,A7
ADD .S1 A7,A9,A9
#else
STB .D2T2 B7,*B4++
|| ADD .S1 A7,A9,A9
#endif
;; Fold the csum
L9: SHRU .S2X A9,16,B0
[!B0] BNOP .S1 L10,5
L91: SHRU .S2X A9,16,B4
|| EXTU .S1 A9,16,16,A3
ADD .D1X A3,B4,A9
SHRU .S1 A9,16,A0
[A0] BNOP .S1 L91,5
L10: ADD .D1 A31,A9,A9
MV .D1 A9,A4
BNOP .S2 B3,4
MVC .S2 B30,ILC
ENDPROC(csum_partial_copy)
;
;unsigned short
;ip_fast_csum(unsigned char *iph, unsigned int ihl)
;{
; unsigned int checksum = 0;
; unsigned short *tosum = (unsigned short *) iph;
; int len;
;
; len = ihl*4;
;
; if (len <= 0)
; return 0;
;
; while(len) {
; len -= 2;
; checksum += *tosum++;
; }
; if (len & 1)
; checksum += *(unsigned char*) tosum;
;
; while(checksum >> 16)
; checksum = (checksum & 0xffff) + (checksum >> 16);
;
; return ~checksum;
;}
;
; A4: iph
; B4: ihl
; return checksum in A4
;
.text
ENTRY(ip_fast_csum)
ZERO .D1 A5
|| MVC .S2 ILC,B30
SHL .S2 B4,2,B0
CMPGT .L2 B0,0,B1
[!B1] BNOP .S1 L15,4
[!B1] ZERO .D1 A3
[!B0] B .S1 L12
SHRU .S2 B0,1,B0
MVC .S2 B0,ILC
NOP 3
SPLOOP 1
LDHU .D1T1 *A4++,A3
NOP 3
NOP
SPKERNEL 5,0
|| ADD .L1 A3,A5,A5
L12: SHRU .S1 A5,16,A0
[!A0] BNOP .S1 L14,5
L13: SHRU .S2X A5,16,B4
EXTU .S1 A5,16,16,A3
ADD .D1X A3,B4,A5
SHRU .S1 A5,16,A0
[A0] BNOP .S1 L13,5
L14: NOT .D1 A5,A3
EXTU .S1 A3,16,16,A3
L15: BNOP .S2 B3,3
MVC .S2 B30,ILC
MV .D1 A3,A4
ENDPROC(ip_fast_csum)
;
;unsigned short
;do_csum(unsigned char *buff, unsigned int len)
;{
; int odd, count;
; unsigned int result = 0;
;
; if (len <= 0)
; goto out;
; odd = 1 & (unsigned long) buff;
; if (odd) {
;#ifdef __LITTLE_ENDIAN
; result += (*buff << 8);
;#else
; result = *buff;
;#endif
; len--;
; buff++;
; }
; count = len >> 1; /* nr of 16-bit words.. */
; if (count) {
; if (2 & (unsigned long) buff) {
; result += *(unsigned short *) buff;
; count--;
; len -= 2;
; buff += 2;
; }
; count >>= 1; /* nr of 32-bit words.. */
; if (count) {
; unsigned int carry = 0;
; do {
; unsigned int w = *(unsigned int *) buff;
; count--;
; buff += 4;
; result += carry;
; result += w;
; carry = (w > result);
; } while (count);
; result += carry;
; result = (result & 0xffff) + (result >> 16);
; }
; if (len & 2) {
; result += *(unsigned short *) buff;
; buff += 2;
; }
; }
; if (len & 1)
;#ifdef __LITTLE_ENDIAN
; result += *buff;
;#else
; result += (*buff << 8);
;#endif
; result = (result & 0xffff) + (result >> 16);
; /* add up carry.. */
; result = (result & 0xffff) + (result >> 16);
; if (odd)
; result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
;out:
; return result;
;}
;
; A4: buff
; B4: len
; return checksum in A4
;
ENTRY(do_csum)
CMPGT .L2 B4,0,B0
[!B0] BNOP .S1 L26,3
EXTU .S1 A4,31,31,A0
MV .L1 A0,A3
|| MV .S1X B3,A5
|| MV .L2 B4,B3
|| ZERO .D1 A1
#ifdef CONFIG_CPU_BIG_ENDIAN
[A0] SUB .L2 B3,1,B3
|| [A0] LDBU .D1T1 *A4++,A1
#else
[!A0] BNOP .S1 L21,5
|| [A0] LDBU .D1T1 *A4++,A0
SUB .L2 B3,1,B3
|| SHL .S1 A0,8,A1
L21:
#endif
SHR .S2 B3,1,B0
[!B0] BNOP .S1 L24,3
MVK .L1 2,A0
AND .L1 A4,A0,A0
[!A0] BNOP .S1 L22,5
|| [A0] LDHU .D1T1 *A4++,A0
SUB .L2 B0,1,B0
|| SUB .S2 B3,2,B3
|| ADD .L1 A0,A1,A1
L22:
SHR .S2 B0,1,B0
|| ZERO .L1 A0
[!B0] BNOP .S1 L23,5
|| [B0] MVC .S2 B0,ILC
SPLOOP 3
SPMASK L1
|| MV .L1 A1,A2
|| LDW .D1T1 *A4++,A1
NOP 4
ADD .L1 A0,A1,A0
ADD .L1 A2,A0,A2
SPKERNEL 1,2
|| CMPGTU .L1 A1,A2,A0
ADD .L1 A0,A2,A6
EXTU .S1 A6,16,16,A7
SHRU .S2X A6,16,B0
NOP 1
ADD .L1X A7,B0,A1
L23:
MVK .L2 2,B0
AND .L2 B3,B0,B0
[B0] LDHU .D1T1 *A4++,A0
NOP 4
[B0] ADD .L1 A0,A1,A1
L24:
EXTU .S2 B3,31,31,B0
#ifdef CONFIG_CPU_BIG_ENDIAN
[!B0] BNOP .S1 L25,4
|| [B0] LDBU .D1T1 *A4,A0
SHL .S1 A0,8,A0
ADD .L1 A0,A1,A1
L25:
#else
[B0] LDBU .D1T1 *A4,A0
NOP 4
[B0] ADD .L1 A0,A1,A1
#endif
EXTU .S1 A1,16,16,A0
SHRU .S2X A1,16,B0
NOP 1
ADD .L1X A0,B0,A0
SHRU .S1 A0,16,A1
ADD .L1 A0,A1,A0
EXTU .S1 A0,16,16,A1
EXTU .S1 A1,16,24,A2
EXTU .S1 A1,24,16,A0
|| MV .L2X A3,B0
[B0] OR .L1 A0,A2,A1
L26:
NOP 1
BNOP .S2X A5,4
MV .L1 A1,A4
ENDPROC(do_csum)
;__wsum csum_partial(const void *buff, int len, __wsum wsum)
;{
; unsigned int sum = (__force unsigned int)wsum;
; unsigned int result = do_csum(buff, len);
;
; /* add in old sum, and carry.. */
; result += sum;
; if (sum > result)
; result += 1;
; return (__force __wsum)result;
;}
;
ENTRY(csum_partial)
MV .L1X B3,A9
|| CALLP .S2 do_csum,B3
|| MV .S1 A6,A8
BNOP .S2X A9,2
ADD .L1 A8,A4,A1
CMPGTU .L1 A8,A1,A0
ADD .L1 A1,A0,A4
ENDPROC(csum_partial)
;unsigned short
;ip_compute_csum(unsigned char *buff, unsigned int len)
;
; A4: buff
; B4: len
; return checksum in A4
ENTRY(ip_compute_csum)
MV .L1X B3,A9
|| CALLP .S2 do_csum,B3
BNOP .S2X A9,3
NOT .S1 A4,A4
CLR .S1 A4,16,31,A4
ENDPROC(ip_compute_csum)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,688
|
arch/c6x/lib/divremu.S
|
;; Copyright 2011 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_divremu)
;; We use a series of up to 31 subc instructions. First, we find
;; out how many leading zero bits there are in the divisor. This
;; gives us both a shift count for aligning (shifting) the divisor,
;; and the number of times we have to execute subc.
;; At the end, we have both the remainder and most of the quotient
;; in A4. The top bit of the quotient is computed first and is
;; placed in A2.
;; Return immediately if the dividend is zero. Setting B4 to 1
;; is a trick to allow us to leave the following insns in the jump
;; delay slot without affecting the result.
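;;
;; A rough C sketch of the scheme described above (not part of the
;; original source; the names are only for illustration). subc()
;; models the C6x SUBC instruction and __builtin_clz stands in for
;; lmbd 1, d:
;;
;; static unsigned int subc(unsigned int a, unsigned int b)
;; {
;;	return a >= b ? ((a - b) << 1) | 1 : a << 1;
;; }
;;
;; void divremu_sketch(unsigned int n, unsigned int d,
;;		unsigned int *quo, unsigned int *rem)
;; {
;;	unsigned int shift, acc, top, i;
;;
;;	if (n == 0) {			/* mirrors the early return */
;;		*quo = 0;
;;		*rem = 0;
;;		return;
;;	}
;;	shift = __builtin_clz(d);	/* d == 0 is not handled */
;;	d <<= shift;			/* left-justify the divisor */
;;	acc = n;
;;	top = acc >= d;			/* top quotient bit (A2) */
;;	if (top)
;;		acc -= d;
;;	d >>= 1;
;;	for (i = 0; i < shift; i++)	/* the subc steps */
;;		acc = subc(acc, d);
;;	*quo = (top << shift) | (acc & ((1u << shift) - 1));
;;	*rem = acc >> shift;
;; }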
mv .s2x A4, B1
[b1] lmbd .l2 1, B4, B1
||[!b1] b .s2 B3 ; RETURN A
||[!b1] mvk .d2 1, B4
||[!b1] zero .s1 A5
mv .l1x B1, A6
|| shl .s2 B4, B1, B4
;; The loop performs a maximum of 28 steps, so we do the
;; first 3 here.
cmpltu .l1x A4, B4, A2
[!A2] sub .l1x A4, B4, A4
|| shru .s2 B4, 1, B4
|| xor .s1 1, A2, A2
shl .s1 A2, 31, A2
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; RETURN A may happen here (note: must happen before the next branch)
__divremu0:
cmpgt .l2 B1, 7, B0
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
|| [b0] b .s1 __divremu0
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
[b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
;; loop backwards branch happens here
ret .s2 B3
|| mvk .s1 32, A1
sub .l1 A1, A6, A6
|| extu .s1 A4, A6, A5
shl .s1 A4, A6, A4
shru .s1 A4, 1, A4
|| sub .l1 A6, 1, A6
or .l1 A2, A4, A4
shru .s1 A4, A6, A4
nop
ENDPROC(__c6xabi_divremu)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 3,249
|
arch/c6x/lib/divu.S
|
;; Copyright 2010 Free Software Foundation, Inc.
;; Contributed by Bernd Schmidt <bernds@codesourcery.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#include <linux/linkage.h>
;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu. It stores B3 on the stack.
.text
ENTRY(__c6xabi_divu)
;; We use a series of up to 31 subc instructions. First, we find
;; out how many leading zero bits there are in the divisor. This
;; gives us both a shift count for aligning (shifting) the divisor,
;; and the number of times we have to execute subc.
;; At the end, we have both the remainder and most of the quotient
;; in A4. The top bit of the quotient is computed first and is
;; placed in A2.
;; Return immediately if the dividend is zero.
mv .s2x A4, B1
[B1] lmbd .l2 1, B4, B1
|| [!B1] b .s2 B3 ; RETURN A
|| [!B1] mvk .d2 1, B4
mv .l1x B1, A6
|| shl .s2 B4, B1, B4
;; The loop performs a maximum of 28 steps, so we do the
;; first 3 here.
cmpltu .l1x A4, B4, A2
[!A2] sub .l1x A4, B4, A4
|| shru .s2 B4, 1, B4
|| xor .s1 1, A2, A2
shl .s1 A2, 31, A2
|| [B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
;; RETURN A may happen here (note: must happen before the next branch)
_divu_loop:
cmpgt .l2 B1, 7, B0
|| [B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
|| [B0] b .s1 _divu_loop
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
[B1] subc .l1x A4,B4,A4
|| [B1] add .s2 -1, B1, B1
;; loop backwards branch happens here
ret .s2 B3
|| mvk .s1 32, A1
sub .l1 A1, A6, A6
shl .s1 A4, A6, A4
shru .s1 A4, 1, A4
|| sub .l1 A6, 1, A6
or .l1 A2, A4, A4
shru .s1 A4, A6, A4
nop
ENDPROC(__c6xabi_divu)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,282
|
arch/c6x/lib/llshru.S
|
;; Copyright (C) 2010 Texas Instruments Incorporated
;; Contributed by Mark Salter <msalter@redhat.com>.
;;
;; This program is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
;; the Free Software Foundation; either version 2 of the License, or
;; (at your option) any later version.
;;
;; This program is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public License
;; along with this program; if not, write to the Free Software
;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
;; uint64_t __c6xabi_llshru(uint64_t val, uint shift)
#include <linux/linkage.h>
.text
ENTRY(__c6xabi_llshru)
mv .l1x B4,A1
[!A1] b .s2 B3 ; return if zero shift count
mvk .s1 32,A0
sub .d1 A0,A1,A0
cmplt .l1 0,A0,A2
[A2] shl .s1 A5,A0,A0
nop
[!A2] neg .l1 A0,A4
|| [A2] shru .s1 A4,A1,A4
[!A2] shru .s1 A5,A4,A4
|| [A2] or .d1 A4,A0,A4
|| [!A2] mvk .l1 0,A5
[A2] shru .s1 A5,A1,A5
bnop .s2 B3,5
ENDPROC(__c6xabi_llshru)
|
AirFortressIlikara/LS2K0300-linux-4.19
| 11,011
|
arch/microblaze/kernel/head.S
|
/*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* MMU code derived from arch/ppc/kernel/head_4xx.S:
* Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
* Initial PowerPC version.
* Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
* Rewritten for PReP
* Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Low-level exception handlers, MMU support, and rewrite.
* Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
* PowerPC 8xx modifications.
* Copyright (c) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright 2000 MontaVista Software Inc.
* PPC405 modifications
* PowerPC 403GCX/405GP modifications.
* Author: MontaVista Software, Inc.
* frank_rowand@mvista.com or source@mvista.com
* debbie_chu@mvista.com
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <linux/of_fdt.h> /* for OF_DT_HEADER */
#ifdef CONFIG_MMU
#include <asm/setup.h> /* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>
.section .data
.global empty_zero_page
.align 12
empty_zero_page:
.space PAGE_SIZE
.global swapper_pg_dir
swapper_pg_dir:
.space PAGE_SIZE
#endif /* CONFIG_MMU */
.section .rodata
.align 4
endian_check:
.word 1
__HEAD
ENTRY(_start)
#if CONFIG_KERNEL_BASE_ADDR == 0
brai TOPHYS(real_start)
.org 0x100
real_start:
#endif
mts rmsr, r0
/* Disable stack protection from bootloader */
mts rslr, r0
addi r8, r0, 0xFFFFFFFF
mts rshr, r8
/*
* According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
* if the msrclr instruction is not enabled. We use this to detect
* if the opcode is available, by issuing msrclr and then testing the result.
* r8 == 0 - msr instructions are implemented
* r8 != 0 - msr instructions are not implemented
*/
mfs r1, rmsr
msrclr r8, 0 /* clear nothing - just read msr for test */
cmpu r8, r8, r1 /* r1 must contain msr reg content */
/* r7 may point to an FDT, or there may be one linked in.
if it's in r7, we've got to save it away ASAP.
We ensure r7 points to a valid FDT, just in case the bootloader
is broken or non-existent */
beqi r7, no_fdt_arg /* NULL pointer? don't copy */
/* Does r7 point to a valid FDT? Load HEADER magic number */
/* Run time Big/Little endian platform */
/* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
lbui r11, r0, TOPHYS(endian_check)
beqid r11, big_endian /* DO NOT break delay stop dependency */
lw r11, r0, r7 /* Big endian load in delay slot */
lwr r11, r0, r7 /* Little endian load */
big_endian:
rsubi r11, r11, OF_DT_HEADER /* Check FDT header */
beqi r11, _prepare_copy_fdt
or r7, r0, r0 /* clear R7 when not valid DTB */
bnei r11, no_fdt_arg /* No - get out of here */
_prepare_copy_fdt:
or r11, r0, r0 /* clear index */
ori r4, r0, TOPHYS(_fdt_start)
ori r3, r0, (0x8000 - 4)
_copy_fdt:
lw r12, r7, r11 /* r12 = r7 + r11 */
sw r12, r4, r11 /* addr[r4 + r11] = r12 */
addik r11, r11, 4 /* increment counting */
bgtid r3, _copy_fdt /* loop for all entries */
addik r3, r3, -4 /* decrement loop */
no_fdt_arg:
#ifdef CONFIG_MMU
#ifndef CONFIG_CMDLINE_BOOL
/*
* handling command line
* copy command line directly to cmd_line placed in data section.
*/
beqid r5, skip /* Skip if NULL pointer */
or r11, r0, r0 /* clear index */
ori r4, r0, cmd_line /* load address of command line */
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
/* r2 = *(r5 + r11) - r5 contains pointer to command line */
lbu r2, r5, r11
beqid r2, skip /* Skip if no data */
sb r2, r4, r11 /* addr[r4 + r11] = r2 */
addik r11, r11, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* decrement loop */
addik r5, r4, 0 /* add new space for command line */
tovirt(r5,r5)
skip:
#endif /* CONFIG_CMDLINE_BOOL */
#ifdef NOT_COMPILE
/* save bram context */
or r11, r0, r0 /* clear index */
ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
ori r3, r0, (LMB_SIZE - 4)
_copy_bram:
lw r7, r0, r11 /* r7 = r0 + r6 */
sw r7, r4, r11 /* addr[r4 + r6] = r7 */
addik r11, r11, 4 /* increment counting */
bgtid r3, _copy_bram /* loop for all entries */
addik r3, r3, -4 /* decrement loop */
#endif
/* We have to turn on the MMU right away. */
/*
* Set up the initial MMU state so we can do the first level of
* kernel initialization. This maps the first 16 MBytes of memory 1:1
* virtual to physical.
*/
nop
addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
mts rtlblo, r0
bgtid r3, _invalidate /* loop for all entries */
addik r3, r3, -1
/* sync */
/* Setup the kernel PID */
mts rpid,r0 /* Load the kernel PID */
nop
bri 4
/*
* We should still be executing code at physical address area
* RAM_BASEADDR at this point. However, kernel code is at
* a virtual address. So, set up a TLB mapping to cover this once
* translation is enabled.
*/
addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
tophys(r4,r3) /* Load the kernel physical address */
/* start to do TLB calculation */
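/*
 * A rough C sketch of the TLB0/TLB1 size selection done below (not part
 * of the original source; the helper name is only for illustration).
 * "size" is the padded kernel image size computed into r12, and the goal
 * is to cover it with at most two mappings of 1MB, 4MB or 16MB each:
 *
 * static void pick_tlb_sizes(unsigned long size,
 *		unsigned long *tlb0, unsigned long *tlb1)
 * {
 *	*tlb0 = 0;
 *	*tlb1 = 0;
 *	if (size >= 0x1000000) {		// GT16: TLB0 is 16MB
 *		*tlb0 = 0x1000000;
 *		size -= 0x1000000;
 *		if (size >= 0x400000)		// GT20: TLB1 is 16MB
 *			*tlb1 = 0x1000000;
 *		else if (size >= 0x100000)	// GT17: TLB1 is 4MB
 *			*tlb1 = 0x400000;
 *		else				// GT1: TLB1 is 1MB
 *			*tlb1 = 0x100000;
 *	} else if (size >= 0x800000) {		// GT8: only TLB1, 16MB
 *		*tlb1 = 0x1000000;
 *	} else if (size >= 0x400000) {		// GT4: TLB0 is 4MB
 *		*tlb0 = 0x400000;
 *		*tlb1 = (size - 0x400000 >= 0x100000) ? 0x400000 : 0x100000;
 *	} else if (size >= 0x200000) {		// GT2: only TLB1, 4MB
 *		*tlb1 = 0x400000;
 *	} else {				// TLB0 is 1MB
 *		*tlb0 = 0x100000;
 *		if (size >= 0x100000)		// GT1: TLB1 is 1MB
 *			*tlb1 = 0x100000;
 *	}
 * }
 */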
addik r12, r0, _end
rsub r12, r3, r12
addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */
or r9, r0, r0 /* TLB0 = 0 */
or r10, r0, r0 /* TLB1 = 0 */
addik r11, r12, -0x1000000
bgei r11, GT16 /* size is greater than 16MB */
addik r11, r12, -0x0800000
bgei r11, GT8 /* size is greater than 8MB */
addik r11, r12, -0x0400000
bgei r11, GT4 /* size is greater than 4MB */
/* size is less than 4MB */
addik r11, r12, -0x0200000
bgei r11, GT2 /* size is greater than 2MB */
addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
addik r11, r12, -0x0100000
bgei r11, GT1 /* size is greater than 1MB */
/* TLB1 is 0 which is setup above */
bri tlb_end
GT4: /* r11 contains the rest - will be either 1 or 4 */
ori r9, r0, 0x400000 /* TLB0 is 4MB */
bri TLB1
GT16: /* TLB0 is 16MB */
addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
TLB1:
/* must be used r2 because of subtract if failed */
addik r2, r11, -0x0400000
bgei r2, GT20 /* size is greater than 16MB */
/* size is >16MB and <20MB */
addik r11, r11, -0x0100000
bgei r11, GT17 /* size is greater than 17MB */
/* kernel is >16MB and < 17MB */
GT1:
addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
bri tlb_end
GT2: /* TLB0 is 0 and TLB1 will be 4MB */
GT17: /* TLB1 is 4MB - kernel size <20MB */
addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
bri tlb_end
GT8: /* TLB0 is still zero that's why I can use only TLB1 */
GT20: /* TLB1 is 16MB - kernel size >20MB */
addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
tlb_end:
/*
* Configure and load two entries into TLB slots 0 and 1.
* In case we are pinning TLBs, these are reserved by the
* other TLB functions. If not reserving, then it doesn't
* matter where they are loaded.
*/
andi r4,r4,0xfffffc00 /* Mask off the real page number */
ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
/*
* TLB0 is always used - check if it is not zero (r9 stores TLB0 value);
* if it is zero, use the TLB1 value instead and clear it (r10 stores TLB1 value)
*/
bnei r9, tlb0_not_zero
add r9, r10, r0
add r10, r0, r0
tlb0_not_zero:
/* look at the code below */
ori r30, r0, 0x200
andi r29, r9, 0x100000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r9, 0x400000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r9, 0x1000000
bneid r29, 1f
addik r30, r30, 0x80
1:
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
ori r3,r3,(TLB_VALID)
or r3, r3, r30
/* Load tlb_skip size value which is index to first unused TLB entry */
lwi r11, r0, TOPHYS(tlb_skip)
mts rtlbx,r11 /* first free TLB slot */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/* Increase tlb_skip size */
addik r11, r11, 1
swi r11, r0, TOPHYS(tlb_skip)
/* TLB1 can be zero, in which case we do not set it up */
beqi r10, jump_over2
/* look at the code below */
ori r30, r0, 0x200
andi r29, r10, 0x100000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r10, 0x400000
bneid r29, 1f
addik r30, r30, 0x80
andi r29, r10, 0x1000000
bneid r29, 1f
addik r30, r30, 0x80
1:
addk r4, r4, r9 /* previous addr + TLB0 size */
addk r3, r3, r9
andi r3,r3,0xfffffc00 /* Mask off the effective page number */
ori r3,r3,(TLB_VALID)
or r3, r3, r30
lwi r11, r0, TOPHYS(tlb_skip)
mts rtlbx, r11 /* r11 is used from TLB0 */
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/* Increase tlb_skip size */
addik r11, r11, 1
swi r11, r0, TOPHYS(tlb_skip)
jump_over2:
/*
* Load a TLB entry for LMB, since we need access to
* the exception vectors, using a 4k real==virtual mapping.
*/
/* Use temporary TLB_ID for LMB - clear this temporary mapping later */
ori r11, r0, MICROBLAZE_LMB_TLB_ID
mts rtlbx,r11
ori r4,r0,(TLB_WR | TLB_EX)
ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
mts rtlblo,r4 /* Load the data portion of the entry */
mts rtlbhi,r3 /* Load the tag portion of the entry */
/*
* We now have the lower 16 Meg of RAM mapped into TLB entries, and the
* caches ready to work.
*/
turn_on_mmu:
ori r15,r0,start_here
ori r4,r0,MSR_KERNEL_VMS
mts rmsr,r4
nop
rted r15,0 /* enables MMU */
nop
start_here:
#endif /* CONFIG_MMU */
/* Initialize small data anchors */
addik r13, r0, _KERNEL_SDA_BASE_
addik r2, r0, _KERNEL_SDA2_BASE_
/* Initialize stack pointer */
addik r1, r0, init_thread_union + THREAD_SIZE - 4
/* Initialize r31 with current task address */
addik r31, r0, init_task
addik r11, r0, machine_early_init
brald r15, r11
nop
#ifndef CONFIG_MMU
addik r15, r0, machine_halt
braid start_kernel
nop
#else
/*
* Initialize the MMU.
*/
bralid r15, mmu_init
nop
/* Go back to running unmapped so we can load up new values
* and change to using our exception vectors.
* On the MicroBlaze, all we have to do is invalidate the used TLB entries
* to clear the old 16M byte TLB mappings.
*/
ori r15,r0,TOPHYS(kernel_load_context)
ori r4,r0,MSR_KERNEL
mts rmsr,r4
nop
bri 4
rted r15,0
nop
/* Load up the kernel context */
kernel_load_context:
ori r5, r0, MICROBLAZE_LMB_TLB_ID
mts rtlbx,r5
nop
mts rtlbhi,r0
nop
addi r15, r0, machine_halt
ori r17, r0, start_kernel
ori r4, r0, MSR_KERNEL_VMS
mts rmsr, r4
nop
rted r17, 0 /* enable MMU and jump to start_kernel */
nop
#endif /* CONFIG_MMU */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,365
|
arch/microblaze/kernel/syscall_table.S
|
/* SPDX-License-Identifier: GPL-2.0 */
ENTRY(sys_call_table)
.long sys_restart_syscall /* 0 - old "setup()" system call,
* used for restarting */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown
.long sys_ni_syscall /* old break syscall holder */
.long sys_ni_syscall /* old stat */
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid
.long sys_getuid
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_ni_syscall /* oldfstat */
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 - old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid
.long sys_getgid
.long sys_signal
.long sys_geteuid
.long sys_getegid /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_ni_syscall /* sys_sigaction */
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid /* 70 */
.long sys_setregid
.long sys_ni_syscall /* sys_sigsuspend_wrapper */
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_ni_syscall /* old_getrlimit */
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups /* 80 */
.long sys_setgroups
.long sys_ni_syscall /* old_select */
.long sys_symlink
.long sys_ni_syscall /* oldlstat */
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_ni_syscall /* old_readdir */
.long sys_mmap /* 90 */ /* old_mmap */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall
.long sys_syslog /* operation with system console */
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_ni_syscall /* uname */
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* old "idle" system call */
.long sys_ni_syscall /* old sys_vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ni_syscall /* old sys_ipc */
.long sys_fsync
.long sys_ni_syscall /* sys_sigreturn_wrapper */
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_ni_syscall /* modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125: sys_mprotect */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* reserved for afs_syscall */
.long sys_setfsuid
.long sys_setfsgid
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150: sys_mlock */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid
.long sys_getresuid /* 165 */
.long sys_ni_syscall /* sys_vm86 */
.long sys_ni_syscall /* Old sys_query_module */
.long sys_poll
.long sys_ni_syscall /* old nfsservctl */
.long sys_setresgid /* 170 */
.long sys_getresgid
.long sys_prctl
.long sys_rt_sigreturn_wrapper
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_ni_syscall /* sigaltstack */
.long sys_sendfile
.long sys_ni_syscall /* reserved for streams1 */
.long sys_ni_syscall /* reserved for streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
.long sys_getdents64 /* 220 */
.long sys_fcntl64
.long sys_ni_syscall /* reserved for TUX */
.long sys_ni_syscall
.long sys_gettid
.long sys_readahead /* 225 */
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr /* 230 */
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr /* 235 */
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_tkill
.long sys_sendfile64
.long sys_futex /* 240 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_ni_syscall /* set_thread_area */
.long sys_ni_syscall /* get_thread_area */
.long sys_io_setup /* 245 */
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64 /* 250 */
.long sys_ni_syscall
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 255 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 260 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 265 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 270 */
.long sys_utimes
.long sys_fadvise64_64
.long sys_ni_syscall /* sys_vserver */
.long sys_mbind
.long sys_get_mempolicy
.long sys_set_mempolicy
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 280 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_kexec_load
.long sys_waitid
.long sys_ni_syscall /* 285 */ /* available */
.long sys_add_key
.long sys_request_key
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get /* 290 */
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch
.long sys_ni_syscall /* sys_migrate_pages */
.long sys_openat /* 295 */
.long sys_mkdirat
.long sys_mknodat
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64 /* 300 */
.long sys_unlinkat
.long sys_renameat
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat /* 305 */
.long sys_fchmodat
.long sys_faccessat
.long sys_pselect6
.long sys_ppoll
.long sys_unshare /* 310 */
.long sys_set_robust_list
.long sys_get_robust_list
.long sys_splice
.long sys_sync_file_range
.long sys_tee /* 315 */
.long sys_vmsplice
.long sys_move_pages
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate
.long sys_semtimedop /* 325 */
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_semctl
.long sys_semget
.long sys_semop /* 330 */
.long sys_msgctl
.long sys_msgget
.long sys_msgrcv
.long sys_msgsnd
.long sys_shmat /* 335 */
.long sys_shmctl
.long sys_shmdt
.long sys_shmget
.long sys_signalfd4 /* new syscall */
.long sys_eventfd2 /* 340 */
.long sys_epoll_create1
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1
.long sys_socket /* 345 */
.long sys_socketpair
.long sys_bind
.long sys_listen
.long sys_accept
.long sys_connect /* 350 */
.long sys_getsockname
.long sys_getpeername
.long sys_sendto
.long sys_send
.long sys_recvfrom /* 355 */
.long sys_recv
.long sys_setsockopt
.long sys_getsockopt
.long sys_shutdown
.long sys_sendmsg /* 360 */
.long sys_recvmsg
.long sys_accept4
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo /* 365 */
.long sys_perf_event_open
.long sys_recvmmsg
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64 /* 370 */
.long sys_name_to_handle_at
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns /* 375 */
.long sys_sendmmsg
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module /* 380 */
.long sys_sched_setattr
.long sys_sched_getattr
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom /* 385 */
.long sys_memfd_create
.long sys_bpf
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier /* 390 */
.long sys_mlock2
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
.long sys_pkey_mprotect /* 395 */
.long sys_pkey_alloc
.long sys_pkey_free
.long sys_statx
.long sys_io_pgetevents
.long sys_rseq
|
AirFortressIlikara/LS2K0300-linux-4.19
| 29,174
|
arch/microblaze/kernel/entry.S
|
/*
* Low-level system-call handling, trap handlers and context-switching
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
* Copyright (C) 2001,2002 NEC Corporation
* Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* Written by Miles Bader <miles@gnu.org>
* Heavily modified by John Williams for Microblaze
*/
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/errno.h>
#include <asm/signal.h>
#undef DEBUG
#ifdef DEBUG
/* Create space for syscalls counting. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
.space (__NR_syscalls * 4)
#endif /* DEBUG */
#define C_ENTRY(name) .globl name; .align 4; name
/*
* Various ways of setting and clearing BIP in flags reg.
* This is mucky, but necessary when using a MicroBlaze version that
* allows msr ops to write to BIP
*/
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
.macro clear_bip
msrclr r0, MSR_BIP
.endm
.macro set_bip
msrset r0, MSR_BIP
.endm
.macro clear_eip
msrclr r0, MSR_EIP
.endm
.macro set_ee
msrset r0, MSR_EE
.endm
.macro disable_irq
msrclr r0, MSR_IE
.endm
.macro enable_irq
msrset r0, MSR_IE
.endm
.macro set_ums
msrset r0, MSR_UMS
msrclr r0, MSR_VMS
.endm
.macro set_vms
msrclr r0, MSR_UMS
msrset r0, MSR_VMS
.endm
.macro clear_ums
msrclr r0, MSR_UMS
.endm
.macro clear_vms_ums
msrclr r0, MSR_VMS | MSR_UMS
.endm
#else
.macro clear_bip
mfs r11, rmsr
andi r11, r11, ~MSR_BIP
mts rmsr, r11
.endm
.macro set_bip
mfs r11, rmsr
ori r11, r11, MSR_BIP
mts rmsr, r11
.endm
.macro clear_eip
mfs r11, rmsr
andi r11, r11, ~MSR_EIP
mts rmsr, r11
.endm
.macro set_ee
mfs r11, rmsr
ori r11, r11, MSR_EE
mts rmsr, r11
.endm
.macro disable_irq
mfs r11, rmsr
andi r11, r11, ~MSR_IE
mts rmsr, r11
.endm
.macro enable_irq
mfs r11, rmsr
ori r11, r11, MSR_IE
mts rmsr, r11
.endm
.macro set_ums
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm
.macro set_vms
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm
.macro clear_ums
mfs r11, rmsr
andni r11, r11, MSR_UMS
mts rmsr,r11
.endm
.macro clear_vms_ums
mfs r11, rmsr
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
* enabled when calling the high-level function. Clobbers R11.
* VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
*/
/* turn on virtual protected mode save */
#define VM_ON \
set_ums; \
rted r0, 2f; \
nop; \
2:
/* turn off virtual protected mode save and user mode save*/
#define VM_OFF \
clear_vms_ums; \
rted r0, TOPHYS(1f); \
nop; \
1:
#define SAVE_REGS \
swi r2, r1, PT_R2; /* Save SDA */ \
swi r3, r1, PT_R3; \
swi r4, r1, PT_R4; \
swi r5, r1, PT_R5; \
swi r6, r1, PT_R6; \
swi r7, r1, PT_R7; \
swi r8, r1, PT_R8; \
swi r9, r1, PT_R9; \
swi r10, r1, PT_R10; \
swi r11, r1, PT_R11; /* save clobbered regs after rval */\
swi r12, r1, PT_R12; \
swi r13, r1, PT_R13; /* Save SDA2 */ \
swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \
swi r15, r1, PT_R15; /* Save LP */ \
swi r16, r1, PT_R16; \
swi r17, r1, PT_R17; \
swi r18, r1, PT_R18; /* Save asm scratch reg */ \
swi r19, r1, PT_R19; \
swi r20, r1, PT_R20; \
swi r21, r1, PT_R21; \
swi r22, r1, PT_R22; \
swi r23, r1, PT_R23; \
swi r24, r1, PT_R24; \
swi r25, r1, PT_R25; \
swi r26, r1, PT_R26; \
swi r27, r1, PT_R27; \
swi r28, r1, PT_R28; \
swi r29, r1, PT_R29; \
swi r30, r1, PT_R30; \
swi r31, r1, PT_R31; /* Save current task reg */ \
mfs r11, rmsr; /* save MSR */ \
swi r11, r1, PT_MSR;
#define RESTORE_REGS_GP \
lwi r2, r1, PT_R2; /* restore SDA */ \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
lwi r5, r1, PT_R5; \
lwi r6, r1, PT_R6; \
lwi r7, r1, PT_R7; \
lwi r8, r1, PT_R8; \
lwi r9, r1, PT_R9; \
lwi r10, r1, PT_R10; \
lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\
lwi r12, r1, PT_R12; \
lwi r13, r1, PT_R13; /* restore SDA2 */ \
lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PT_R15; /* restore LP */ \
lwi r16, r1, PT_R16; \
lwi r17, r1, PT_R17; \
lwi r18, r1, PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PT_R19; \
lwi r20, r1, PT_R20; \
lwi r21, r1, PT_R21; \
lwi r22, r1, PT_R22; \
lwi r23, r1, PT_R23; \
lwi r24, r1, PT_R24; \
lwi r25, r1, PT_R25; \
lwi r26, r1, PT_R26; \
lwi r27, r1, PT_R27; \
lwi r28, r1, PT_R28; \
lwi r29, r1, PT_R29; \
lwi r30, r1, PT_R30; \
lwi r31, r1, PT_R31; /* Restore cur task reg */
#define RESTORE_REGS \
lwi r11, r1, PT_MSR; \
mts rmsr , r11; \
RESTORE_REGS_GP
#define RESTORE_REGS_RTBD \
lwi r11, r1, PT_MSR; \
andni r11, r11, MSR_EIP; /* clear EIP */ \
ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \
mts rmsr , r11; \
RESTORE_REGS_GP
#define SAVE_STATE \
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
/* See if already in kernel mode.*/ \
mfs r1, rmsr; \
andi r1, r1, MSR_UMS; \
bnei r1, 1f; \
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
/* FIXME: I can add these two lines to one */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -PT_SIZE; */ \
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
SAVE_REGS \
brid 2f; \
swi r1, r1, PT_MODE; \
1: /* User-mode state save. */ \
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
/* MS these three instructions can be added to one */ \
/* addik r1, r1, THREAD_SIZE; */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -PT_SIZE; */ \
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
SAVE_REGS \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PT_R1; /* Store user SP. */ \
swi r0, r1, PT_MODE; /* Was in user-mode. */ \
/* MS: I am clearing UMS even in case when I come from kernel space */ \
clear_ums; \
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
.text
/*
* User trap.
*
* System calls are handled here.
*
* Syscall protocol:
* Syscall number in r12, args in r5-r10
* Return value in r3
*
* Trap entered via brki instruction, so BIP bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*/
C_ENTRY(_user_exception):
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
addi r14, r14, 4 /* return address is 4 byte after call */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* calculate kernel stack pointer from task struct 8k */
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
addik r1, r1, -PT_SIZE; /* Make room on the stack. */
SAVE_REGS
swi r0, r1, PT_R3
swi r0, r1, PT_R4
swi r0, r1, PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PT_R1; /* Store user SP. */
clear_ums;
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PT_R0;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid. The LP
* register should point to the location where
* the called function should return. [note that MAKE_SYS_CALL uses label 1] */
/* Step into virtual mode */
rtbd r0, 3f
nop
3:
lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f
addik r3, r0, -ENOSYS
swi r3, r1, PT_R3
brlid r15, do_syscall_trace_enter
addik r5, r1, PT_R0
# do_syscall_trace_enter returns the new syscall nr.
addk r12, r0, r3
lwi r5, r1, PT_R5;
lwi r6, r1, PT_R6;
lwi r7, r1, PT_R7;
lwi r8, r1, PT_R8;
lwi r9, r1, PT_R9;
lwi r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid.
* The LP register should point to the location where the called function
* should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid */
blti r12, 5f
addi r11, r12, -__NR_syscalls;
bgei r11, 5f;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr */
add r12, r12, r12;
addi r30, r0, 1 /* restarts allowed */
#ifdef DEBUG
/* Trace syscalls and store counts in syscall_debug_table */
/* The first table entry stores the total number of syscalls */
lwi r3, r0, syscall_debug_table
addi r3, r3, 1
swi r3, r0, syscall_debug_table
lwi r3, r12, syscall_debug_table
addi r3, r3, 1
swi r3, r12, syscall_debug_table
#endif
# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addi r15, r0, ret_from_trap-8
bra r12
/* The syscall number is invalid, return an error. */
5:
braid ret_from_trap
addi r3, r0, -ENOSYS;
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
swi r3, r1, PT_R3
swi r4, r1, PT_R4
lwi r11, r1, PT_MODE;
/* See if returning to kernel mode, if so, skip resched &c. */
bnei r11, 2f;
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* FIXME: Restructure all these flag checks. */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f
brlid r15, do_syscall_trace_leave
addik r5, r1, PT_R0
1:
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* get thread info from current task */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r19, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
bri 1b
/* Maybe handle a signal */
5:
andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* No signal work pending? skip ahead */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
add r6, r30, r0; /* Arg 2: int in_syscall */
add r30, r0, r0 /* no more restarts */
bri 1b
/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
/* This the initial entry point for a new child thread, with an appropriate
stack in place that makes it look like the child is in the middle of a
syscall. This function is actually `returned to' from switch_thread
(copy_thread makes ret_from_fork the return address in each new thread's
saved context). */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r5, r3, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
brid ret_from_trap; /* Do normal trap return */
add r3, r0, r0; /* Child's fork call should return 0. */
C_ENTRY(ret_from_kernel_thread):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r5, r3, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
brald r15, r20 /* fn was left in r20 */
addk r5, r0, r19 /* ... and argument - in r19 */
brid ret_from_trap
add r3, r0, r0
C_ENTRY(sys_rt_sigreturn_wrapper):
addik r30, r0, 0 /* no restarts */
brid sys_rt_sigreturn /* Do real work */
addik r5, r1, 0; /* add user context as 1st arg */
/*
* HW EXCEPTION routine start
*/
C_ENTRY(full_exception_trap):
/* adjust the exception address for a privileged instruction
* so we can find where it is */
addik r17, r17, -4
SAVE_STATE /* Save registers */
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* FIXME this could be stored directly in the PT_ESR reg.
* I tested it but there is a fault */
/* where the trap should return; -8 adjusts for the callee's 'rtsd r15, 8' */
addik r15, r0, ret_from_exc - 8
mfs r6, resr
mfs r7, rfsr; /* save FSR */
mts rfsr, r0; /* Clear sticky fsr */
rted r0, full_exception
addik r5, r1, 0 /* parameter struct pt_regs * regs */
/*
* Unaligned data trap.
*
* An unaligned data access that is last on a 4k page is handled here.
*
* Trap entered via exception, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
*/
C_ENTRY(unaligned_data_trap):
/* MS: I have to save the r11 value and then restore it because
* set_bip, clear_eip and set_ee use r11 as a temp register if MSR
* instructions are not used. We don't need to do this if MSR instructions
* are used, since they use r0 instead of r11.
* I am using ENTRY_SP, which should primarily be used only for stack
* pointer saving. */
swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
set_bip; /* equalize initial state for all possible entries */
clear_eip;
set_ee;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* where the trap should return; -8 adjusts for the callee's 'rtsd r15, 8' */
addik r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
mfs r4, rear /* EAR */
rtbd r0, _unaligned_data_exception
addik r7, r1, 0 /* parameter struct pt_regs * regs */
/*
* Page fault traps.
*
* If the real exception handler (from hw_exception_handler.S) didn't find
* the mapping for the process, then we're thrown here to handle such situation.
*
* Trap entered via exceptions, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* Build a standard exception frame for TLB Access errors. All TLB exceptions
* will bail out to this point if they can't resolve the lightweight TLB fault.
*
* The C function called is in "arch/microblaze/mm/fault.c", declared as:
* void do_page_fault(struct pt_regs *regs,
* unsigned long address,
* unsigned long error_code)
*/
/* data and instruction traps - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* where the trap should return; -8 adjusts for the callee's 'rtsd r15, 8' */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
mfs r7, resr /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, 0 /* parameter struct pt_regs * regs */
C_ENTRY(page_fault_instr_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PT_PC;
tovirt(r1,r1)
/* where the trap should return; -8 adjusts for the callee's 'rtsd r15, 8' */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
ori r7, r0, 0 /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, 0 /* parameter struct pt_regs * regs */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
lwi r11, r1, PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
1:
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r19, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
bri 1b
/* Maybe handle a signal */
5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* Signals to handle, handle them */
/*
* Handle a signal return; Pending signals should be in r18.
*
* Not all registers are saved by the normal trap/interrupt entry
* points (for instance, call-saved registers (because the normal
* C-compiler calling sequence in the kernel makes sure they're
* preserved), and call-clobbered registers in the case of
* traps), but signal handlers may want to examine or change the
* complete register state. Here we save anything not saved by
* the normal entry sequence, so that it may be safely restored
* (in a possibly modified form) after do_notify_resume returns. */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
addi r6, r0, 0; /* Arg 2: int in_syscall */
bri 1b
/* Finally, return to user state. */
4: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS_RTBD;
addik r1, r1, PT_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
EXC_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
/*
* HW EXCEPTION routine end
*/
/*
* Hardware maskable interrupts.
*
* The stack-pointer (r1) should have already been saved to the memory
* location PER_CPU(ENTRY_SP).
*/
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
/* MS: See if already in kernel mode. */
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* Kernel-mode state save. */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
/* save registers */
/* MS: Make room on the stack -> activation record */
addik r1, r1, -PT_SIZE;
SAVE_REGS
brid 2f;
swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
/* save registers */
addik r1, r1, -PT_SIZE;
SAVE_REGS
/* calculate mode */
swi r0, r1, PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PT_R1;
clear_ums;
2:
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
addik r15, r0, irq_call;
irq_call:rtbd r0, do_IRQ;
addik r5, r1, 0;
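/* do_IRQ returns with 'rtsd r15, 8', i.e. to irq_call + 8, which is
 * ret_from_irq below; the addik in the delay slot passes pt_regs in r5 */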
/* MS: we are in virtual mode */
ret_from_irq:
lwi r11, r1, PT_MODE;
bnei r11, 2f;
1:
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f
bralid r15, schedule;
nop; /* delay slot */
bri 1b
/* Maybe handle a signal */
5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqid r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
addi r6, r0, 0; /* Arg 2: int in_syscall */
bri 1b
/* Finally, return to user state. */
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
RESTORE_REGS
addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;
bri 6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
/* MS: get preempt_count from thread info */
lwi r5, r11, TI_PREEMPT_COUNT;
bgti r5, restore;
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
beqi r5, restore /* if zero jump over */
preempt:
/* interrupts are off, that's why we call preempt_schedule_irq */
bralid r15, preempt_schedule_irq
nop
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
bnei r5, preempt /* if non zero jump to resched */
restore:
#endif
VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
RESTORE_REGS
addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
rtid r14, 0
nop
/*
* Debug trap for KGDB. Entered via _debug_exception by brki r16, 0x18
* and calls the handling function with the saved pt_regs
*/
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
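/* MSR[UMS] is nonzero when the debug trap came from user mode, so the
 * branch above selects the user-mode state save at 1: */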
/* MS: Kernel-mode state save - kgdb */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
/* BIP bit is set on entry, no interrupts can occur */
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
SAVE_REGS;
/* save all regs to pt_reg structure */
swi r0, r1, PT_R0; /* R0 must be saved too */
swi r14, r1, PT_R14 /* rewrite saved R14 value */
swi r16, r1, PT_PC; /* PC and r16 are the same */
/* save special purpose registers to pt_regs */
mfs r11, rear;
swi r11, r1, PT_EAR;
mfs r11, resr;
swi r11, r1, PT_ESR;
mfs r11, rfsr;
swi r11, r1, PT_FSR;
/* the stack pointer is a physical address and has been decreased
 * by PT_SIZE, but we need to store the correct R1 value */
addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
swi r11, r1, PT_R1
/* MS: r31 - current pointer isn't changed */
tovirt(r1,r1)
#ifdef CONFIG_KGDB
addi r5, r1, 0 /* pass pt_reg address as the first arg */
addik r15, r0, dbtrap_call; /* return address */
rtbd r0, microblaze_kgdb_break
nop;
#endif
/* MS: Place handler for brki from kernel space if KGDB is OFF.
* It is very unlikely that another brki instruction is called. */
bri 0
/* MS: User-mode state save - gdb */
1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
tophys(r1,r1);
addik r1, r1, -PT_SIZE; /* Make room on the stack. */
SAVE_REGS;
swi r16, r1, PT_PC; /* Save LP */
swi r0, r1, PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PT_R1; /* Store user SP. */
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
set_vms;
addik r5, r1, 0;
addik r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
rtbd r0, sw_exception
nop
/* MS: The first instruction for the second part of the gdb/kgdb */
set_bip; /* Ints masked for state restore */
lwi r11, r1, PT_MODE;
bnei r11, 2f;
/* MS: Return to user space - gdb */
1:
/* Get current task ptr into r11 */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r19, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r19, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
bri 1b
/* Maybe handle a signal */
5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
beqi r11, 4f; /* Signals to handle, handle them */
addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
bralid r15, do_notify_resume; /* Handle any signals */
addi r6, r0, 0; /* Arg 2: int in_syscall */
bri 1b
/* Finally, return to user state. */
4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS_RTBD
addik r1, r1, PT_SIZE /* Clean up stack space */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
/* MS: Return to kernel state - kgdb */
2: VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS_RTBD
lwi r14, r1, PT_R14;
lwi r16, r1, PT_PC;
addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
ENTRY(_switch_to)
/* prepare return value */
addk r3, r0, CURRENT_TASK
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp registers */
/* get the start of the previous process' cpu_context */
addik r11, r5, TI_CPU_CONTEXT
swi r1, r11, CC_R1
swi r2, r11, CC_R2
/* skip volatile registers.
* they are saved on stack when we jumped to _switch_to() */
/* dedicated registers */
swi r13, r11, CC_R13
swi r14, r11, CC_R14
swi r15, r11, CC_R15
swi r16, r11, CC_R16
swi r17, r11, CC_R17
swi r18, r11, CC_R18
/* save non-volatile registers */
swi r19, r11, CC_R19
swi r20, r11, CC_R20
swi r21, r11, CC_R21
swi r22, r11, CC_R22
swi r23, r11, CC_R23
swi r24, r11, CC_R24
swi r25, r11, CC_R25
swi r26, r11, CC_R26
swi r27, r11, CC_R27
swi r28, r11, CC_R28
swi r29, r11, CC_R29
swi r30, r11, CC_R30
/* special purpose registers */
mfs r12, rmsr
swi r12, r11, CC_MSR
mfs r12, rear
swi r12, r11, CC_EAR
mfs r12, resr
swi r12, r11, CC_ESR
mfs r12, rfsr
swi r12, r11, CC_FSR
/* update r31 (the current task pointer) to the task which will run next */
lwi CURRENT_TASK, r6, TI_TASK
/* store it to current_save too */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
/* get the start of the next task's cpu_context */
addik r11, r6, TI_CPU_CONTEXT
/* non-volatile registers */
lwi r30, r11, CC_R30
lwi r29, r11, CC_R29
lwi r28, r11, CC_R28
lwi r27, r11, CC_R27
lwi r26, r11, CC_R26
lwi r25, r11, CC_R25
lwi r24, r11, CC_R24
lwi r23, r11, CC_R23
lwi r22, r11, CC_R22
lwi r21, r11, CC_R21
lwi r20, r11, CC_R20
lwi r19, r11, CC_R19
/* dedicated registers */
lwi r18, r11, CC_R18
lwi r17, r11, CC_R17
lwi r16, r11, CC_R16
lwi r15, r11, CC_R15
lwi r14, r11, CC_R14
lwi r13, r11, CC_R13
/* skip volatile registers */
lwi r2, r11, CC_R2
lwi r1, r11, CC_R1
/* special purpose registers */
lwi r12, r11, CC_FSR
mts rfsr, r12
lwi r12, r11, CC_MSR
mts rmsr, r12
rtsd r15, 8
nop
ENTRY(_reset)
brai 0; /* Jump to reset vector */
/* These are compiled and loaded into high memory, then
* copied into place in mach_early_setup */
.section .init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR
.org 0x0
brai CONFIG_MANUAL_RESET_VECTOR
#endif
.org 0x8
brai TOPHYS(_user_exception); /* syscall handler */
.org 0x10
brai TOPHYS(_interrupt); /* Interrupt handler */
.org 0x18
brai TOPHYS(_debug_exception); /* debug trap handler */
.org 0x20
brai TOPHYS(_hw_exception_handler); /* HW exception handler */
.section .rodata,"a"
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
type_SYSCALL:
.ascii "SYSCALL\0"
type_IRQ:
.ascii "IRQ\0"
type_IRQ_PREEMPT:
.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
.ascii " SYSCALL (PREEMPTED)\0"
/*
* Trap decoding for stack unwinder
* Tuples are (start addr, end addr, string)
* If the return address lies in [start addr, end addr],
* the unwinder displays 'string'
*/
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
/* Exact matches come first */
.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
/* Fuzzy matches go here */
.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
/* End of table */
.word 0 ; .word 0 ; .word 0
/* arch/microblaze/kernel/entry-nommu.S */
/*
* Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2007-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
.macro disable_irq
msrclr r0, MSR_IE
.endm
.macro enable_irq
msrset r0, MSR_IE
.endm
.macro clear_bip
msrclr r0, MSR_BIP
.endm
#else
.macro disable_irq
mfs r11, rmsr
andi r11, r11, ~MSR_IE
mts rmsr, r11
.endm
.macro enable_irq
mfs r11, rmsr
ori r11, r11, MSR_IE
mts rmsr, r11
.endm
.macro clear_bip
mfs r11, rmsr
andi r11, r11, ~MSR_BIP
mts rmsr, r11
.endm
#endif
ENTRY(_interrupt)
swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
lwi r11, r0, PER_CPU(KM) /* load mode indicator */
beqid r11, 1f
nop
brid 2f /* jump over */
addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
1: /* switch to kernel stack */
lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
lwi r1, r1, TS_THREAD_INFO /* get the thread info */
/* calculate kernel stack pointer */
addik r1, r1, THREAD_SIZE - PT_SIZE
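/* THREAD_SIZE - PT_SIZE points at the top of the kernel stack with a
 * pt_regs frame already reserved below it */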
2:
swi r11, r1, PT_MODE /* store the mode */
lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
swi r2, r1, PT_R2
swi r3, r1, PT_R3
swi r4, r1, PT_R4
swi r5, r1, PT_R5
swi r6, r1, PT_R6
swi r7, r1, PT_R7
swi r8, r1, PT_R8
swi r9, r1, PT_R9
swi r10, r1, PT_R10
swi r11, r1, PT_R11
swi r12, r1, PT_R12
swi r13, r1, PT_R13
swi r14, r1, PT_R14
swi r14, r1, PT_PC
swi r15, r1, PT_R15
swi r16, r1, PT_R16
swi r17, r1, PT_R17
swi r18, r1, PT_R18
swi r19, r1, PT_R19
swi r20, r1, PT_R20
swi r21, r1, PT_R21
swi r22, r1, PT_R22
swi r23, r1, PT_R23
swi r24, r1, PT_R24
swi r25, r1, PT_R25
swi r26, r1, PT_R26
swi r27, r1, PT_R27
swi r28, r1, PT_R28
swi r29, r1, PT_R29
swi r30, r1, PT_R30
swi r31, r1, PT_R31
/* special purpose registers */
mfs r11, rmsr
swi r11, r1, PT_MSR
mfs r11, rear
swi r11, r1, PT_EAR
mfs r11, resr
swi r11, r1, PT_ESR
mfs r11, rfsr
swi r11, r1, PT_FSR
/* reload original stack pointer and save it */
lwi r11, r0, PER_CPU(ENTRY_SP)
swi r11, r1, PT_R1
/* update mode indicator we are in kernel mode */
addik r11, r0, 1
swi r11, r0, PER_CPU(KM)
/* restore r31 */
lwi r31, r0, PER_CPU(CURRENT_SAVE)
/* prepare the link register, the argument and jump */
addik r15, r0, ret_from_intr - 8
addk r6, r0, r15
braid do_IRQ
add r5, r0, r1
ret_from_intr:
lwi r11, r1, PT_MODE
bneid r11, no_intr_resched
3:
lwi r6, r31, TS_THREAD_INFO /* get thread info */
lwi r19, r6, TI_FLAGS /* get flags in thread info */
/* do extra work if any bits are set */
andi r11, r19, _TIF_NEED_RESCHED
beqi r11, 1f
bralid r15, schedule
nop
bri 3b
1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
beqid r11, no_intr_resched
addk r5, r1, r0
bralid r15, do_notify_resume
addk r6, r0, r0
bri 3b
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
/* save mode indicator */
lwi r11, r1, PT_MODE
swi r11, r0, PER_CPU(KM)
/* save r31 */
swi r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
/* special purpose registers */
lwi r11, r1, PT_FSR
mts rfsr, r11
lwi r11, r1, PT_ESR
mts resr, r11
lwi r11, r1, PT_EAR
mts rear, r11
lwi r11, r1, PT_MSR
mts rmsr, r11
lwi r31, r1, PT_R31
lwi r30, r1, PT_R30
lwi r29, r1, PT_R29
lwi r28, r1, PT_R28
lwi r27, r1, PT_R27
lwi r26, r1, PT_R26
lwi r25, r1, PT_R25
lwi r24, r1, PT_R24
lwi r23, r1, PT_R23
lwi r22, r1, PT_R22
lwi r21, r1, PT_R21
lwi r20, r1, PT_R20
lwi r19, r1, PT_R19
lwi r18, r1, PT_R18
lwi r17, r1, PT_R17
lwi r16, r1, PT_R16
lwi r15, r1, PT_R15
lwi r14, r1, PT_PC
lwi r13, r1, PT_R13
lwi r12, r1, PT_R12
lwi r11, r1, PT_R11
lwi r10, r1, PT_R10
lwi r9, r1, PT_R9
lwi r8, r1, PT_R8
lwi r7, r1, PT_R7
lwi r6, r1, PT_R6
lwi r5, r1, PT_R5
lwi r4, r1, PT_R4
lwi r3, r1, PT_R3
lwi r2, r1, PT_R2
lwi r1, r1, PT_R1
rtid r14, 0
nop
ENTRY(_reset)
brai 0;
ENTRY(_user_exception)
swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
lwi r11, r0, PER_CPU(KM) /* load mode indicator */
beqid r11, 1f /* Already in kernel mode? */
nop
brid 2f /* jump over */
addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
1: /* Switch to kernel stack */
lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
lwi r1, r1, TS_THREAD_INFO /* get the thread info */
/* calculate kernel stack pointer */
addik r1, r1, THREAD_SIZE - PT_SIZE
2:
swi r11, r1, PT_MODE /* store the mode */
lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
/* save them on stack */
swi r2, r1, PT_R2
swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
swi r5, r1, PT_R5
swi r6, r1, PT_R6
swi r7, r1, PT_R7
swi r8, r1, PT_R8
swi r9, r1, PT_R9
swi r10, r1, PT_R10
swi r11, r1, PT_R11
/* r12: _always_ in clobber list; see unistd.h */
swi r12, r1, PT_R12
swi r13, r1, PT_R13
/* r14: _always_ in clobber list; see unistd.h */
swi r14, r1, PT_R14
/* but we want to return to the next inst. */
addik r14, r14, 0x4
swi r14, r1, PT_PC /* increment by 4 and store in pc */
swi r15, r1, PT_R15
swi r16, r1, PT_R16
swi r17, r1, PT_R17
swi r18, r1, PT_R18
swi r19, r1, PT_R19
swi r20, r1, PT_R20
swi r21, r1, PT_R21
swi r22, r1, PT_R22
swi r23, r1, PT_R23
swi r24, r1, PT_R24
swi r25, r1, PT_R25
swi r26, r1, PT_R26
swi r27, r1, PT_R27
swi r28, r1, PT_R28
swi r29, r1, PT_R29
swi r30, r1, PT_R30
swi r31, r1, PT_R31
disable_irq
nop /* make sure IE bit is in effect */
clear_bip /* once IE is in effect it is safe to clear BIP */
nop
/* special purpose registers */
mfs r11, rmsr
swi r11, r1, PT_MSR
mfs r11, rear
swi r11, r1, PT_EAR
mfs r11, resr
swi r11, r1, PT_ESR
mfs r11, rfsr
swi r11, r1, PT_FSR
/* reload original stack pointer and save it */
lwi r11, r0, PER_CPU(ENTRY_SP)
swi r11, r1, PT_R1
/* update mode indicator we are in kernel mode */
addik r11, r0, 1
swi r11, r0, PER_CPU(KM)
/* restore r31 */
lwi r31, r0, PER_CPU(CURRENT_SAVE)
/* re-enable interrupts now we are in kernel mode */
enable_irq
/* See if the system call number is valid. */
addi r11, r12, -__NR_syscalls
bgei r11, 1f /* return to user if not valid */
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12 /* convert num -> ptr */
addik r30, r0, 1 /* restarts allowed */
add r12, r12, r12
lwi r12, r12, sys_call_table /* Get function pointer */
addik r15, r0, ret_to_user-8 /* set return address */
bra r12 /* Make the system call. */
bri 0 /* won't reach here */
1:
brid ret_to_user /* jump to syscall epilogue */
addi r3, r0, -ENOSYS /* set errno in delay slot */
/*
* Debug traps are like a system call, but entered via brki r14, 0x60
* All we need to do is send the SIGTRAP signal to current, ptrace and
* do_notify_resume will handle the rest
*/
ENTRY(_debug_exception)
swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
lwi r1, r1, TS_THREAD_INFO /* get the thread info */
addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
lwi r11, r0, PER_CPU(KM) /* load mode indicator */
//save_context:
swi r11, r1, PT_MODE /* store the mode */
lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
/* save them on stack */
swi r2, r1, PT_R2
swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
swi r5, r1, PT_R5
swi r6, r1, PT_R6
swi r7, r1, PT_R7
swi r8, r1, PT_R8
swi r9, r1, PT_R9
swi r10, r1, PT_R10
swi r11, r1, PT_R11
/* r12: _always_ in clobber list; see unistd.h */
swi r12, r1, PT_R12
swi r13, r1, PT_R13
/* r14: _always_ in clobber list; see unistd.h */
swi r14, r1, PT_R14
swi r14, r1, PT_PC /* Will return to interrupted instruction */
swi r15, r1, PT_R15
swi r16, r1, PT_R16
swi r17, r1, PT_R17
swi r18, r1, PT_R18
swi r19, r1, PT_R19
swi r20, r1, PT_R20
swi r21, r1, PT_R21
swi r22, r1, PT_R22
swi r23, r1, PT_R23
swi r24, r1, PT_R24
swi r25, r1, PT_R25
swi r26, r1, PT_R26
swi r27, r1, PT_R27
swi r28, r1, PT_R28
swi r29, r1, PT_R29
swi r30, r1, PT_R30
swi r31, r1, PT_R31
disable_irq
nop /* make sure IE bit is in effect */
clear_bip /* once IE is in effect it is safe to clear BIP */
nop
/* special purpose registers */
mfs r11, rmsr
swi r11, r1, PT_MSR
mfs r11, rear
swi r11, r1, PT_EAR
mfs r11, resr
swi r11, r1, PT_ESR
mfs r11, rfsr
swi r11, r1, PT_FSR
/* reload original stack pointer and save it */
lwi r11, r0, PER_CPU(ENTRY_SP)
swi r11, r1, PT_R1
/* update mode indicator we are in kernel mode */
addik r11, r0, 1
swi r11, r0, PER_CPU(KM)
/* restore r31 */
lwi r31, r0, PER_CPU(CURRENT_SAVE)
/* re-enable interrupts now we are in kernel mode */
enable_irq
addi r5, r0, SIGTRAP /* sending the trap signal */
add r6, r0, r31 /* to current */
bralid r15, send_sig
add r7, r0, r0 /* 3rd param zero */
addik r30, r0, 1 /* restarts allowed ??? */
/* Restore r3/r4 to work around how ret_to_user works */
lwi r3, r1, PT_R3
lwi r4, r1, PT_R4
bri ret_to_user
ENTRY(_break)
bri 0
/* struct task_struct *_switch_to(struct thread_info *prev,
struct thread_info *next); */
ENTRY(_switch_to)
/* prepare return value */
addk r3, r0, r31
/* save registers in cpu_context */
/* use r11 and r12, volatile registers, as temp registers */
addik r11, r5, TI_CPU_CONTEXT
swi r1, r11, CC_R1
swi r2, r11, CC_R2
/* skip volatile registers.
* they are saved on stack when we jumped to _switch_to() */
/* dedicated registers */
swi r13, r11, CC_R13
swi r14, r11, CC_R14
swi r15, r11, CC_R15
swi r16, r11, CC_R16
swi r17, r11, CC_R17
swi r18, r11, CC_R18
/* save non-volatile registers */
swi r19, r11, CC_R19
swi r20, r11, CC_R20
swi r21, r11, CC_R21
swi r22, r11, CC_R22
swi r23, r11, CC_R23
swi r24, r11, CC_R24
swi r25, r11, CC_R25
swi r26, r11, CC_R26
swi r27, r11, CC_R27
swi r28, r11, CC_R28
swi r29, r11, CC_R29
swi r30, r11, CC_R30
/* special purpose registers */
mfs r12, rmsr
swi r12, r11, CC_MSR
mfs r12, rear
swi r12, r11, CC_EAR
mfs r12, resr
swi r12, r11, CC_ESR
mfs r12, rfsr
swi r12, r11, CC_FSR
/* update r31, the current */
lwi r31, r6, TI_TASK
swi r31, r0, PER_CPU(CURRENT_SAVE)
/* get new process' cpu context and restore */
addik r11, r6, TI_CPU_CONTEXT
/* special purpose registers */
lwi r12, r11, CC_FSR
mts rfsr, r12
lwi r12, r11, CC_ESR
mts resr, r12
lwi r12, r11, CC_EAR
mts rear, r12
lwi r12, r11, CC_MSR
mts rmsr, r12
/* non-volatile registers */
lwi r30, r11, CC_R30
lwi r29, r11, CC_R29
lwi r28, r11, CC_R28
lwi r27, r11, CC_R27
lwi r26, r11, CC_R26
lwi r25, r11, CC_R25
lwi r24, r11, CC_R24
lwi r23, r11, CC_R23
lwi r22, r11, CC_R22
lwi r21, r11, CC_R21
lwi r20, r11, CC_R20
lwi r19, r11, CC_R19
/* dedicated registers */
lwi r18, r11, CC_R18
lwi r17, r11, CC_R17
lwi r16, r11, CC_R16
lwi r15, r11, CC_R15
lwi r14, r11, CC_R14
lwi r13, r11, CC_R13
/* skip volatile registers */
lwi r2, r11, CC_R2
lwi r1, r11, CC_R1
rtsd r15, 8
nop
ENTRY(ret_from_fork)
addk r5, r0, r3
brlid r15, schedule_tail
nop
swi r31, r1, PT_R31 /* save r31 in user context. */
/* will soon be restored to r31 in ret_to_user */
addk r3, r0, r0
brid ret_to_user
nop
ENTRY(ret_from_kernel_thread)
brlid r15, schedule_tail
addk r5, r0, r3
brald r15, r20
addk r5, r0, r19
brid ret_to_user
addk r3, r0, r0
work_pending:
lwi r11, r1, PT_MODE
bneid r11, 2f
3:
enable_irq
andi r11, r19, _TIF_NEED_RESCHED
beqi r11, 1f
bralid r15, schedule
nop
bri 4f
1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
beqi r11, no_work_pending
addk r5, r30, r0
bralid r15, do_notify_resume
addik r6, r0, 1
addk r30, r0, r0 /* no restarts from now on */
4:
disable_irq
lwi r6, r31, TS_THREAD_INFO /* get thread info */
lwi r19, r6, TI_FLAGS /* get flags in thread info */
bri 3b
ENTRY(ret_to_user)
disable_irq
swi r4, r1, PT_R4 /* return val */
swi r3, r1, PT_R3 /* return val */
lwi r6, r31, TS_THREAD_INFO /* get thread info */
lwi r19, r6, TI_FLAGS /* get flags in thread info */
bnei r19, work_pending /* do extra work if any bits are set */
no_work_pending:
disable_irq
2:
/* save r31 */
swi r31, r0, PER_CPU(CURRENT_SAVE)
/* save mode indicator */
lwi r18, r1, PT_MODE
swi r18, r0, PER_CPU(KM)
//restore_context:
/* special purpose registers */
lwi r18, r1, PT_FSR
mts rfsr, r18
lwi r18, r1, PT_ESR
mts resr, r18
lwi r18, r1, PT_EAR
mts rear, r18
lwi r18, r1, PT_MSR
mts rmsr, r18
lwi r31, r1, PT_R31
lwi r30, r1, PT_R30
lwi r29, r1, PT_R29
lwi r28, r1, PT_R28
lwi r27, r1, PT_R27
lwi r26, r1, PT_R26
lwi r25, r1, PT_R25
lwi r24, r1, PT_R24
lwi r23, r1, PT_R23
lwi r22, r1, PT_R22
lwi r21, r1, PT_R21
lwi r20, r1, PT_R20
lwi r19, r1, PT_R19
lwi r18, r1, PT_R18
lwi r17, r1, PT_R17
lwi r16, r1, PT_R16
lwi r15, r1, PT_R15
lwi r14, r1, PT_PC
lwi r13, r1, PT_R13
lwi r12, r1, PT_R12
lwi r11, r1, PT_R11
lwi r10, r1, PT_R10
lwi r9, r1, PT_R9
lwi r8, r1, PT_R8
lwi r7, r1, PT_R7
lwi r6, r1, PT_R6
lwi r5, r1, PT_R5
lwi r4, r1, PT_R4 /* return val */
lwi r3, r1, PT_R3 /* return val */
lwi r2, r1, PT_R2
lwi r1, r1, PT_R1
rtid r14, 0
nop
sys_rt_sigreturn_wrapper:
addk r30, r0, r0 /* no restarts for this one */
brid sys_rt_sigreturn
addk r5, r1, r0
/* Interrupt vector table */
.section .init.ivt, "ax"
.org 0x0
brai _reset
brai _user_exception
brai _interrupt
brai _break
brai _hw_exception_handler
.org 0x60
brai _debug_exception
.section .rodata,"a"
#include "syscall_table.S"
syscall_table_size=(.-sys_call_table)
type_SYSCALL:
.ascii "SYSCALL\0"
type_IRQ:
.ascii "IRQ\0"
type_IRQ_PREEMPT:
.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
.ascii " SYSCALL (PREEMPTED)\0"
/*
* Trap decoding for stack unwinder
* Tuples are (start addr, end addr, string)
* If the return address lies in [start addr, end addr],
* the unwinder displays 'string'
*/
.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
/* Exact matches come first */
.word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
.word ret_from_intr; .word ret_from_intr ; .word type_IRQ
/* Fuzzy matches go here */
.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
/* End of table */
.word 0 ; .word 0 ; .word 0
/* arch/microblaze/kernel/hw_exception_handler.S */
/*
* Exception handling for Microblaze
*
* Rewritten interrupt handling
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
*
* uClinux customisation (C) 2005 John Williams
*
* MMU code derived from arch/ppc/kernel/head_4xx.S:
* Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org>
* Initial PowerPC version.
* Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
* Rewritten for PReP
* Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
* Low-level exception handlers, MMU support, and rewrite.
* Copyright (C) 1997 Dan Malek <dmalek@jlc.net>
* PowerPC 8xx modifications.
* Copyright (C) 1998-1999 TiVo, Inc.
* PowerPC 403GCX modifications.
* Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu>
* PowerPC 403GCX/405GP modifications.
* Copyright 2000 MontaVista Software Inc.
* PPC405 modifications
* PowerPC 403GCX/405GP modifications.
* Author: MontaVista Software, Inc.
* frank_rowand@mvista.com or source@mvista.com
* debbie_chu@mvista.com
*
* Original code
* Copyright (C) 2004 Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
/*
* Here are the handlers which don't require enabling translation
* and calling other kernel code thus we can keep their design very simple
* and do all processing in real mode. All they need is a valid current
* (that is an issue for the CONFIG_REGISTER_TASK_PTR case).
* These handlers use r3, r4, r5, r6 and optionally r[current] to work, therefore
* these registers are saved/restored
* The handlers which require translation are in entry.S --KAA
*
* Microblaze HW Exception Handler
* - Non self-modifying exception handler for the following exception conditions
* - Unalignment
* - Instruction bus error
* - Data bus error
* - Illegal instruction opcode
* - Divide-by-zero
*
* - Privileged instruction exception (MMU)
* - Data storage exception (MMU)
* - Instruction storage exception (MMU)
* - Data TLB miss exception (MMU)
* - Instruction TLB miss exception (MMU)
*
* Note we disable interrupts during exception handling, otherwise we will
* possibly get multiple re-entrancy if interrupt handlers themselves cause
* exceptions. JW
*/
#include <asm/exceptions.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/entry.h>
#include <asm/current.h>
#include <linux/linkage.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/registers.h>
#include <asm/asm-offsets.h>
#undef DEBUG
/* Helpful Macros */
#define NUM_TO_REG(num) r ## num
#ifdef CONFIG_MMU
#define RESTORE_STATE \
lwi r5, r1, 0; \
mts rmsr, r5; \
nop; \
lwi r3, r1, PT_R3; \
lwi r4, r1, PT_R4; \
lwi r5, r1, PT_R5; \
lwi r6, r1, PT_R6; \
lwi r11, r1, PT_R11; \
lwi r31, r1, PT_R31; \
lwi r1, r1, PT_R1;
#endif /* CONFIG_MMU */
#define LWREG_NOP \
bri ex_handler_unhandled; \
nop;
#define SWREG_NOP \
bri ex_handler_unhandled; \
nop;
/* FIXME this is weird - for the noMMU kernel it is not possible to use the
* brid instruction, which could shorten execution time
*/
/* r3 is the source */
#define R3_TO_LWREG_V(regnum) \
swi r3, r1, 4 * regnum; \
bri ex_handler_done;
/* r3 is the source */
#define R3_TO_LWREG(regnum) \
or NUM_TO_REG (regnum), r0, r3; \
bri ex_handler_done;
/* r3 is the target */
#define SWREG_TO_R3_V(regnum) \
lwi r3, r1, 4 * regnum; \
bri ex_sw_tail;
/* r3 is the target */
#define SWREG_TO_R3(regnum) \
or r3, r0, NUM_TO_REG (regnum); \
bri ex_sw_tail;
#ifdef CONFIG_MMU
#define R3_TO_LWREG_VM_V(regnum) \
brid ex_lw_end_vm; \
swi r3, r7, 4 * regnum;
#define R3_TO_LWREG_VM(regnum) \
brid ex_lw_end_vm; \
or NUM_TO_REG (regnum), r0, r3;
#define SWREG_TO_R3_VM_V(regnum) \
brid ex_sw_tail_vm; \
lwi r3, r7, 4 * regnum;
#define SWREG_TO_R3_VM(regnum) \
brid ex_sw_tail_vm; \
or r3, r0, NUM_TO_REG (regnum);
/* Shift right instruction depending on available configuration */
#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
/* Only the used shift constants defined here - add more if needed */
#define BSRLI2(rD, rA) \
srl rD, rA; /* << 1 */ \
srl rD, rD; /* << 2 */
#define BSRLI4(rD, rA) \
BSRLI2(rD, rA); \
BSRLI2(rD, rD)
#define BSRLI10(rD, rA) \
srl rD, rA; /* << 1 */ \
srl rD, rD; /* << 2 */ \
srl rD, rD; /* << 3 */ \
srl rD, rD; /* << 4 */ \
srl rD, rD; /* << 5 */ \
srl rD, rD; /* << 6 */ \
srl rD, rD; /* << 7 */ \
srl rD, rD; /* << 8 */ \
srl rD, rD; /* << 9 */ \
srl rD, rD /* << 10 */
#define BSRLI20(rD, rA) \
BSRLI10(rD, rA); \
BSRLI10(rD, rD)
.macro bsrli, rD, rA, IMM
.if (\IMM) == 2
BSRLI2(\rD, \rA)
.elseif (\IMM) == 10
BSRLI10(\rD, \rA)
.elseif (\IMM) == 12
BSRLI2(\rD, \rA)
BSRLI10(\rD, \rD)
.elseif (\IMM) == 14
BSRLI4(\rD, \rA)
BSRLI10(\rD, \rD)
.elseif (\IMM) == 20
BSRLI20(\rD, \rA)
.elseif (\IMM) == 24
BSRLI4(\rD, \rA)
BSRLI20(\rD, \rD)
.elseif (\IMM) == 28
BSRLI4(\rD, \rA)
BSRLI4(\rD, \rD)
BSRLI20(\rD, \rD)
.else
.error "BSRLI shift macros \IMM"
.endif
.endm
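/* e.g. 'bsrli r5, r3, 12' expands to BSRLI2 followed by BSRLI10, i.e. twelve
 * single-bit 'srl' shifts, when the barrel shifter is not configured */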
#endif
#endif /* CONFIG_MMU */
.extern other_exception_handler /* Defined in exception.c */
/*
* hw_exception_handler - Handler for exceptions
*
* Exception handler notes:
* - Handles all exceptions
* - Does not handle unaligned exceptions during load into r17, r1, r0.
* - Does not handle unaligned exceptions during store from r17 (cannot be
* done) and r1 (slows down common case)
*
* Relevant register structures
*
* EAR - |----|----|----|----|----|----|----|----|
* - < ## 32 bit faulting address ## >
*
* ESR - |----|----|----|----|----| - | - |-----|-----|
* - W S REG EXC
*
*
* STACK FRAME STRUCTURE (for CONFIG_MMU=n)
* ----------------------------------------
*
* +-------------+ + 0
* | MSR |
* +-------------+ + 4
* | r1 |
* | . |
* | . |
* | . |
* | . |
* | r18 |
* +-------------+ + 76
* | . |
* | . |
*
* The MMU kernel stores register values in the same 'pt_pool_space' area.
* The noMMU style was to store the values on the stack, but in case of a
* failure you lost the information about the registers.
* Currently you can always see the register values at a specific place in memory.
* Compared with the previous solution the speed should be the same.
*
* The MMU exception handler handles things differently from the noMMU kernel.
* It uses a jump table to direct what happens. This approach is better for the
* MMU kernel because MMU-related exceptions are handled by asm code in this
* file. In the noMMU kernel, by contrast, everything except the unaligned
* exception is handled by C code.
*/
/*
* each of these handlers is entered with R3/4/5/6/11/current saved on the stack
* and clobbered, so care should be taken to restore them if someone is going to
* return from the exception
*/
/* wrappers to restore state before coming to entry.S */
#ifdef CONFIG_MMU
.section .data
.align 4
pt_pool_space:
.space PT_SIZE
#ifdef DEBUG
/* Create space for exception counting. */
.section .data
.global exception_debug_table
.align 4
exception_debug_table:
/* See the exception vector table. There are 32 exceptions * word size */
.space (32 * 4)
#endif /* DEBUG */
.section .rodata
.align 4
_MB_HW_ExceptionVectorTable:
/* 0 - Undefined */
.long TOPHYS(ex_handler_unhandled)
/* 1 - Unaligned data access exception */
.long TOPHYS(handle_unaligned_ex)
/* 2 - Illegal op-code exception */
.long TOPHYS(full_exception_trapw)
/* 3 - Instruction bus error exception */
.long TOPHYS(full_exception_trapw)
/* 4 - Data bus error exception */
.long TOPHYS(full_exception_trapw)
/* 5 - Divide by zero exception */
.long TOPHYS(full_exception_trapw)
/* 6 - Floating point unit exception */
.long TOPHYS(full_exception_trapw)
/* 7 - Privileged instruction exception */
.long TOPHYS(full_exception_trapw)
/* 8 - 15 - Undefined */
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
/* 16 - Data storage exception */
.long TOPHYS(handle_data_storage_exception)
/* 17 - Instruction storage exception */
.long TOPHYS(handle_instruction_storage_exception)
/* 18 - Data TLB miss exception */
.long TOPHYS(handle_data_tlb_miss_exception)
/* 19 - Instruction TLB miss exception */
.long TOPHYS(handle_instruction_tlb_miss_exception)
/* 20 - 31 - Undefined */
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
.long TOPHYS(ex_handler_unhandled)
#endif
.global _hw_exception_handler
.section .text
.align 4
.ent _hw_exception_handler
_hw_exception_handler:
#ifndef CONFIG_MMU
addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
#else
swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
/* Save data to kernel memory. Here is the problem
 * when you come from user space */
ori r1, r0, TOPHYS(pt_pool_space);
#endif
swi r3, r1, PT_R3
swi r4, r1, PT_R4
swi r5, r1, PT_R5
swi r6, r1, PT_R6
#ifdef CONFIG_MMU
swi r11, r1, PT_R11
swi r31, r1, PT_R31
lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
#endif
mfs r5, rmsr;
nop
swi r5, r1, 0;
mfs r4, resr
nop
mfs r3, rear;
nop
#ifndef CONFIG_MMU
andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
not_in_delay_slot:
swi r17, r1, PT_R17
#endif
andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
#ifdef DEBUG
/* count which exceptions happen */
lwi r5, r0, TOPHYS(exception_debug_table)
addi r5, r5, 1
swi r5, r0, TOPHYS(exception_debug_table)
lwi r5, r6, TOPHYS(exception_debug_table)
addi r5, r5, 1
swi r5, r6, TOPHYS(exception_debug_table)
#endif
/* end */
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
bra r6
full_exception_trapw:
RESTORE_STATE
bri full_exception_trap
#else
/* Exceptions enabled here. This will allow nested exceptions */
mfs r6, rmsr;
nop
swi r6, r1, 0; /* RMSR_OFFSET */
ori r6, r6, 0x100; /* Turn ON the EE bit */
andi r6, r6, ~2; /* Disable interrupts */
mts rmsr, r6;
nop
xori r6, r5, 1; /* 00001 = Unaligned Exception */
/* Jump to unalignment exception handler */
beqi r6, handle_unaligned_ex;
handle_other_ex: /* Handle Other exceptions here */
/* Save other volatiles before we make procedure calls below */
swi r7, r1, PT_R7
swi r8, r1, PT_R8
swi r9, r1, PT_R9
swi r10, r1, PT_R10
swi r11, r1, PT_R11
swi r12, r1, PT_R12
swi r14, r1, PT_R14
swi r15, r1, PT_R15
swi r18, r1, PT_R18
or r5, r1, r0
andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE
mfs r7, rfsr
nop
addk r8, r17, r0; /* Load exception address */
bralid r15, full_exception; /* Branch to the handler */
nop;
mts rfsr, r0; /* Clear sticky fsr */
nop
/*
* Trigger execution of the signal handler by enabling
* interrupts and calling an invalid syscall.
*/
mfs r5, rmsr;
nop
ori r5, r5, 2;
mts rmsr, r5; /* enable interrupt */
nop
addi r12, r0, __NR_syscalls;
brki r14, 0x08;
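/* r12 = __NR_syscalls is one past the last valid syscall number, so the
 * syscall trap simply returns -ENOSYS and runs the usual pending-signal
 * work on the way back */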
mfs r5, rmsr; /* disable interrupt */
nop
andi r5, r5, ~2;
mts rmsr, r5;
nop
lwi r7, r1, PT_R7
lwi r8, r1, PT_R8
lwi r9, r1, PT_R9
lwi r10, r1, PT_R10
lwi r11, r1, PT_R11
lwi r12, r1, PT_R12
lwi r14, r1, PT_R14
lwi r15, r1, PT_R15
lwi r18, r1, PT_R18
bri ex_handler_done; /* Complete exception handling */
#endif
/* 0x01 - Unaligned data access exception
* This occurs when a word access is not aligned on a word boundary,
* or when a 16-bit access is not aligned on a 16-bit boundary.
* This handler performs the access and returns, except for MMU when
* the unaligned address is last on a 4k page or the physical address is
* not found in the page table, in which case unaligned_data_trap is called.
*/
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
* R4 = ESR
* R3 = EAR
*/
#ifdef CONFIG_MMU
andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
_no_delayslot:
/* jump to high level unaligned handler */
RESTORE_STATE;
bri unaligned_data_trap
#endif
andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
srl r6, r6;
srl r6, r6;
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
ex_lw:
andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw;
lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address
and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
/* Get the destination register value into r4 */
lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail;
ex_lhw:
lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination
address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
/* Get the destination register value into r4 */
lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
/* Form load_word jump table offset (lw_table + (8 * regnum)) */
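/* each jump-table entry is two 32-bit instructions (8 bytes), hence the
 * three self-adds below that multiply the register number by 8 */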
addik r6, r0, TOPHYS(lw_table);
addk r5, r5, r5;
addk r5, r5, r5;
addk r5, r5, r5;
addk r5, r5, r6;
bra r5;
ex_lw_end: /* Exception handling of load word, ends */
ex_sw:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
/* Form store_word jump table offset (sw_table + (8 * regnum)) */
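/* same 8-byte-per-entry layout as lw_table above */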
addik r6, r0, TOPHYS(sw_table);
add r5, r5, r5;
add r5, r5, r5;
add r5, r5, r5;
add r5, r5, r6;
bra r5;
ex_sw_tail:
mfs r6, resr;
nop
andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw;
/* Get the word - delay slot */
swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */
lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
sbi r4, r3, 0;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
sbi r4, r3, 1;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r4, r3, 2;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r4, r3, 3;
bri ex_handler_done;
ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */
swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
sbi r4, r3, 0;
lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
#ifndef CONFIG_MMU
lwi r5, r1, 0 /* RMSR */
mts rmsr, r5
nop
lwi r3, r1, PT_R3
lwi r4, r1, PT_R4
lwi r5, r1, PT_R5
lwi r6, r1, PT_R6
lwi r17, r1, PT_R17
rted r17, 0
addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
#else
RESTORE_STATE;
rted r17, 0
nop
#endif
#ifdef CONFIG_MMU
/* Exception vector entry code. This code runs with address translation
* turned off (i.e. using physical addresses). */
/* Exception vectors. */
/* 0x10 - Data Storage Exception
* This happens for just a few reasons. U0 set (but we don't do that),
* or zone protection fault (user violation, write to protected page).
* If this is just an update of modified status, we do that quickly
* and exit. Otherwise, we call heavyweight functions to do the work.
*/
handle_data_storage_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r11, rpid
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r5, r0, CONFIG_KERNEL_START
cmpu r5, r3, r5
bgti r5, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
andi r4, r4, ESR_DIZ /* ESR_Z - zone protection */
bnei r4, ex2
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex4
/* Get the PGD for the current thread. */
ex3:
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
andi r4, r4, ESR_DIZ /* ESR_Z */
bnei r4, ex2
/* get current task address */
addi r4 ,CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex4:
tophys(r4,r4)
/* Create L1 (pgdir/pmd) address */
bsrli r5, r3, PGDIR_SHIFT - 2
andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex2 /* Bail if no table */
tophys(r5,r5)
bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_RW /* Is it writeable? */
beqi r6, ex2 /* Bail if not */
/* Update 'changed' */
ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
swi r4, r5, 0 /* Update Linux page table */
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
TLB_ZSEL(1) | TLB_ATTR_MASK
ori r4, r4, _PAGE_HWEXEC /* make it executable */
/* find the TLB index that caused the fault. It has to be here*/
mts rtlbsx, r3
nop
mfs r5, rtlbx /* DEBUG: TBD */
nop
mts rtlblo, r4 /* Load TLB LO */
nop
/* Will sync shadow TLBs */
/* Done...restore registers and get out of here. */
mts rpid, r11
nop
bri 4
RESTORE_STATE;
rted r17, 0
nop
ex2:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out. */
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_data_trap
/* 0x11 - Instruction Storage Exception
* This is caused by a fetch from non-execute or guarded pages. */
handle_instruction_storage_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
RESTORE_STATE;
bri page_fault_instr_trap
/* 0x12 - Data TLB Miss Exception
* As the name implies, translation is not in the MMU, so search the
* page tables and fix it. The only purpose of this function is to
* load TLB entries from the page table if they exist.
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = EAR, R4 = ESR
*/
mfs r11, rpid
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
ori r6, r0, CONFIG_KERNEL_START
cmpu r4, r3, r6
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex6
/* Get the PGD for the current thread. */
ex5:
/* get current task address */
addi r4 ,CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex6:
tophys(r4,r4)
/* Create L1 (pgdir/pmd) address */
bsrli r5, r3, PGDIR_SHIFT - 2
andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex7 /* Bail if no table */
tophys(r5,r5)
bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
beqi r6, ex7
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
TLB_ZSEL(1) | TLB_ATTR_MASK
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_data_trap
/* 0x13 - Instruction TLB Miss Exception
* Nearly the same as above, except we get our information from
* different registers and bailout to a different point.
*/
handle_instruction_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
* R3 = ESR
*/
mfs r11, rpid
nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
ori r4, r0, CONFIG_KERNEL_START
cmpu r4, r3, r4
bgti r4, ex8
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
nop
bri ex9
/* Get the PGD for the current thread. */
ex8:
/* get current task address */
addi r4 ,CURRENT_TASK, TOPHYS(0);
lwi r4, r4, TASK_THREAD+PGDIR
ex9:
tophys(r4,r4)
/* Create L1 (pgdir/pmd) address */
bsrli r5, r3, PGDIR_SHIFT - 2
andi r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
or r4, r4, r5
lwi r4, r4, 0 /* Get L1 entry */
andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
beqi r5, ex10 /* Bail if no table */
tophys(r5,r5)
bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
andi r6, r6, PAGE_SIZE - 4
or r5, r5, r6
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
/* Most of the Linux PTE is ready to load into the TLB LO.
* We set ZSEL, where only the LS-bit determines user access.
* We set execute, because we don't have the granularity to
* properly set this at the page level (Linux problem).
* If shared is set, we cause a zero PID->TID load.
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
brid finish_tlb_load
andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
TLB_ZSEL(1) | TLB_ATTR_MASK
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
mts rpid, r11
nop
bri 4
RESTORE_STATE;
bri page_fault_instr_trap
/* Both the instruction and data TLB miss get to this point to load the TLB.
* r3 - EA of fault
* r4 - TLB LO (info from Linux PTE)
* r5, r6 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* A common place to load the TLB.
*/
.section .data
.align 4
.global tlb_skip
tlb_skip:
.long MICROBLAZE_TLB_SKIP
tlb_index:
/* MS: storing last used tlb index */
.long MICROBLAZE_TLB_SIZE/2
.previous
finish_tlb_load:
/* MS: load the last used TLB index. */
lwi r5, r0, TOPHYS(tlb_index)
addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
/* MS: FIXME this is a potential fault, because this is a mask, not a count */
andi r5, r5, MICROBLAZE_TLB_SIZE - 1
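/* if the masked index has wrapped back to the start of the TLB, restart
 * allocation at tlb_skip so the pinned entries below it are never evicted */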
ori r6, r0, 1
cmp r31, r5, r6
blti r31, ex12
lwi r5, r0, TOPHYS(tlb_skip)
ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
ori r4, r4, _PAGE_HWEXEC /* make it executable */
mts rtlbx, r5 /* MS: save current TLB */
nop
mts rtlblo, r4 /* MS: save to TLB LO */
nop
/* Create EPN. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0, and ensure
* bits 20 and 21 are zero.
*/
andi r3, r3, PAGE_MASK
#ifdef CONFIG_MICROBLAZE_64K_PAGES
ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
#elif CONFIG_MICROBLAZE_16K_PAGES
ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
#else
ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
#endif
mts rtlbhi, r3 /* Load TLB HI */
nop
/* Done...restore registers and get out of here. */
mts rpid, r11
nop
bri 4
RESTORE_STATE;
rted r17, 0
nop
/* extern void giveup_fpu(struct task_struct *prev)
*
* The MicroBlaze processor may have an FPU, so this should not just
* return: TBD.
*/
.globl giveup_fpu;
.align 4;
giveup_fpu:
bralid r15,0 /* TBD */
nop
/* At present, this routine just hangs. - extern void abort(void) */
.globl abort;
.align 4;
abort:
br r0
.globl set_context;
.align 4;
set_context:
mts rpid, r5 /* Shadow TLBs are automatically */
nop
bri 4 /* flushed by changing PID */
rtsd r15,8
nop
#endif
.end _hw_exception_handler
#ifdef CONFIG_MMU
/* Unaligned data access exception last on a 4k page for MMU.
* When this is called, we are in virtual mode with exceptions enabled
* and registers 1-13,15,17,18 saved.
*
* R3 = ESR
* R4 = EAR
* R7 = pointer to saved registers (struct pt_regs *regs)
*
* This handler performs the access and returns via ret_from_exc.
*/
.global _unaligned_data_exception
.ent _unaligned_data_exception
_unaligned_data_exception:
andi r8, r3, 0x3E0; /* Mask and extract the register operand */
bsrli r8, r8, 2; /* r8 >> 2 = register operand * 8 */
andi r6, r3, 0x400; /* Extract ESR[S] */
bneid r6, ex_sw_vm;
andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
ex_lw_vm:
beqid r6, ex_lhw_vm;
load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space*/
addik r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
load2: lbui r5, r4, 1;
sbi r5, r6, 1;
load3: lbui r5, r4, 2;
sbi r5, r6, 2;
load4: lbui r5, r4, 3;
sbi r5, r6, 3;
brid ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
lwi r3, r6, 0;
ex_lhw_vm:
/* Load a half-word, byte-by-byte from destination address and
* save it in tmp space */
addik r6, r0, ex_tmp_data_loc_0;
sbi r5, r6, 0;
load5: lbui r5, r4, 1;
sbi r5, r6, 1;
lhui r3, r6, 0; /* Get the destination register value into r3 */
ex_lw_tail_vm:
/* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
addik r5, r8, lw_table_vm;
bra r5;
ex_lw_end_vm: /* Exception handling of load word, ends */
brai ret_from_exc;
ex_sw_vm:
/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
addik r5, r8, sw_table_vm;
bra r5;
ex_sw_tail_vm:
addik r5, r0, ex_tmp_data_loc_0;
beqid r6, ex_shw_vm;
swi r3, r5, 0; /* Get the word - delay slot */
/* Store the word, byte-by-byte into destination address */
lbui r3, r5, 0;
store1: sbi r3, r4, 0;
lbui r3, r5, 1;
store2: sbi r3, r4, 1;
lbui r3, r5, 2;
store3: sbi r3, r4, 2;
lbui r3, r5, 3;
brid ret_from_exc;
store4: sbi r3, r4, 3; /* Delay slot */
ex_shw_vm:
/* Store the lower half-word, byte-by-byte into destination address */
#ifdef __MICROBLAZEEL__
lbui r3, r5, 0;
store5: sbi r3, r4, 0;
lbui r3, r5, 1;
brid ret_from_exc;
store6: sbi r3, r4, 1; /* Delay slot */
#else
lbui r3, r5, 2;
store5: sbi r3, r4, 0;
lbui r3, r5, 3;
brid ret_from_exc;
store6: sbi r3, r4, 1; /* Delay slot */
#endif
ex_sw_end_vm: /* Exception handling of store word, ends. */
/* We have to handle the case where a get/put_user macro passes an unaligned
 * pointer into a bad page area. We must find the original instruction that
 * caused the fault and run the fixup for that instruction, not for an
 * instruction inside this unaligned handler. */
ex_unaligned_fixup:
ori r5, r7, 0 /* setup pointer to pt_regs */
lwi r6, r7, PT_PC; /* faulting address is one instruction above */
addik r6, r6, -4 /* for finding proper fixup */
swi r6, r7, PT_PC; /* and save it back to PT_PC */
addik r7, r0, SIGSEGV
/* call bad_page_fault for finding aligned fixup, fixup address is saved
* in PT_PC which is used as return address from exception */
addik r15, r0, ret_from_exc-8 /* setup return address */
brid bad_page_fault
nop
/* Cover every load/store above in the exception table, because any of these accesses could fail */
.section __ex_table,"a";
.word load1,ex_unaligned_fixup;
.word load2,ex_unaligned_fixup;
.word load3,ex_unaligned_fixup;
.word load4,ex_unaligned_fixup;
.word load5,ex_unaligned_fixup;
.word store1,ex_unaligned_fixup;
.word store2,ex_unaligned_fixup;
.word store3,ex_unaligned_fixup;
.word store4,ex_unaligned_fixup;
.word store5,ex_unaligned_fixup;
.word store6,ex_unaligned_fixup;
.previous;
.end _unaligned_data_exception
#endif /* CONFIG_MMU */
.global ex_handler_unhandled
ex_handler_unhandled:
/* FIXME add handle function for unhandled exception - dump register */
bri 0
/*
* hw_exception_handler Jump Table
* - Contains code snippets for each register that caused the unaligned exception
* - Hence exception handler is NOT self-modifying
* - Separate table for load exceptions and store exceptions.
* - Each table is of size: (8 * 32) = 256 bytes
*/
.section .text
.align 4
lw_table:
lw_r0: R3_TO_LWREG (0);
lw_r1: LWREG_NOP;
lw_r2: R3_TO_LWREG (2);
lw_r3: R3_TO_LWREG_V (3);
lw_r4: R3_TO_LWREG_V (4);
lw_r5: R3_TO_LWREG_V (5);
lw_r6: R3_TO_LWREG_V (6);
lw_r7: R3_TO_LWREG (7);
lw_r8: R3_TO_LWREG (8);
lw_r9: R3_TO_LWREG (9);
lw_r10: R3_TO_LWREG (10);
lw_r11: R3_TO_LWREG (11);
lw_r12: R3_TO_LWREG (12);
lw_r13: R3_TO_LWREG (13);
lw_r14: R3_TO_LWREG (14);
lw_r15: R3_TO_LWREG (15);
lw_r16: R3_TO_LWREG (16);
lw_r17: LWREG_NOP;
lw_r18: R3_TO_LWREG (18);
lw_r19: R3_TO_LWREG (19);
lw_r20: R3_TO_LWREG (20);
lw_r21: R3_TO_LWREG (21);
lw_r22: R3_TO_LWREG (22);
lw_r23: R3_TO_LWREG (23);
lw_r24: R3_TO_LWREG (24);
lw_r25: R3_TO_LWREG (25);
lw_r26: R3_TO_LWREG (26);
lw_r27: R3_TO_LWREG (27);
lw_r28: R3_TO_LWREG (28);
lw_r29: R3_TO_LWREG (29);
lw_r30: R3_TO_LWREG (30);
#ifdef CONFIG_MMU
lw_r31: R3_TO_LWREG_V (31);
#else
lw_r31: R3_TO_LWREG (31);
#endif
sw_table:
sw_r0: SWREG_TO_R3 (0);
sw_r1: SWREG_NOP;
sw_r2: SWREG_TO_R3 (2);
sw_r3: SWREG_TO_R3_V (3);
sw_r4: SWREG_TO_R3_V (4);
sw_r5: SWREG_TO_R3_V (5);
sw_r6: SWREG_TO_R3_V (6);
sw_r7: SWREG_TO_R3 (7);
sw_r8: SWREG_TO_R3 (8);
sw_r9: SWREG_TO_R3 (9);
sw_r10: SWREG_TO_R3 (10);
sw_r11: SWREG_TO_R3 (11);
sw_r12: SWREG_TO_R3 (12);
sw_r13: SWREG_TO_R3 (13);
sw_r14: SWREG_TO_R3 (14);
sw_r15: SWREG_TO_R3 (15);
sw_r16: SWREG_TO_R3 (16);
sw_r17: SWREG_NOP;
sw_r18: SWREG_TO_R3 (18);
sw_r19: SWREG_TO_R3 (19);
sw_r20: SWREG_TO_R3 (20);
sw_r21: SWREG_TO_R3 (21);
sw_r22: SWREG_TO_R3 (22);
sw_r23: SWREG_TO_R3 (23);
sw_r24: SWREG_TO_R3 (24);
sw_r25: SWREG_TO_R3 (25);
sw_r26: SWREG_TO_R3 (26);
sw_r27: SWREG_TO_R3 (27);
sw_r28: SWREG_TO_R3 (28);
sw_r29: SWREG_TO_R3 (29);
sw_r30: SWREG_TO_R3 (30);
#ifdef CONFIG_MMU
sw_r31: SWREG_TO_R3_V (31);
#else
sw_r31: SWREG_TO_R3 (31);
#endif
#ifdef CONFIG_MMU
lw_table_vm:
lw_r0_vm: R3_TO_LWREG_VM (0);
lw_r1_vm: R3_TO_LWREG_VM_V (1);
lw_r2_vm: R3_TO_LWREG_VM_V (2);
lw_r3_vm: R3_TO_LWREG_VM_V (3);
lw_r4_vm: R3_TO_LWREG_VM_V (4);
lw_r5_vm: R3_TO_LWREG_VM_V (5);
lw_r6_vm: R3_TO_LWREG_VM_V (6);
lw_r7_vm: R3_TO_LWREG_VM_V (7);
lw_r8_vm: R3_TO_LWREG_VM_V (8);
lw_r9_vm: R3_TO_LWREG_VM_V (9);
lw_r10_vm: R3_TO_LWREG_VM_V (10);
lw_r11_vm: R3_TO_LWREG_VM_V (11);
lw_r12_vm: R3_TO_LWREG_VM_V (12);
lw_r13_vm: R3_TO_LWREG_VM_V (13);
lw_r14_vm: R3_TO_LWREG_VM_V (14);
lw_r15_vm: R3_TO_LWREG_VM_V (15);
lw_r16_vm: R3_TO_LWREG_VM_V (16);
lw_r17_vm: R3_TO_LWREG_VM_V (17);
lw_r18_vm: R3_TO_LWREG_VM_V (18);
lw_r19_vm: R3_TO_LWREG_VM_V (19);
lw_r20_vm: R3_TO_LWREG_VM_V (20);
lw_r21_vm: R3_TO_LWREG_VM_V (21);
lw_r22_vm: R3_TO_LWREG_VM_V (22);
lw_r23_vm: R3_TO_LWREG_VM_V (23);
lw_r24_vm: R3_TO_LWREG_VM_V (24);
lw_r25_vm: R3_TO_LWREG_VM_V (25);
lw_r26_vm: R3_TO_LWREG_VM_V (26);
lw_r27_vm: R3_TO_LWREG_VM_V (27);
lw_r28_vm: R3_TO_LWREG_VM_V (28);
lw_r29_vm: R3_TO_LWREG_VM_V (29);
lw_r30_vm: R3_TO_LWREG_VM_V (30);
lw_r31_vm: R3_TO_LWREG_VM_V (31);
sw_table_vm:
sw_r0_vm: SWREG_TO_R3_VM (0);
sw_r1_vm: SWREG_TO_R3_VM_V (1);
sw_r2_vm: SWREG_TO_R3_VM_V (2);
sw_r3_vm: SWREG_TO_R3_VM_V (3);
sw_r4_vm: SWREG_TO_R3_VM_V (4);
sw_r5_vm: SWREG_TO_R3_VM_V (5);
sw_r6_vm: SWREG_TO_R3_VM_V (6);
sw_r7_vm: SWREG_TO_R3_VM_V (7);
sw_r8_vm: SWREG_TO_R3_VM_V (8);
sw_r9_vm: SWREG_TO_R3_VM_V (9);
sw_r10_vm: SWREG_TO_R3_VM_V (10);
sw_r11_vm: SWREG_TO_R3_VM_V (11);
sw_r12_vm: SWREG_TO_R3_VM_V (12);
sw_r13_vm: SWREG_TO_R3_VM_V (13);
sw_r14_vm: SWREG_TO_R3_VM_V (14);
sw_r15_vm: SWREG_TO_R3_VM_V (15);
sw_r16_vm: SWREG_TO_R3_VM_V (16);
sw_r17_vm: SWREG_TO_R3_VM_V (17);
sw_r18_vm: SWREG_TO_R3_VM_V (18);
sw_r19_vm: SWREG_TO_R3_VM_V (19);
sw_r20_vm: SWREG_TO_R3_VM_V (20);
sw_r21_vm: SWREG_TO_R3_VM_V (21);
sw_r22_vm: SWREG_TO_R3_VM_V (22);
sw_r23_vm: SWREG_TO_R3_VM_V (23);
sw_r24_vm: SWREG_TO_R3_VM_V (24);
sw_r25_vm: SWREG_TO_R3_VM_V (25);
sw_r26_vm: SWREG_TO_R3_VM_V (26);
sw_r27_vm: SWREG_TO_R3_VM_V (27);
sw_r28_vm: SWREG_TO_R3_VM_V (28);
sw_r29_vm: SWREG_TO_R3_VM_V (29);
sw_r30_vm: SWREG_TO_R3_VM_V (30);
sw_r31_vm: SWREG_TO_R3_VM_V (31);
#endif /* CONFIG_MMU */
/* Temporary data structures used in the handler */
.section .data
.align 4
ex_tmp_data_loc_0:
.byte 0
ex_tmp_data_loc_1:
.byte 0
ex_tmp_data_loc_2:
.byte 0
ex_tmp_data_loc_3:
.byte 0
ex_reg_op:
.byte 0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,363
|
arch/microblaze/kernel/misc.S
|
/*
* Miscellaneous low-level MMU functions.
*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2007 Xilinx, Inc. All rights reserved.
*
* Derived from arch/ppc/kernel/misc.S
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <linux/errno.h>
#include <asm/mmu.h>
#include <asm/page.h>
.text
/*
* Flush MMU TLB
*
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
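/*
 * In rough C terms (a sketch; mts() stands in for the "move to special
 * register" instruction, and tlb_skip counts the pinned entries):
 *
 *   for (unsigned int i = tlb_skip; i < MICROBLAZE_TLB_SIZE; i++) {
 *       mts(RTLBX, i);      // select TLB entry i
 *       mts(RTLBHI, 0);     // clearing TLBHI (valid bit) invalidates it
 *   }
 */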
.globl _tlbia;
.type _tlbia, @function
.align 4;
_tlbia:
lwi r12, r0, tlb_skip;
/* isync */
_tlbia_1:
mts rtlbx, r12
nop
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1
bneid r11, _tlbia_1 /* loop for all entries */
addik r12, r12, 1
/* sync */
rtsd r15, 8
nop
.size _tlbia, . - _tlbia
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
.type _tlbie, @function
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
nop
mfs r12, rtlbx /* Retrieve index */
nop
blti r12, _tlbie_1 /* Check if found */
mts rtlbhi, r0 /* flush: ensure V is clear */
nop
_tlbie_1:
rtsd r15, 8
nop
.size _tlbie, . - _tlbie
|
AirFortressIlikara/LS2K0300-linux-4.19
| 4,066
|
arch/microblaze/kernel/mcount.S
|
/*
* Low-level ftrace handling
*
* Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009 PetaLogix
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*/
#include <linux/linkage.h>
#define NOALIGN_ENTRY(name) .globl name; name:
/* FIXME MS: I think that I don't need to save all regs */
#define SAVE_REGS \
addik r1, r1, -120; \
swi r2, r1, 4; \
swi r3, r1, 8; \
swi r4, r1, 12; \
swi r5, r1, 116; \
swi r6, r1, 16; \
swi r7, r1, 20; \
swi r8, r1, 24; \
swi r9, r1, 28; \
swi r10, r1, 32; \
swi r11, r1, 36; \
swi r12, r1, 40; \
swi r13, r1, 44; \
swi r14, r1, 48; \
swi r16, r1, 52; \
swi r17, r1, 56; \
swi r18, r1, 60; \
swi r19, r1, 64; \
swi r20, r1, 68; \
swi r21, r1, 72; \
swi r22, r1, 76; \
swi r23, r1, 80; \
swi r24, r1, 84; \
swi r25, r1, 88; \
swi r26, r1, 92; \
swi r27, r1, 96; \
swi r28, r1, 100; \
swi r29, r1, 104; \
swi r30, r1, 108; \
swi r31, r1, 112;
#define RESTORE_REGS \
lwi r2, r1, 4; \
lwi r3, r1, 8; \
lwi r4, r1, 12; \
lwi r5, r1, 116; \
lwi r6, r1, 16; \
lwi r7, r1, 20; \
lwi r8, r1, 24; \
lwi r9, r1, 28; \
lwi r10, r1, 32; \
lwi r11, r1, 36; \
lwi r12, r1, 40; \
lwi r13, r1, 44; \
lwi r14, r1, 48; \
lwi r16, r1, 52; \
lwi r17, r1, 56; \
lwi r18, r1, 60; \
lwi r19, r1, 64; \
lwi r20, r1, 68; \
lwi r21, r1, 72; \
lwi r22, r1, 76; \
lwi r23, r1, 80; \
lwi r24, r1, 84; \
lwi r25, r1, 88; \
lwi r26, r1, 92; \
lwi r27, r1, 96; \
lwi r28, r1, 100; \
lwi r29, r1, 104; \
lwi r30, r1, 108; \
lwi r31, r1, 112; \
addik r1, r1, 120;
ENTRY(ftrace_stub)
rtsd r15, 8;
nop;
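/*
 * What _mcount amounts to, as a hedged C sketch (static-ftrace case; ip and
 * parent_ip correspond to r15-4 and the saved parent address on the stack):
 *
 *   void _mcount(void)
 *   {
 *       if (ftrace_trace_function != ftrace_stub)
 *           ftrace_trace_function(ip, parent_ip);
 *       // with CONFIG_FUNCTION_GRAPH_TRACER, prepare_ftrace_return()
 *       // may additionally hook the return address first
 *   }
 */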
ENTRY(_mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
/* MS: It is just a barrier which is removed from C code */
rtsd r15, 8
nop
#endif /* CONFIG_DYNAMIC_FTRACE */
SAVE_REGS
swi r15, r1, 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef CONFIG_DYNAMIC_FTRACE
lwi r5, r0, ftrace_graph_return;
addik r6, r0, ftrace_stub; /* asm implementation */
cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
beqid r5, end_graph_tracer;
nop;
lwi r6, r0, ftrace_graph_entry;
addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
beqid r5, end_graph_tracer;
nop;
#else /* CONFIG_DYNAMIC_FTRACE */
NOALIGN_ENTRY(ftrace_call_graph)
/* MS: jump over graph function - replaced from C code */
bri end_graph_tracer
#endif /* CONFIG_DYNAMIC_FTRACE */
addik r5, r1, 120; /* MS: load parent addr */
addik r6, r15, 0; /* MS: load current function addr */
bralid r15, prepare_ftrace_return;
nop;
/* MS: graph tracing was taken, so we can jump over the function trace */
brid end;
nop;
end_graph_tracer:
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifndef CONFIG_DYNAMIC_FTRACE
/* MS: test whether function tracing is enabled */
lwi r20, r0, ftrace_trace_function;
addik r6, r0, ftrace_stub;
cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
beqid r5, end; /* MS: not taken -> jump over */
nop;
#else /* CONFIG_DYNAMIC_FTRACE */
NOALIGN_ENTRY(ftrace_call)
/* instruction for setup imm FUNC_part1, addik r20, r0, FUNC_part2 */
nop
nop
#endif /* CONFIG_DYNAMIC_FTRACE */
/* static normal trace */
lwi r6, r1, 120; /* MS: load parent addr */
addik r5, r15, -4; /* MS: load current function addr */
/* MS: here is dependency on previous code */
brald r15, r20; /* MS: jump to ftrace handler */
nop;
end:
lwi r15, r1, 0;
RESTORE_REGS
rtsd r15, 8; /* MS: jump back */
nop;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(return_to_handler)
nop; /* MS: just barrier for rtsd r15, 8 */
nop;
SAVE_REGS
swi r15, r1, 0;
/* MS: find out returning address */
bralid r15, ftrace_return_to_handler;
nop;
/* MS: the return value from ftrace_return_to_handler is the address to return
 * to; this must happen before RESTORE_REGS because r3 is restored there */
addik r15, r3, 0;
RESTORE_REGS
rtsd r15, 8; /* MS: jump back */
nop;
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,802
|
arch/microblaze/kernel/vmlinux.lds.S
|
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2006 Atmark Techno, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
OUTPUT_ARCH(microblaze)
ENTRY(microblaze_start)
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#ifdef __MICROBLAZEEL__
jiffies = jiffies_64;
#else
jiffies = jiffies_64 + 4;
#endif
SECTIONS {
. = CONFIG_KERNEL_START;
microblaze_start = CONFIG_KERNEL_BASE_ADDR;
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = . ;
_stext = . ;
HEAD_TEXT
TEXT_TEXT
*(.fixup)
EXIT_TEXT
EXIT_CALL
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
. = ALIGN (4) ;
_etext = . ;
}
. = ALIGN (4) ;
__fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
_fdt_start = . ; /* place for fdt blob */
*(__fdt_blob) ; /* Any link-placed DTB */
. = _fdt_start + 0x8000; /* Pad up to 32kbyte */
_fdt_end = . ;
}
. = ALIGN(16);
RODATA
EXCEPTION_TABLE(16)
NOTES
/*
* sdata2 section can go anywhere, but must be word aligned
* and SDA2_BASE must point to the middle of it
*/
.sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
_ssrw = .;
. = ALIGN(PAGE_SIZE); /* page aligned when MMU used */
*(.sdata2)
. = ALIGN(8);
_essrw = .;
_ssrw_size = _essrw - _ssrw;
_KERNEL_SDA2_BASE_ = _ssrw + (_ssrw_size / 2);
}
_sdata = . ;
RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
_edata = . ;
/* Under the microblaze ABI, .sdata and .sbss must be contiguous */
. = ALIGN(8);
.sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
_ssro = .;
*(.sdata)
}
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {
_ssbss = .;
*(.sbss)
_esbss = .;
_essro = .;
_ssro_size = _essro - _ssro ;
_KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
}
. = ALIGN(PAGE_SIZE);
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
}
. = ALIGN(4);
.init.ivt : AT(ADDR(.init.ivt) - LOAD_OFFSET) {
__ivt_start = .;
*(.init.ivt)
__ivt_end = .;
}
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
INIT_SETUP(0)
}
.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET ) {
INIT_CALLS
}
.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
CON_INITCALL
}
SECURITY_INIT
__init_end_before_initramfs = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
INIT_RAM_FS
}
__init_end = .;
.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
/* page aligned when MMU used */
__bss_start = . ;
*(.bss*)
*(COMMON)
. = ALIGN (4) ;
__bss_stop = . ;
}
. = ALIGN(PAGE_SIZE);
_end = .;
DISCARDS
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,013
|
arch/microblaze/lib/umodsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/*
* Unsigned modulo operation for 32 bit integers.
* Input : op1 in Reg r5
* op2 in Reg r6
* Output: op1 mod op2 in Reg r3
*/
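/*
 * The core below is classic restoring division; ignoring the early-out
 * special cases and the skip over leading zero bits, it behaves roughly
 * like this C sketch:
 *
 *   unsigned int umod(unsigned int a, unsigned int b)
 *   {
 *       unsigned int rem = 0;
 *       for (int i = 0; i < 32; i++) {
 *           rem = (rem << 1) | (a >> 31);   // shift next dividend bit in
 *           a <<= 1;
 *           if (rem >= b)
 *               rem -= b;                   // subtract whenever it fits
 *       }
 *       return rem;
 *   }
 */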
.text
.globl __umodsi3
.type __umodsi3, @function
.ent __umodsi3
__umodsi3:
.frame r1, 0, r15
addik r1, r1, -12
swi r29, r1, 0
swi r30, r1, 4
swi r31, r1, 8
beqi r6, div_by_zero /* div_by_zero - division error */
beqid r5, result_is_zero /* result is zero */
addik r3, r0, 0 /* clear mod (result) */
addik r30, r0, 0 /* clear div */
addik r29, r0, 32 /* initialize the loop count */
/* check if r6 and r5 are equal - if yes, return 0 */
rsub r18, r5, r6
beqi r18, return_here
/* check if (uns)r6 is greater than (uns)r5. in that case, just return r5 */
xor r18, r5, r6
bgeid r18, 16
addik r3, r5, 0
blti r6, return_here
bri $lcheckr6
rsub r18, r5, r6 /* microblazecmp */
bgti r18, return_here
/* if r6 [bit 31] is set, then return result as r5-r6 */
$lcheckr6:
bgtid r6, div0
addik r3, r0, 0
addik r18, r0, 0x7fffffff
and r5, r5, r18
and r6, r6, r18
brid return_here
rsub r3, r6, r5
/* first part: try to find the first '1' in the r5 */
div0:
blti r5, div2
div1:
add r5, r5, r5 /* left shift logical r5 */
bgeid r5, div1
addik r29, r29, -1
div2:
/* left shift logical r5 get the '1' into the carry */
add r5, r5, r5
addc r3, r3, r3 /* move that bit into the mod register */
rsub r31, r6, r3 /* try to subtract (r3 a r6) */
blti r31, mod_too_small
/* move the r31 to mod since the result was positive */
or r3, r0, r31
addik r30, r30, 1
mod_too_small:
addik r29, r29, -1
beqi r29, loop_end
add r30, r30, r30 /* shift in the '1' into div */
bri div2 /* div2 */
loop_end:
bri return_here
div_by_zero:
result_is_zero:
or r3, r0, r0 /* set result to 0 */
return_here:
/* restore values of csrs and that of r3 and the divisor and the dividend */
lwi r29, r1, 0
lwi r30, r1, 4
lwi r31, r1, 8
rtsd r15, 8
addik r1, r1, 12
.size __umodsi3, . - __umodsi3
.end __umodsi3
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,958
|
arch/microblaze/lib/udivsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/*
* Unsigned divide operation.
* Input : Dividend in Reg r5
* Divisor in Reg r6
* Output: Result in Reg r3
*/
.text
.globl __udivsi3
.type __udivsi3, @function
.ent __udivsi3
__udivsi3:
.frame r1, 0, r15
addik r1, r1, -12
swi r29, r1, 0
swi r30, r1, 4
swi r31, r1, 8
beqi r6, div_by_zero /* div_by_zero - division error */
beqid r5, result_is_zero /* result is zero */
addik r30, r0, 0 /* clear mod */
addik r29, r0, 32 /* initialize the loop count */
/* check if r6 and r5 are equal - if yes, return 1 */
rsub r18, r5, r6
beqid r18, return_here
addik r3, r0, 1
/* check if (uns)r6 is greater than (uns)r5. in that case, just return 0 */
xor r18, r5, r6
bgeid r18, 16
add r3, r0, r0 /* we would anyways clear r3 */
blti r6, return_here /* r6[bit 31 = 1] hence is greater */
bri checkr6
rsub r18, r6, r5 /* microblazecmp */
blti r18, return_here
/* if r6 [bit 31] is set, then return result as 1 */
checkr6:
bgti r6, div0
brid return_here
addik r3, r0, 1
/* first part try to find the first '1' in the r5 */
div0:
blti r5, div2
div1:
add r5, r5, r5 /* left shift logical r5 */
bgtid r5, div1
addik r29, r29, -1
div2:
/* left shift logical r5 get the '1' into the carry */
add r5, r5, r5
addc r30, r30, r30 /* move that bit into the mod register */
rsub r31, r6, r30 /* try to subtract (r30 a r6) */
blti r31, mod_too_small
/* move the r31 to mod since the result was positive */
or r30, r0, r31
addik r3, r3, 1
mod_too_small:
addik r29, r29, -1
beqi r29, loop_end
add r3, r3, r3 /* shift in the '1' into div */
bri div2 /* div2 */
loop_end:
bri return_here
div_by_zero:
result_is_zero:
or r3, r0, r0 /* set result to 0 */
return_here:
/* restore values of csrs and that of r3 and the divisor and the dividend */
lwi r29, r1, 0
lwi r30, r1, 4
lwi r31, r1, 8
rtsd r15, 8
addik r1, r1, 12
.size __udivsi3, . - __udivsi3
.end __udivsi3
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,789
|
arch/microblaze/lib/divsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/*
* Divide operation for 32 bit integers.
* Input : Dividend in Reg r5
* Divisor in Reg r6
* Output: Result in Reg r3
*/
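/*
 * Sign handling in brief (illustrative C; abs32()/udiv32() are placeholders
 * for the negation above and the shift-subtract loop below):
 *
 *   int sign = a ^ b;                        // MSB set iff the signs differ
 *   unsigned int q = udiv32(abs32(a), abs32(b));
 *   return (sign < 0) ? -(int)q : (int)q;
 */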
.text
.globl __divsi3
.type __divsi3, @function
.ent __divsi3
__divsi3:
.frame r1, 0, r15
addik r1, r1, -16
swi r28, r1, 0
swi r29, r1, 4
swi r30, r1, 8
swi r31, r1, 12
beqi r6, div_by_zero /* div_by_zero - division error */
beqi r5, result_is_zero /* result is zero */
bgeid r5, r5_pos
xor r28, r5, r6 /* get the sign of the result */
rsubi r5, r5, 0 /* make r5 positive */
r5_pos:
bgei r6, r6_pos
rsubi r6, r6, 0 /* make r6 positive */
r6_pos:
addik r30, r0, 0 /* clear mod */
addik r3, r0, 0 /* clear div */
addik r29, r0, 32 /* initialize the loop count */
/* first part try to find the first '1' in the r5 */
div0:
blti r5, div2 /* this traps r5 == 0x80000000 */
div1:
add r5, r5, r5 /* left shift logical r5 */
bgtid r5, div1
addik r29, r29, -1
div2:
/* left shift logical r5 get the '1' into the carry */
add r5, r5, r5
addc r30, r30, r30 /* move that bit into the mod register */
rsub r31, r6, r30 /* try to subtract (r30 a r6) */
blti r31, mod_too_small
/* move the r31 to mod since the result was positive */
or r30, r0, r31
addik r3, r3, 1
mod_too_small:
addik r29, r29, -1
beqi r29, loop_end
add r3, r3, r3 /* shift in the '1' into div */
bri div2 /* div2 */
loop_end:
bgei r28, return_here
brid return_here
rsubi r3, r3, 0 /* negate the result */
div_by_zero:
result_is_zero:
or r3, r0, r0 /* set result to 0 */
return_here:
/* restore values of csrs and that of r3 and the divisor and the dividend */
lwi r28, r1, 0
lwi r29, r1, 4
lwi r30, r1, 8
lwi r31, r1, 12
rtsd r15, 8
addik r1, r1, 16
.size __divsi3, . - __divsi3
.end __divsi3
|
AirFortressIlikara/LS2K0300-linux-4.19
| 20,680
|
arch/microblaze/lib/fastcopy.S
|
/*
* Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2008-2009 PetaLogix
* Copyright (C) 2008 Jim Law - Iris LP All rights reserved.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
* Written by Jim Law <jlaw@irispower.com>
*
* intended to replace:
* memcpy in memcpy.c and
* memmove in memmove.c
* ... in arch/microblaze/lib
*
*
* assly_fastcopy.S
*
* Attempt at quicker memcpy and memmove for MicroBlaze
* Input : Operand1 in Reg r5 - destination address
* Operand2 in Reg r6 - source address
* Operand3 in Reg r7 - number of bytes to transfer
* Output: Result in Reg r3 - starting destination address
*
*
* Explanation:
* Perform (possibly unaligned) copy of a block of memory
* between mem locations with size of xfer spec'd in bytes
*/
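/*
 * Outline of the ascending copy, as hedged C-style pseudocode (the real code
 * below unrolls the block loop and handles each source misalignment of 1, 2
 * or 3 bytes with its own shift/merge loop):
 *
 *   while (c >= 4 && ((uintptr_t)d & 3)) { *d++ = *s++; c--; }  // align dest
 *   if (((uintptr_t)s & 3) == 0)
 *       copy 32-byte blocks, then single words, with aligned loads/stores;
 *   else
 *       read aligned words from (s & ~3) and splice neighbouring words
 *       together with 8/16/24-bit shifts before each aligned store;
 *   while (c--) *d++ = *s++;                                    // tail bytes
 */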
#include <linux/linkage.h>
.text
.globl memcpy
.type memcpy, @function
.ent memcpy
memcpy:
fast_memcpy_ascending:
/* move d to return register as value of function */
addi r3, r5, 0
addi r4, r0, 4 /* n = 4 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
blti r4, a_xfer_end /* if n < 0, less than one word to transfer */
/* transfer first 0~3 bytes to get aligned dest address */
andi r4, r5, 3 /* n = d & 3 */
/* if zero, destination already aligned */
beqi r4, a_dalign_done
/* n = 4 - n (yields 3, 2, 1 transfers for 1, 2, 3 addr offset) */
rsubi r4, r4, 4
rsub r7, r4, r7 /* c = c - n adjust c */
a_xfer_first_loop:
/* if no bytes left to transfer, transfer the bulk */
beqi r4, a_dalign_done
lbui r11, r6, 0 /* h = *s */
sbi r11, r5, 0 /* *d = h */
addi r6, r6, 1 /* s++ */
addi r5, r5, 1 /* d++ */
brid a_xfer_first_loop /* loop */
addi r4, r4, -1 /* n-- (IN DELAY SLOT) */
a_dalign_done:
addi r4, r0, 32 /* n = 32 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
/* if n < 0, less than one block to transfer */
blti r4, a_block_done
a_block_xfer:
andi r4, r7, 0xffffffe0 /* n = c & ~31 */
rsub r7, r4, r7 /* c = c - n */
andi r9, r6, 3 /* t1 = s & 3 */
/* if temp != 0, unaligned transfers needed */
bnei r9, a_block_unaligned
a_block_aligned:
lwi r9, r6, 0 /* t1 = *(s + 0) */
lwi r10, r6, 4 /* t2 = *(s + 4) */
lwi r11, r6, 8 /* t3 = *(s + 8) */
lwi r12, r6, 12 /* t4 = *(s + 12) */
swi r9, r5, 0 /* *(d + 0) = t1 */
swi r10, r5, 4 /* *(d + 4) = t2 */
swi r11, r5, 8 /* *(d + 8) = t3 */
swi r12, r5, 12 /* *(d + 12) = t4 */
lwi r9, r6, 16 /* t1 = *(s + 16) */
lwi r10, r6, 20 /* t2 = *(s + 20) */
lwi r11, r6, 24 /* t3 = *(s + 24) */
lwi r12, r6, 28 /* t4 = *(s + 28) */
swi r9, r5, 16 /* *(d + 16) = t1 */
swi r10, r5, 20 /* *(d + 20) = t2 */
swi r11, r5, 24 /* *(d + 24) = t3 */
swi r12, r5, 28 /* *(d + 28) = t4 */
addi r6, r6, 32 /* s = s + 32 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, a_block_aligned /* while (n) loop */
addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */
bri a_block_done
a_block_unaligned:
andi r8, r6, 0xfffffffc /* as = s & ~3 */
add r6, r6, r4 /* s = s + n */
lwi r11, r8, 0 /* h = *(as + 0) */
addi r9, r9, -1
beqi r9, a_block_u1 /* t1 was 1 => 1 byte offset */
addi r9, r9, -1
beqi r9, a_block_u2 /* t1 was 2 => 2 byte offset */
a_block_u3:
bslli r11, r11, 24 /* h = h << 24 */
a_bu3_loop:
lwi r12, r8, 4 /* v = *(as + 4) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 8 /* v = *(as + 8) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 12 /* v = *(as + 12) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 16 /* v = *(as + 16) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 20 /* v = *(as + 20) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 24 /* v = *(as + 24) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 28 /* v = *(as + 28) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
lwi r12, r8, 32 /* v = *(as + 32) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
addi r8, r8, 32 /* as = as + 32 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, a_bu3_loop /* while (n) loop */
addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */
bri a_block_done
a_block_u1:
bslli r11, r11, 8 /* h = h << 8 */
a_bu1_loop:
lwi r12, r8, 4 /* v = *(as + 4) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 8 /* v = *(as + 8) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 12 /* v = *(as + 12) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 16 /* v = *(as + 16) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 20 /* v = *(as + 20) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 24 /* v = *(as + 24) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 28 /* v = *(as + 28) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
lwi r12, r8, 32 /* v = *(as + 32) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
addi r8, r8, 32 /* as = as + 32 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, a_bu1_loop /* while (n) loop */
addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */
bri a_block_done
a_block_u2:
bslli r11, r11, 16 /* h = h << 16 */
a_bu2_loop:
lwi r12, r8, 4 /* v = *(as + 4) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 8 /* v = *(as + 8) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 12 /* v = *(as + 12) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 16 /* v = *(as + 16) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 20 /* v = *(as + 20) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 24 /* v = *(as + 24) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 28 /* v = *(as + 28) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
lwi r12, r8, 32 /* v = *(as + 32) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
addi r8, r8, 32 /* as = as + 32 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, a_bu2_loop /* while (n) loop */
addi r5, r5, 32 /* d = d + 32 (IN DELAY SLOT) */
a_block_done:
addi r4, r0, 4 /* n = 4 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
blti r4, a_xfer_end /* if n < 0, less than one word to transfer */
a_word_xfer:
andi r4, r7, 0xfffffffc /* n = c & ~3 */
addi r10, r0, 0 /* offset = 0 */
andi r9, r6, 3 /* t1 = s & 3 */
/* if temp != 0, unaligned transfers needed */
bnei r9, a_word_unaligned
a_word_aligned:
lw r9, r6, r10 /* t1 = *(s+offset) */
sw r9, r5, r10 /* *(d+offset) = t1 */
addi r4, r4,-4 /* n-- */
bneid r4, a_word_aligned /* loop */
addi r10, r10, 4 /* offset++ (IN DELAY SLOT) */
bri a_word_done
a_word_unaligned:
andi r8, r6, 0xfffffffc /* as = s & ~3 */
lwi r11, r8, 0 /* h = *(as + 0) */
addi r8, r8, 4 /* as = as + 4 */
addi r9, r9, -1
beqi r9, a_word_u1 /* t1 was 1 => 1 byte offset */
addi r9, r9, -1
beqi r9, a_word_u2 /* t1 was 2 => 2 byte offset */
a_word_u3:
bslli r11, r11, 24 /* h = h << 24 */
a_wu3_loop:
lw r12, r8, r10 /* v = *(as + offset) */
bsrli r9, r12, 8 /* t1 = v >> 8 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r10 /* *(d + offset) = t1 */
bslli r11, r12, 24 /* h = v << 24 */
addi r4, r4,-4 /* n = n - 4 */
bneid r4, a_wu3_loop /* while (n) loop */
addi r10, r10, 4 /* offset = offset + 4 (IN DELAY SLOT) */
bri a_word_done
a_word_u1:
bslli r11, r11, 8 /* h = h << 8 */
a_wu1_loop:
lw r12, r8, r10 /* v = *(as + offset) */
bsrli r9, r12, 24 /* t1 = v >> 24 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r10 /* *(d + offset) = t1 */
bslli r11, r12, 8 /* h = v << 8 */
addi r4, r4,-4 /* n = n - 4 */
bneid r4, a_wu1_loop /* while (n) loop */
addi r10, r10, 4 /* offset = offset + 4 (IN DELAY SLOT) */
bri a_word_done
a_word_u2:
bslli r11, r11, 16 /* h = h << 16 */
a_wu2_loop:
lw r12, r8, r10 /* v = *(as + offset) */
bsrli r9, r12, 16 /* t1 = v >> 16 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r10 /* *(d + offset) = t1 */
bslli r11, r12, 16 /* h = v << 16 */
addi r4, r4,-4 /* n = n - 4 */
bneid r4, a_wu2_loop /* while (n) loop */
addi r10, r10, 4 /* offset = offset + 4 (IN DELAY SLOT) */
a_word_done:
add r5, r5, r10 /* d = d + offset */
add r6, r6, r10 /* s = s + offset */
rsub r7, r10, r7 /* c = c - offset */
a_xfer_end:
a_xfer_end_loop:
beqi r7, a_done /* while (c) */
lbui r9, r6, 0 /* t1 = *s */
addi r6, r6, 1 /* s++ */
sbi r9, r5, 0 /* *d = t1 */
addi r7, r7, -1 /* c-- */
brid a_xfer_end_loop /* loop */
addi r5, r5, 1 /* d++ (IN DELAY SLOT) */
a_done:
rtsd r15, 8
nop
.size memcpy, . - memcpy
.end memcpy
/*----------------------------------------------------------------------------*/
.globl memmove
.type memmove, @function
.ent memmove
memmove:
cmpu r4, r5, r6 /* n = s - d */
bgei r4,fast_memcpy_ascending
fast_memcpy_descending:
/* move d to return register as value of function */
addi r3, r5, 0
add r5, r5, r7 /* d = d + c */
add r6, r6, r7 /* s = s + c */
addi r4, r0, 4 /* n = 4 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
blti r4,d_xfer_end /* if n < 0, less than one word to transfer */
/* transfer first 0~3 bytes to get aligned dest address */
andi r4, r5, 3 /* n = d & 3 */
/* if zero, destination already aligned */
beqi r4,d_dalign_done
rsub r7, r4, r7 /* c = c - n adjust c */
d_xfer_first_loop:
/* if no bytes left to transfer, transfer the bulk */
beqi r4,d_dalign_done
addi r6, r6, -1 /* s-- */
addi r5, r5, -1 /* d-- */
lbui r11, r6, 0 /* h = *s */
sbi r11, r5, 0 /* *d = h */
brid d_xfer_first_loop /* loop */
addi r4, r4, -1 /* n-- (IN DELAY SLOT) */
d_dalign_done:
addi r4, r0, 32 /* n = 32 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
/* if n < 0, less than one block to transfer */
blti r4, d_block_done
d_block_xfer:
andi r4, r7, 0xffffffe0 /* n = c & ~31 */
rsub r7, r4, r7 /* c = c - n */
andi r9, r6, 3 /* t1 = s & 3 */
/* if temp != 0, unaligned transfers needed */
bnei r9, d_block_unaligned
d_block_aligned:
addi r6, r6, -32 /* s = s - 32 */
addi r5, r5, -32 /* d = d - 32 */
lwi r9, r6, 28 /* t1 = *(s + 28) */
lwi r10, r6, 24 /* t2 = *(s + 24) */
lwi r11, r6, 20 /* t3 = *(s + 20) */
lwi r12, r6, 16 /* t4 = *(s + 16) */
swi r9, r5, 28 /* *(d + 28) = t1 */
swi r10, r5, 24 /* *(d + 24) = t2 */
swi r11, r5, 20 /* *(d + 20) = t3 */
swi r12, r5, 16 /* *(d + 16) = t4 */
lwi r9, r6, 12 /* t1 = *(s + 12) */
lwi r10, r6, 8 /* t2 = *(s + 8) */
lwi r11, r6, 4 /* t3 = *(s + 4) */
lwi r12, r6, 0 /* t4 = *(s + 0) */
swi r9, r5, 12 /* *(d + 12) = t1 */
swi r10, r5, 8 /* *(d + 8) = t2 */
swi r11, r5, 4 /* *(d + 4) = t3 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, d_block_aligned /* while (n) loop */
swi r12, r5, 0 /* *(d + 0) = t4 (IN DELAY SLOT) */
bri d_block_done
d_block_unaligned:
andi r8, r6, 0xfffffffc /* as = s & ~3 */
rsub r6, r4, r6 /* s = s - n */
lwi r11, r8, 0 /* h = *(as + 0) */
addi r9, r9, -1
beqi r9,d_block_u1 /* t1 was 1 => 1 byte offset */
addi r9, r9, -1
beqi r9,d_block_u2 /* t1 was 2 => 2 byte offset */
d_block_u3:
bsrli r11, r11, 8 /* h = h >> 8 */
d_bu3_loop:
addi r8, r8, -32 /* as = as - 32 */
addi r5, r5, -32 /* d = d - 32 */
lwi r12, r8, 28 /* v = *(as + 28) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 24 /* v = *(as + 24) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 20 /* v = *(as + 20) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 16 /* v = *(as + 16) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 12 /* v = *(as + 12) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 8 /* v = *(as + 8) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 4 /* v = *(as + 4) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bsrli r11, r12, 8 /* h = v >> 8 */
lwi r12, r8, 0 /* v = *(as + 0) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, d_bu3_loop /* while (n) loop */
bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */
bri d_block_done
d_block_u1:
bsrli r11, r11, 24 /* h = h >> 24 */
d_bu1_loop:
addi r8, r8, -32 /* as = as - 32 */
addi r5, r5, -32 /* d = d - 32 */
lwi r12, r8, 28 /* v = *(as + 28) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 24 /* v = *(as + 24) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 20 /* v = *(as + 20) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 16 /* v = *(as + 16) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 12 /* v = *(as + 12) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 8 /* v = *(as + 8) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 4 /* v = *(as + 4) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bsrli r11, r12, 24 /* h = v >> 24 */
lwi r12, r8, 0 /* v = *(as + 0) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, d_bu1_loop /* while (n) loop */
bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */
bri d_block_done
d_block_u2:
bsrli r11, r11, 16 /* h = h >> 16 */
d_bu2_loop:
addi r8, r8, -32 /* as = as - 32 */
addi r5, r5, -32 /* d = d - 32 */
lwi r12, r8, 28 /* v = *(as + 28) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 28 /* *(d + 28) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 24 /* v = *(as + 24) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 24 /* *(d + 24) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 20 /* v = *(as + 20) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 20 /* *(d + 20) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 16 /* v = *(as + 16) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 16 /* *(d + 16) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 12 /* v = *(as + 12) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 12 /* *(d + 12) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 8 /* v = *(as + 8) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 8 /* *(d + 8) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 4 /* v = *(as + 4) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 4 /* *(d + 4) = t1 */
bsrli r11, r12, 16 /* h = v >> 16 */
lwi r12, r8, 0 /* v = *(as + 0) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
swi r9, r5, 0 /* *(d + 0) = t1 */
addi r4, r4, -32 /* n = n - 32 */
bneid r4, d_bu2_loop /* while (n) loop */
bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */
d_block_done:
addi r4, r0, 4 /* n = 4 */
cmpu r4, r4, r7 /* n = c - n (unsigned) */
blti r4,d_xfer_end /* if n < 0, less than one word to transfer */
d_word_xfer:
andi r4, r7, 0xfffffffc /* n = c & ~3 */
rsub r5, r4, r5 /* d = d - n */
rsub r6, r4, r6 /* s = s - n */
rsub r7, r4, r7 /* c = c - n */
andi r9, r6, 3 /* t1 = s & 3 */
/* if temp != 0, unaligned transfers needed */
bnei r9, d_word_unaligned
d_word_aligned:
addi r4, r4,-4 /* n-- */
lw r9, r6, r4 /* t1 = *(s+n) */
bneid r4, d_word_aligned /* loop */
sw r9, r5, r4 /* *(d+n) = t1 (IN DELAY SLOT) */
bri d_word_done
d_word_unaligned:
andi r8, r6, 0xfffffffc /* as = s & ~3 */
lw r11, r8, r4 /* h = *(as + n) */
addi r9, r9, -1
beqi r9,d_word_u1 /* t1 was 1 => 1 byte offset */
addi r9, r9, -1
beqi r9,d_word_u2 /* t1 was 2 => 2 byte offset */
d_word_u3:
bsrli r11, r11, 8 /* h = h >> 8 */
d_wu3_loop:
addi r4, r4,-4 /* n = n - 4 */
lw r12, r8, r4 /* v = *(as + n) */
bslli r9, r12, 24 /* t1 = v << 24 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r4 /* *(d + n) = t1 */
bneid r4, d_wu3_loop /* while (n) loop */
bsrli r11, r12, 8 /* h = v >> 8 (IN DELAY SLOT) */
bri d_word_done
d_word_u1:
bsrli r11, r11, 24 /* h = h >> 24 */
d_wu1_loop:
addi r4, r4,-4 /* n = n - 4 */
lw r12, r8, r4 /* v = *(as + n) */
bslli r9, r12, 8 /* t1 = v << 8 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r4 /* *(d + n) = t1 */
bneid r4, d_wu1_loop /* while (n) loop */
bsrli r11, r12, 24 /* h = v >> 24 (IN DELAY SLOT) */
bri d_word_done
d_word_u2:
bsrli r11, r11, 16 /* h = h >> 16 */
d_wu2_loop:
addi r4, r4,-4 /* n = n - 4 */
lw r12, r8, r4 /* v = *(as + n) */
bslli r9, r12, 16 /* t1 = v << 16 */
or r9, r11, r9 /* t1 = h | t1 */
sw r9, r5, r4 /* *(d + n) = t1 */
bneid r4, d_wu2_loop /* while (n) loop */
bsrli r11, r12, 16 /* h = v >> 16 (IN DELAY SLOT) */
d_word_done:
d_xfer_end:
d_xfer_end_loop:
beqi r7, a_done /* while (c) */
addi r6, r6, -1 /* s-- */
lbui r9, r6, 0 /* t1 = *s */
addi r5, r5, -1 /* d-- */
sbi r9, r5, 0 /* *d = t1 */
brid d_xfer_end_loop /* loop */
addi r7, r7, -1 /* c-- (IN DELAY SLOT) */
d_done:
rtsd r15, 8
nop
.size memmove, . - memmove
.end memmove
|
AirFortressIlikara/LS2K0300-linux-4.19
| 5,281
|
arch/microblaze/lib/uaccess_old.S
|
/*
* Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
* Copyright (C) 2009 PetaLogix
* Copyright (C) 2007 LynuxWorks, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/page.h>
/*
* int __strncpy_user(char *to, char *from, int len);
*
* Returns:
* -EFAULT for an exception
* len if we hit the buffer limit
* bytes copied
*/
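/*
 * Roughly equivalent C (a sketch; a fault is handled by the fixup below and
 * turns the return value into -EFAULT):
 *
 *   int n = len;
 *   while (n) {
 *       char c = *from++;
 *       *to++ = c;              // the terminating NUL is copied as well
 *       if (c == '\0')
 *           break;
 *       n--;
 *   }
 *   return len - n;             // bytes copied, or len if the limit was hit
 */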
.text
.globl __strncpy_user;
.type __strncpy_user, @function
.align 4;
__strncpy_user:
/*
* r5 - to
* r6 - from
* r7 - len
* r3 - temp count
* r4 - temp val
*/
beqid r7,3f
addik r3,r7,0 /* temp_count = len */
1:
lbu r4,r6,r0
beqid r4,2f
sb r4,r5,r0
addik r5,r5,1
addik r6,r6,1 /* delay slot */
addik r3,r3,-1
bnei r3,1b /* break on len */
2:
rsubk r3,r3,r7 /* temp_count = len - temp_count */
3:
rtsd r15,8
nop
.size __strncpy_user, . - __strncpy_user
.section .fixup, "ax"
.align 2
4:
brid 3b
addik r3,r0, -EFAULT
.section __ex_table, "a"
.word 1b,4b
/*
* int __strnlen_user(char __user *str, int maxlen);
*
* Returns:
* 0 on error
* maxlen + 1 if no NUL byte found within maxlen bytes
* size of the string (including NUL byte)
*/
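/*
 * Roughly equivalent C (a sketch; a fault goes through the fixup below and
 * returns 0):
 *
 *   for (int i = 0; i < maxlen; i++)
 *       if (str[i] == '\0')
 *           return i + 1;       // string length including the NUL
 *   return maxlen + 1;          // no NUL found within maxlen bytes
 */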
.text
.globl __strnlen_user;
.type __strnlen_user, @function
.align 4;
__strnlen_user:
beqid r6,3f
addik r3,r6,0
1:
lbu r4,r5,r0
beqid r4,2f /* break on NUL */
addik r3,r3,-1 /* delay slot */
bneid r3,1b
addik r5,r5,1 /* delay slot */
addik r3,r3,-1 /* for break on len */
2:
rsubk r3,r3,r6
3:
rtsd r15,8
nop
.size __strnlen_user, . - __strnlen_user
.section .fixup,"ax"
4:
brid 3b
addk r3,r0,r0
.section __ex_table,"a"
.word 1b,4b
/* Loop unrolling for __copy_tofrom_user */
#define COPY(offset) \
1: lwi r4 , r6, 0x0000 + offset; \
2: lwi r19, r6, 0x0004 + offset; \
3: lwi r20, r6, 0x0008 + offset; \
4: lwi r21, r6, 0x000C + offset; \
5: lwi r22, r6, 0x0010 + offset; \
6: lwi r23, r6, 0x0014 + offset; \
7: lwi r24, r6, 0x0018 + offset; \
8: lwi r25, r6, 0x001C + offset; \
9: swi r4 , r5, 0x0000 + offset; \
10: swi r19, r5, 0x0004 + offset; \
11: swi r20, r5, 0x0008 + offset; \
12: swi r21, r5, 0x000C + offset; \
13: swi r22, r5, 0x0010 + offset; \
14: swi r23, r5, 0x0014 + offset; \
15: swi r24, r5, 0x0018 + offset; \
16: swi r25, r5, 0x001C + offset; \
.section __ex_table,"a"; \
.word 1b, 33f; \
.word 2b, 33f; \
.word 3b, 33f; \
.word 4b, 33f; \
.word 5b, 33f; \
.word 6b, 33f; \
.word 7b, 33f; \
.word 8b, 33f; \
.word 9b, 33f; \
.word 10b, 33f; \
.word 11b, 33f; \
.word 12b, 33f; \
.word 13b, 33f; \
.word 14b, 33f; \
.word 15b, 33f; \
.word 16b, 33f; \
.text
#define COPY_80(offset) \
COPY(0x00 + offset);\
COPY(0x20 + offset);\
COPY(0x40 + offset);\
COPY(0x60 + offset);
/*
* int __copy_tofrom_user(char *to, char *from, int len)
* Return:
* 0 on success
* number of not copied bytes on error
*/
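/*
 * Path selection, as a hedged C-style sketch of the code below:
 *
 *   if (((unsigned long)to | (unsigned long)from | len) & 3)
 *       byte-copy loop (bu1/bu2);
 *   else if (len == PAGE_SIZE)
 *       unrolled page copy (page:/loop:, 0x200 bytes per iteration);
 *   else
 *       simple word-copy loop (w1/w2);
 *   return 0 on success, or the number of uncopied bytes after a fault.
 */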
.text
.globl __copy_tofrom_user;
.type __copy_tofrom_user, @function
.align 4;
__copy_tofrom_user:
/*
* r5 - to
* r6 - from
* r7, r3 - count
* r4 - tempval
*/
beqid r7, 0f /* zero size is not likely */
or r3, r5, r6 /* find if is any to/from unaligned */
or r3, r3, r7 /* find if count is unaligned */
andi r3, r3, 0x3 /* mask last 3 bits */
bneid r3, bu1 /* if r3 is not zero then byte copying */
or r3, r0, r0
rsubi r3, r7, PAGE_SIZE /* detect PAGE_SIZE */
beqid r3, page;
or r3, r0, r0
w1: lw r4, r6, r3 /* at least one 4 byte copy */
w2: sw r4, r5, r3
addik r7, r7, -4
bneid r7, w1
addik r3, r3, 4
addik r3, r7, 0
rtsd r15, 8
nop
.section __ex_table,"a"
.word w1, 0f;
.word w2, 0f;
.text
.align 4 /* Alignment is important to keep icache happy */
page: /* Create room on stack and save registers for storing values */
addik r1, r1, -40
swi r5, r1, 0
swi r6, r1, 4
swi r7, r1, 8
swi r19, r1, 12
swi r20, r1, 16
swi r21, r1, 20
swi r22, r1, 24
swi r23, r1, 28
swi r24, r1, 32
swi r25, r1, 36
loop: /* r4, r19, r20, r21, r22, r23, r24, r25 are used for storing values */
/* Loop unrolling to get performance boost */
COPY_80(0x000);
COPY_80(0x080);
COPY_80(0x100);
COPY_80(0x180);
/* copy loop */
addik r6, r6, 0x200
addik r7, r7, -0x200
bneid r7, loop
addik r5, r5, 0x200
/* Restore register content */
lwi r5, r1, 0
lwi r6, r1, 4
lwi r7, r1, 8
lwi r19, r1, 12
lwi r20, r1, 16
lwi r21, r1, 20
lwi r22, r1, 24
lwi r23, r1, 28
lwi r24, r1, 32
lwi r25, r1, 36
addik r1, r1, 40
/* return back */
addik r3, r0, 0
rtsd r15, 8
nop
/* Fault case - return temp count */
33:
addik r3, r7, 0
/* Restore register content */
lwi r5, r1, 0
lwi r6, r1, 4
lwi r7, r1, 8
lwi r19, r1, 12
lwi r20, r1, 16
lwi r21, r1, 20
lwi r22, r1, 24
lwi r23, r1, 28
lwi r24, r1, 32
lwi r25, r1, 36
addik r1, r1, 40
/* return back */
rtsd r15, 8
nop
.align 4 /* Alignment is important to keep icache happy */
bu1: lbu r4,r6,r3
bu2: sb r4,r5,r3
addik r7,r7,-1
bneid r7,bu1
addik r3,r3,1 /* delay slot */
0:
addik r3,r7,0
rtsd r15,8
nop
.size __copy_tofrom_user, . - __copy_tofrom_user
.section __ex_table,"a"
.word bu1, 0b;
.word bu2, 0b;
.text
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,798
|
arch/microblaze/lib/modsi3.S
|
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
/*
* modulo operation for 32 bit integers.
* Input : op1 in Reg r5
* op2 in Reg r6
* Output: op1 mod op2 in Reg r3
*/
.text
.globl __modsi3
.type __modsi3, @function
.ent __modsi3
__modsi3:
.frame r1, 0, r15
addik r1, r1, -16
swi r28, r1, 0
swi r29, r1, 4
swi r30, r1, 8
swi r31, r1, 12
beqi r6, div_by_zero /* div_by_zero division error */
beqi r5, result_is_zero /* result is zero */
bgeid r5, r5_pos
/* get the sign of the result [ depends only on the first arg] */
add r28, r5, r0
rsubi r5, r5, 0 /* make r5 positive */
r5_pos:
bgei r6, r6_pos
rsubi r6, r6, 0 /* make r6 positive */
r6_pos:
addik r3, r0, 0 /* clear mod */
addik r30, r0, 0 /* clear div */
addik r29, r0, 32 /* initialize the loop count */
/* first part try to find the first '1' in the r5 */
div1:
add r5, r5, r5 /* left shift logical r5 */
bgeid r5, div1
addik r29, r29, -1
div2:
/* left shift logical r5 get the '1' into the carry */
add r5, r5, r5
addc r3, r3, r3 /* move that bit into the mod register */
rsub r31, r6, r3 /* try to subtract (r3 a r6) */
blti r31, mod_too_small
/* move the r31 to mod since the result was positive */
or r3, r0, r31
addik r30, r30, 1
mod_too_small:
addik r29, r29, -1
beqi r29, loop_end
add r30, r30, r30 /* shift in the '1' into div */
bri div2 /* div2 */
loop_end:
bgei r28, return_here
brid return_here
rsubi r3, r3, 0 /* negate the result */
div_by_zero:
result_is_zero:
or r3, r0, r0 /* set result to 0 [both mod as well as div are 0] */
return_here:
/* restore values of csrs and that of r3 and the divisor and the dividend */
lwi r28, r1, 0
lwi r29, r1, 4
lwi r30, r1, 8
lwi r31, r1, 12
rtsd r15, 8
addik r1, r1, 16
.size __modsi3, . - __modsi3
.end __modsi3
|
AirFortressIlikara/LS2K0300-linux-4.19
| 6,182
|
arch/hexagon/kernel/head.S
|
/*
* Early kernel startup code for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <asm/mem-layout.h>
#include <asm/vm_mmu.h>
#include <asm/page.h>
#include <asm/hexagon_vm.h>
#define SEGTABLE_ENTRIES #0x0e0
__INIT
ENTRY(stext)
/*
* VMM will already have set up true vector page, MMU, etc.
* To set up initial kernel identity map, we have to pass
* the VMM a pointer to some canonical page tables. In
* this implementation, we're assuming that we've got
* them precompiled. Generate value in R24, as we'll need
* it again shortly.
*/
r24.L = #LO(swapper_pg_dir)
r24.H = #HI(swapper_pg_dir)
/*
* Symbol is kernel segment address, but we need
* the logical/physical address.
*/
r25 = pc;
r2.h = #0xffc0;
r2.l = #0x0000;
r25 = and(r2,r25); /* R25 holds PHYS_OFFSET now */
r1.h = #HI(PAGE_OFFSET);
r1.l = #LO(PAGE_OFFSET);
r24 = sub(r24,r1); /* swapper_pg_dir - PAGE_OFFSET */
r24 = add(r24,r25); /* + PHYS_OFFSET */
r0 = r24; /* aka __pa(swapper_pg_dir) */
/*
* Initialize page dir to make the virtual and physical
* addresses where the kernel was loaded be identical.
* Done in 4MB chunks.
*/
#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_4MB)
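/*
 * The loop below amounts to (hedged C sketch; pa is PHYS_OFFSET rounded down
 * to a 4MB boundary and n covers stext.._end):
 *
 *   n  = ((_end - stext) + (4MB - 1)) >> 22;
 *   pa = PHYS_OFFSET & ~(4MB - 1);
 *   for (i = 0; i < n; i++)
 *       swapper_pg_dir[(pa >> 22) + i] = (pa + (i << 22)) | PTE_BITS;
 */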
/*
* Get number of VA=PA entries; only really needed for jump
* to hyperspace; gets blown away immediately after
*/
{
r1.l = #LO(_end);
r2.l = #LO(stext);
r3 = #1;
}
{
r1.h = #HI(_end);
r2.h = #HI(stext);
r3 = asl(r3, #22);
}
{
r1 = sub(r1, r2);
r3 = add(r3, #-1);
} /* r1 = _end - stext */
r1 = add(r1, r3); /* + (4M-1) */
r26 = lsr(r1, #22); /* / 4M = # of entries */
r1 = r25;
r2.h = #0xffc0;
r2.l = #0x0000; /* round back down to 4MB boundary */
r1 = and(r1,r2);
r2 = lsr(r1, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r0 = add(r0,r2) /* r0 = address of correct PTE */
r2 = #PTE_BITS
r1 = add(r1,r2) /* r1 = 4MB PTE for the first entry */
r2.h = #0x0040
r2.l = #0x0000 /* 4MB increments */
loop0(1f,r26);
1:
memw(r0 ++ #4) = r1
{ r1 = add(r1, r2); } :endloop0
/* Also need to overwrite the initial 0xc0000000 entries */
/* PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift) */
R1.H = #HI(PAGE_OFFSET >> (22 - 2))
R1.L = #LO(PAGE_OFFSET >> (22 - 2))
r0 = add(r1, r24); /* advance to 0xc0000000 entry */
r1 = r25;
r2.h = #0xffc0;
r2.l = #0x0000; /* round back down to 4MB boundary */
r1 = and(r1,r2); /* for huge page */
r2 = #PTE_BITS
r1 = add(r1,r2);
r2.h = #0x0040
r2.l = #0x0000 /* 4MB increments */
loop0(1f,SEGTABLE_ENTRIES);
1:
memw(r0 ++ #4) = r1;
{ r1 = add(r1,r2); } :endloop0
r0 = r24;
/*
* The subroutine wrapper around the virtual instruction touches
* no memory, so we should be able to use it even here.
* Note that in this version, R1 and R2 get "clobbered"; see
* vm_ops.S
*/
r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap;
/* Jump into virtual address range. */
r31.h = #hi(__head_s_vaddr_target)
r31.l = #lo(__head_s_vaddr_target)
jumpr r31
/* Insert trippy space effects. */
__head_s_vaddr_target:
/*
* Tear down VA=PA translation now that we are running
* in kernel virtual space.
*/
r0 = #__HVM_PDE_S_INVALID
r1.h = #0xffc0;
r1.l = #0x0000;
r2 = r25; /* phys_offset */
r2 = and(r1,r2);
r1.l = #lo(swapper_pg_dir)
r1.h = #hi(swapper_pg_dir)
r2 = lsr(r2, #22) /* 4MB page number */
r2 = asl(r2, #2) /* times sizeof(PTE) (4bytes) */
r1 = add(r1,r2);
loop0(1f,r26)
1:
{
memw(R1 ++ #4) = R0
}:endloop0
r0 = r24
r1 = #VM_TRANS_TYPE_TABLE
call __vmnewmap
/* Go ahead and install the trap0 return so angel calls work */
r0.h = #hi(_K_provisional_vec)
r0.l = #lo(_K_provisional_vec)
call __vmsetvec
/*
* OK, at this point we should start to be much more careful,
* we're going to enter C code and start touching memory
* in all sorts of places.
* This means:
* SGP needs to be OK
* Need to lock shared resources
* A bunch of other things that will cause
* all kinds of painful bugs
*/
/*
* Stack pointer should be pointed at the init task's
* thread stack, which should have been declared in arch/init_task.c.
* So uhhhhh...
* It's accessible via the init_thread_union, which is a union
* of a thread_info struct and a stack; of course, the top
* of the stack is not for you. The end of the stack
* is simply init_thread_union + THREAD_SIZE.
*/
{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
/* initialize the register used to point to current_thread_info */
/* Fixme: THREADINFO_REG can't be R2 because of that memset thing. */
{r29 = add(r29,r0); THREADINFO_REG = r29; }
/* Hack: zero bss; */
{ r0.L = #LO(__bss_start); r1 = #0; r2.l = #LO(__bss_stop); }
{ r0.H = #HI(__bss_start); r2.h = #HI(__bss_stop); }
r2 = sub(r2,r0);
call memset;
/* Set PHYS_OFFSET; should be in R25 */
#ifdef CONFIG_HEXAGON_PHYS_OFFSET
r0.l = #LO(__phys_offset);
r0.h = #HI(__phys_offset);
memw(r0) = r25;
#endif
/* Time to make the doughnuts. */
call start_kernel
/*
* Should not reach here.
*/
1:
jump 1b
.p2align PAGE_SHIFT
ENTRY(external_cmdline_buffer)
.fill _PAGE_SIZE,1,0
.data
.p2align PAGE_SHIFT
ENTRY(empty_zero_page)
.fill _PAGE_SIZE,1,0
|
AirFortressIlikara/LS2K0300-linux-4.19
| 10,823
|
arch/hexagon/kernel/vm_entry.S
|
/*
* Event entry/exit for Hexagon
*
* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/asm-offsets.h> /* assembly-safer versions of C defines */
#include <asm/mem-layout.h> /* sigh, except for page_offset */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>
/*
* Entry into guest-mode Linux under Hexagon Virtual Machine.
* Stack pointer points to event record - build pt_regs on top of it,
* set up a plausible C stack frame, and dispatch to the C handler.
* On return, do vmrte virtual instruction with SP where we started.
*
* VM Spec 0.5 uses a trap to fetch HVM record now.
*/
/*
* Save full register state, while setting up thread_info struct
* pointer derived from kernel stack pointer in THREADINFO_REG
* register, putting prior thread_info.regs pointer in a callee-save
* register (R24, which had better not ever be assigned to THREADINFO_REG),
* and updating thread_info.regs to point to current stack frame,
* so as to support nested events in kernel mode.
*
* As this is common code, we set the pt_regs system call number
* to -1 for all events. It will be replaced with the system call
* number in the case where we decode a system call (trap0(#1)).
*/
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define save_pt_regs()\
memd(R0 + #_PT_R3130) = R31:30; \
{ memw(R0 + #_PT_R2928) = R28; \
R31 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
R31 = ugp; } \
{ memd(R0 + #_PT_R2726) = R27:26; \
R30 = gp ; } \
memd(R0 + #_PT_R2524) = R25:24; \
memd(R0 + #_PT_R2322) = R23:22; \
memd(R0 + #_PT_R2120) = R21:20; \
memd(R0 + #_PT_R1918) = R19:18; \
memd(R0 + #_PT_R1716) = R17:16; \
memd(R0 + #_PT_R1514) = R15:14; \
memd(R0 + #_PT_R1312) = R13:12; \
{ memd(R0 + #_PT_R1110) = R11:10; \
R15 = lc0; } \
{ memd(R0 + #_PT_R0908) = R9:8; \
R14 = sa0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
R13 = lc1; } \
{ memd(R0 + #_PT_R0504) = R5:4; \
R12 = sa1; } \
{ memd(R0 + #_PT_GPUGP) = R31:30; \
R11 = m1; \
R2.H = #HI(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC0SA0) = R15:14; \
R10 = m0; \
R2.L = #LO(_THREAD_SIZE); } \
{ memd(R0 + #_PT_LC1SA1) = R13:12; \
R15 = p3:0; \
R2 = neg(R2); } \
{ memd(R0 + #_PT_M1M0) = R11:10; \
R14 = usr; \
R2 = and(R0,R2); } \
{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
THREADINFO_REG = R2; } \
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
R30 = #0; }
#else
/* V4+ */
/* the # ## # syntax inserts a literal ## */
#define save_pt_regs()\
{ memd(R0 + #_PT_R3130) = R31:30; \
R30 = memw(R0 + #_PT_ER_VMPSP); }\
{ memw(R0 + #_PT_R2928) = R28; \
memw(R0 + #(_PT_R2928 + 4)) = R30; }\
{ R31:30 = C11:10; \
memd(R0 + #_PT_R2726) = R27:26; \
memd(R0 + #_PT_R2524) = R25:24; }\
{ memd(R0 + #_PT_R2322) = R23:22; \
memd(R0 + #_PT_R2120) = R21:20; }\
{ memd(R0 + #_PT_R1918) = R19:18; \
memd(R0 + #_PT_R1716) = R17:16; }\
{ memd(R0 + #_PT_R1514) = R15:14; \
memd(R0 + #_PT_R1312) = R13:12; \
R17:16 = C13:12; }\
{ memd(R0 + #_PT_R1110) = R11:10; \
memd(R0 + #_PT_R0908) = R9:8; \
R15:14 = C1:0; } \
{ memd(R0 + #_PT_R0706) = R7:6; \
memd(R0 + #_PT_R0504) = R5:4; \
R13:12 = C3:2; } \
{ memd(R0 + #_PT_GPUGP) = R31:30; \
memd(R0 + #_PT_LC0SA0) = R15:14; \
R11:10 = C7:6; }\
{ THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
memd(R0 + #_PT_LC1SA1) = R13:12; \
R15 = p3:0; }\
{ memd(R0 + #_PT_M1M0) = R11:10; \
memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
R2 = #-1; } \
{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
memd(R0 + #_PT_CS1CS0) = R17:16; \
R30 = #0; }
#endif
/*
* Restore registers and thread_info.regs state. THREADINFO_REG
* is assumed to still be sane, and R24 to have been correctly
* preserved. Don't restore R29 (SP) until later.
*/
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
{ R11:10 = memd(R0 + #_PT_M1M0); \
p3:0 = R15; } \
{ R13:12 = memd(R0 + #_PT_LC1SA1); \
usr = R14; } \
{ R15:14 = memd(R0 + #_PT_LC0SA0); \
m1 = R11; } \
{ R3:2 = memd(R0 + #_PT_R0302); \
m0 = R10; } \
{ R5:4 = memd(R0 + #_PT_R0504); \
lc1 = R13; } \
{ R7:6 = memd(R0 + #_PT_R0706); \
sa1 = R12; } \
{ R9:8 = memd(R0 + #_PT_R0908); \
lc0 = R15; } \
{ R11:10 = memd(R0 + #_PT_R1110); \
sa0 = R14; } \
{ R13:12 = memd(R0 + #_PT_R1312); \
R15:14 = memd(R0 + #_PT_R1514); } \
{ R17:16 = memd(R0 + #_PT_R1716); \
R19:18 = memd(R0 + #_PT_R1918); } \
{ R21:20 = memd(R0 + #_PT_R2120); \
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
R31:30 = memd(R0 + #_PT_GPUGP); \
{ R28 = memw(R0 + #_PT_R2928); \
ugp = R31; } \
{ R31:30 = memd(R0 + #_PT_R3130); \
gp = R30; }
#else
/* V4+ */
#define restore_pt_regs() \
{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
R15:14 = memd(R0 + #_PT_PREDSUSR); } \
{ R11:10 = memd(R0 + #_PT_M1M0); \
R13:12 = memd(R0 + #_PT_LC1SA1); \
p3:0 = R15; } \
{ R15:14 = memd(R0 + #_PT_LC0SA0); \
R3:2 = memd(R0 + #_PT_R0302); \
usr = R14; } \
{ R5:4 = memd(R0 + #_PT_R0504); \
R7:6 = memd(R0 + #_PT_R0706); \
C7:6 = R11:10; }\
{ R9:8 = memd(R0 + #_PT_R0908); \
R11:10 = memd(R0 + #_PT_R1110); \
C3:2 = R13:12; }\
{ R13:12 = memd(R0 + #_PT_R1312); \
R15:14 = memd(R0 + #_PT_R1514); \
C1:0 = R15:14; }\
{ R17:16 = memd(R0 + #_PT_R1716); \
R19:18 = memd(R0 + #_PT_R1918); } \
{ R21:20 = memd(R0 + #_PT_R2120); \
R23:22 = memd(R0 + #_PT_R2322); } \
{ R25:24 = memd(R0 + #_PT_R2524); \
R27:26 = memd(R0 + #_PT_R2726); } \
R31:30 = memd(R0 + #_PT_CS1CS0); \
{ C13:12 = R31:30; \
R31:30 = memd(R0 + #_PT_GPUGP) ; \
R28 = memw(R0 + #_PT_R2928); }\
{ C11:10 = R31:30; \
R31:30 = memd(R0 + #_PT_R3130); }
#endif
/*
* Clears off enough space for the rest of pt_regs; evrec is a part
* of pt_regs in HVM mode. Save R0/R1, set handler's address in R1.
* R0 is the address of pt_regs and is the parameter to save_pt_regs.
*/
/*
* Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
 * we'll subtract the entire size out and then fill it in ourselves.
* Need to save off R0, R1, R2, R3 immediately.
*/
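/*
 * Roughly, in C-like pseudocode (a comment-only sketch of the macro below;
 * the field names correspond to the _PT_* offsets):
 *
 *	sp -= sizeof(struct pt_regs);		// open the frame
 *	save R1:0 and R3:2 into the new frame;	// before they get clobbered
 *	fetch the event record (trap1 #VMGETREGS before V4, G-registers on V4+);
 *	store it at _PT_ER_VMEL / _PT_ER_VMPSP;
 *	R0 = pointer to the new pt_regs; R1 = CHandler; jump event_dispatch;
 */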
#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
} \
{ \
memd(R29 +#_PT_R0302) = R3:2; \
} \
trap1(#HVM_TRAP1_VMGETREGS); \
{ \
memd(R29 + #_PT_ER_VMEL) = R1:0; \
R0 = R29; \
R1.L = #LO(CHandler); \
} \
{ \
memd(R29 + #_PT_ER_VMPSP) = R3:2; \
R1.H = #HI(CHandler); \
jump event_dispatch; \
}
#else
/* V4+ */
/* turn on I$ prefetch early */
/* the # ## # syntax inserts a literal ## */
#define vm_event_entry(CHandler) \
{ \
R29 = add(R29, #-(_PT_REGS_SIZE)); \
memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
R0 = usr; \
} \
{ \
memw(R29 + #_PT_PREDSUSR) = R0; \
R0 = setbit(R0, #16); \
} \
usr = R0; \
R1:0 = G1:0; \
{ \
memd(R29 + #_PT_ER_VMEL) = R1:0; \
R1 = # ## #(CHandler); \
R3:2 = G3:2; \
} \
{ \
R0 = R29; \
memd(R29 + #_PT_ER_VMPSP) = R3:2; \
jump event_dispatch; \
}
#endif
.text
/*
* Do bulk save/restore in one place.
* Adds a jump to dispatch latency, but
* saves hundreds of bytes.
*/
event_dispatch:
save_pt_regs()
callr r1
/*
* Coming back from the C-world, our thread info pointer
* should be in the designated register (usually R19)
*
* If we were in kernel mode, we don't need to check scheduler
* or signals if CONFIG_PREEMPT is not set. If set, then it has
* to jump to a need_resched kind of block.
* BTW, CONFIG_PREEMPT is not supported yet.
*/
#ifdef CONFIG_PREEMPT
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
#endif
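/*
 * The VMEST user-mode bit decides the path: if the event interrupted
 * kernel mode we skip the work-pending checks and go straight to
 * restore_all; only a return to user space falls through to
 * check_work_pending.
 */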
/* "Nested control path" -- if the previous mode was kernel */
{
R0 = memw(R29 + #_PT_ER_VMEST);
R26.L = #LO(do_work_pending);
}
{
P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
if (!P0.new) jump:nt restore_all;
R26.H = #HI(do_work_pending);
R0 = #VM_INT_DISABLE;
}
/*
 * The return path from fork/system call also comes through here, normally
 * coming back from user mode
*
* R26 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
*/
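/*
 * C-like sketch of the loop below (comment only):
 *
 *	do {
 *		disable VM interrupts;
 *		ret = do_work_pending(regs, ti->flags);	// ti == THREADINFO_REG
 *	} while (ret != 0);
 *
 * A nonzero return from do_work_pending() sends us back to
 * check_work_pending to re-read the flags with interrupts disabled again.
 */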
check_work_pending:
/* Disable interrupts while checking TIF */
trap1(#HVM_TRAP1_VMSETIE)
{
R0 = R29; /* regs should still be at top of stack */
R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
callr R26;
}
{
P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
R0 = #VM_INT_DISABLE;
}
restore_all:
/*
* Disable interrupts, if they weren't already, before reg restore.
* R0 gets preloaded with #VM_INT_DISABLE before we get here.
*/
trap1(#HVM_TRAP1_VMSETIE)
/* do the setregs here for VM 0.5 */
/* R29 here should already be pointing at pt_regs */
{
R1:0 = memd(R29 + #_PT_ER_VMEL);
R3:2 = memd(R29 + #_PT_ER_VMPSP);
}
#if CONFIG_HEXAGON_ARCH_VERSION < 4
trap1(#HVM_TRAP1_VMSETREGS);
#else
G1:0 = R1:0;
G3:2 = R3:2;
#endif
R0 = R29
restore_pt_regs()
{
R1:0 = memd(R29 + #_PT_R0100);
R29 = add(R29, #_PT_REGS_SIZE);
}
trap1(#HVM_TRAP1_VMRTE)
/* Notreached */
.globl _K_enter_genex
_K_enter_genex:
vm_event_entry(do_genex)
.globl _K_enter_interrupt
_K_enter_interrupt:
vm_event_entry(arch_do_IRQ)
.globl _K_enter_trap0
_K_enter_trap0:
vm_event_entry(do_trap0)
.globl _K_enter_machcheck
_K_enter_machcheck:
vm_event_entry(do_machcheck)
.globl _K_enter_debug
_K_enter_debug:
vm_event_entry(do_debug_exception)
.globl ret_from_fork
ret_from_fork:
{
call schedule_tail
R26.H = #HI(do_work_pending);
}
{
P0 = cmp.eq(R24, #0);
R26.L = #LO(do_work_pending);
R0 = #VM_INT_DISABLE;
}
if (P0) jump check_work_pending
{
R0 = R25;
callr R24
}
{
jump check_work_pending
R0 = #VM_INT_DISABLE;
}
|
AirFortressIlikara/LS2K0300-linux-4.19
| 12,797
|
arch/hexagon/kernel/vm_init_segtable.S
|
/*
* Initial page table for Linux kernel under Hexagon VM,
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* These tables are pre-computed and linked into kernel.
*/
#include <asm/vm_mmu.h>
/* #include <asm/iomap.h> */
/*
 * Start with mapping PA=0 to both VA=0x0 and VA=0xc0000000 as 16MB large pages.
* No user mode access, RWX, write-back cache. The entry needs
* to be replicated for all 4 virtual segments mapping to the page.
*/
/* "Big Kernel Page" */
#define BKP(pa) (((pa) & __HVM_PTE_PGMASK_4MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_WB_L2 << 6 \
| __HVM_PDE_S_16MB)
/* No cache version */
#define BKPG_IO(pa) (((pa) & __HVM_PTE_PGMASK_16MB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HVM_PDE_S_16MB | __HEXAGON_C_DEV << 6 )
#define FOURK_IO(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X \
| __HEXAGON_C_DEV << 6 )
#define L2_PTR(pa) (((pa) & __HVM_PTE_PGMASK_4KB) \
| __HVM_PDE_S_4KB )
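/*
 * Each .word in swapper_pg_dir below covers 4MB of virtual space (the VA
 * markers are 1GB apart, 256 entries per quadrant).  So, for example, the
 * _K_init_segtable run maps VA 0xc0000000 + n*4MB onto PA n*4MB with the
 * attributes BKP() encodes: R/W/X, no user access, and write-back L2
 * cacheability in the bits above the page-frame field.
 */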
#define X __HVM_PDE_S_INVALID
.p2align 12
.globl swapper_pg_dir
.globl _K_init_segtable
swapper_pg_dir:
/* VA 0x00000000 */
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x40000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/* VA 0x80000000 */
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
/*0xa8*/.word X,X,X,X
#ifdef CONFIG_COMET_EARLY_UART_DEBUG
UART_PTE_ENTRY:
/*0xa9*/.word BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000),BKPG_IO(0xa9000000)
#else
/*0xa9*/.word X,X,X,X
#endif
/*0xaa*/.word X,X,X,X
/*0xab*/.word X,X,X,X
/*0xac*/.word X,X,X,X
/*0xad*/.word X,X,X,X
/*0xae*/.word X,X,X,X
/*0xaf*/.word X,X,X,X
/*0xb0*/.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
.word X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X,X
_K_init_segtable:
/* VA 0xC0000000 */
.word BKP(0x00000000), BKP(0x00400000), BKP(0x00800000), BKP(0x00c00000)
.word BKP(0x01000000), BKP(0x01400000), BKP(0x01800000), BKP(0x01c00000)
.word BKP(0x02000000), BKP(0x02400000), BKP(0x02800000), BKP(0x02c00000)
.word BKP(0x03000000), BKP(0x03400000), BKP(0x03800000), BKP(0x03c00000)
.word BKP(0x04000000), BKP(0x04400000), BKP(0x04800000), BKP(0x04c00000)
.word BKP(0x05000000), BKP(0x05400000), BKP(0x05800000), BKP(0x05c00000)
.word BKP(0x06000000), BKP(0x06400000), BKP(0x06800000), BKP(0x06c00000)
.word BKP(0x07000000), BKP(0x07400000), BKP(0x07800000), BKP(0x07c00000)
.word BKP(0x08000000), BKP(0x08400000), BKP(0x08800000), BKP(0x08c00000)
.word BKP(0x09000000), BKP(0x09400000), BKP(0x09800000), BKP(0x09c00000)
.word BKP(0x0a000000), BKP(0x0a400000), BKP(0x0a800000), BKP(0x0ac00000)
.word BKP(0x0b000000), BKP(0x0b400000), BKP(0x0b800000), BKP(0x0bc00000)
.word BKP(0x0c000000), BKP(0x0c400000), BKP(0x0c800000), BKP(0x0cc00000)
.word BKP(0x0d000000), BKP(0x0d400000), BKP(0x0d800000), BKP(0x0dc00000)
.word BKP(0x0e000000), BKP(0x0e400000), BKP(0x0e800000), BKP(0x0ec00000)
.word BKP(0x0f000000), BKP(0x0f400000), BKP(0x0f800000), BKP(0x0fc00000)
.word BKP(0x10000000), BKP(0x10400000), BKP(0x10800000), BKP(0x10c00000)
.word BKP(0x11000000), BKP(0x11400000), BKP(0x11800000), BKP(0x11c00000)
.word BKP(0x12000000), BKP(0x12400000), BKP(0x12800000), BKP(0x12c00000)
.word BKP(0x13000000), BKP(0x13400000), BKP(0x13800000), BKP(0x13c00000)
.word BKP(0x14000000), BKP(0x14400000), BKP(0x14800000), BKP(0x14c00000)
.word BKP(0x15000000), BKP(0x15400000), BKP(0x15800000), BKP(0x15c00000)
.word BKP(0x16000000), BKP(0x16400000), BKP(0x16800000), BKP(0x16c00000)
.word BKP(0x17000000), BKP(0x17400000), BKP(0x17800000), BKP(0x17c00000)
.word BKP(0x18000000), BKP(0x18400000), BKP(0x18800000), BKP(0x18c00000)
.word BKP(0x19000000), BKP(0x19400000), BKP(0x19800000), BKP(0x19c00000)
.word BKP(0x1a000000), BKP(0x1a400000), BKP(0x1a800000), BKP(0x1ac00000)
.word BKP(0x1b000000), BKP(0x1b400000), BKP(0x1b800000), BKP(0x1bc00000)
.word BKP(0x1c000000), BKP(0x1c400000), BKP(0x1c800000), BKP(0x1cc00000)
.word BKP(0x1d000000), BKP(0x1d400000), BKP(0x1d800000), BKP(0x1dc00000)
.word BKP(0x1e000000), BKP(0x1e400000), BKP(0x1e800000), BKP(0x1ec00000)
.word BKP(0x1f000000), BKP(0x1f400000), BKP(0x1f800000), BKP(0x1fc00000)
.word BKP(0x20000000), BKP(0x20400000), BKP(0x20800000), BKP(0x20c00000)
.word BKP(0x21000000), BKP(0x21400000), BKP(0x21800000), BKP(0x21c00000)
.word BKP(0x22000000), BKP(0x22400000), BKP(0x22800000), BKP(0x22c00000)
.word BKP(0x23000000), BKP(0x23400000), BKP(0x23800000), BKP(0x23c00000)
.word BKP(0x24000000), BKP(0x24400000), BKP(0x24800000), BKP(0x24c00000)
.word BKP(0x25000000), BKP(0x25400000), BKP(0x25800000), BKP(0x25c00000)
.word BKP(0x26000000), BKP(0x26400000), BKP(0x26800000), BKP(0x26c00000)
.word BKP(0x27000000), BKP(0x27400000), BKP(0x27800000), BKP(0x27c00000)
.word BKP(0x28000000), BKP(0x28400000), BKP(0x28800000), BKP(0x28c00000)
.word BKP(0x29000000), BKP(0x29400000), BKP(0x29800000), BKP(0x29c00000)
.word BKP(0x2a000000), BKP(0x2a400000), BKP(0x2a800000), BKP(0x2ac00000)
.word BKP(0x2b000000), BKP(0x2b400000), BKP(0x2b800000), BKP(0x2bc00000)
.word BKP(0x2c000000), BKP(0x2c400000), BKP(0x2c800000), BKP(0x2cc00000)
.word BKP(0x2d000000), BKP(0x2d400000), BKP(0x2d800000), BKP(0x2dc00000)
.word BKP(0x2e000000), BKP(0x2e400000), BKP(0x2e800000), BKP(0x2ec00000)
.word BKP(0x2f000000), BKP(0x2f400000), BKP(0x2f800000), BKP(0x2fc00000)
.word BKP(0x30000000), BKP(0x30400000), BKP(0x30800000), BKP(0x30c00000)
.word BKP(0x31000000), BKP(0x31400000), BKP(0x31800000), BKP(0x31c00000)
.word BKP(0x32000000), BKP(0x32400000), BKP(0x32800000), BKP(0x32c00000)
.word BKP(0x33000000), BKP(0x33400000), BKP(0x33800000), BKP(0x33c00000)
.word BKP(0x34000000), BKP(0x34400000), BKP(0x34800000), BKP(0x34c00000)
.word BKP(0x35000000), BKP(0x35400000), BKP(0x35800000), BKP(0x35c00000)
.word BKP(0x36000000), BKP(0x36400000), BKP(0x36800000), BKP(0x36c00000)
.word BKP(0x37000000), BKP(0x37400000), BKP(0x37800000), BKP(0x37c00000)
.word BKP(0x38000000), BKP(0x38400000), BKP(0x38800000), BKP(0x38c00000)
.word BKP(0x39000000), BKP(0x39400000), BKP(0x39800000), BKP(0x39c00000)
.word BKP(0x3a000000), BKP(0x3a400000), BKP(0x3a800000), BKP(0x3ac00000)
.word BKP(0x3b000000), BKP(0x3b400000), BKP(0x3b800000), BKP(0x3bc00000)
.word BKP(0x3c000000), BKP(0x3c400000), BKP(0x3c800000), BKP(0x3cc00000)
.word BKP(0x3d000000), BKP(0x3d400000), BKP(0x3d800000), BKP(0x3dc00000)
_K_io_map:
.word X,X,X,X /* 0x3e000000 - device IO early remap */
.word X,X,X,X /* 0x3f000000 - hypervisor space*/
#if 0
/*
* This is in here as an example for devices which need to be mapped really
* early.
*/
.p2align 12
.globl _K_io_kmap
.globl _K_init_devicetable
_K_init_devicetable: /* Should be 4MB worth of entries */
.word FOURK_IO(MSM_GPIO1_PHYS),FOURK_IO(MSM_GPIO2_PHYS),FOURK_IO(MSM_SIRC_PHYS),X
.word FOURK_IO(TLMM_GPIO1_PHYS),X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
.word X,X,X,X
#endif
|
AirFortressIlikara/LS2K0300-linux-4.19
| 2,281
|
arch/hexagon/kernel/vm_ops.S
|
/*
* Hexagon VM instruction support
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/linkage.h>
#include <asm/hexagon_vm.h>
/*
* C wrappers for virtual machine "instructions". These
* could be, and perhaps some day will be, handled as in-line
* macros, but for tracing/debugging it's handy to have
* a single point of invocation for each of them.
* Conveniently, they take parameters and return values
* consistent with the ABI calling convention.
*/
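/*
 * Example use from C (a sketch, assuming the VM_INT_* constants and
 * prototypes declared in <asm/hexagon_vm.h>):
 *
 *	__vmsetie(VM_INT_DISABLE);
 *	... critical section ...
 *	__vmsetie(VM_INT_ENABLE);
 *
 * Arguments arrive in R0..R5 and the return value comes back in R0, so
 * each wrapper below is just a trap1 plus a jumpr R31.
 */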
ENTRY(__vmrte)
trap1(#HVM_TRAP1_VMRTE);
jumpr R31;
ENTRY(__vmsetvec)
trap1(#HVM_TRAP1_VMSETVEC);
jumpr R31;
ENTRY(__vmsetie)
trap1(#HVM_TRAP1_VMSETIE);
jumpr R31;
ENTRY(__vmgetie)
trap1(#HVM_TRAP1_VMGETIE);
jumpr R31;
ENTRY(__vmintop)
trap1(#HVM_TRAP1_VMINTOP);
jumpr R31;
ENTRY(__vmclrmap)
trap1(#HVM_TRAP1_VMCLRMAP);
jumpr R31;
ENTRY(__vmnewmap)
r1 = #VM_NEWMAP_TYPE_PGTABLES;
trap1(#HVM_TRAP1_VMNEWMAP);
jumpr R31;
ENTRY(__vmcache)
trap1(#HVM_TRAP1_VMCACHE);
jumpr R31;
ENTRY(__vmgettime)
trap1(#HVM_TRAP1_VMGETTIME);
jumpr R31;
ENTRY(__vmsettime)
trap1(#HVM_TRAP1_VMSETTIME);
jumpr R31;
ENTRY(__vmwait)
trap1(#HVM_TRAP1_VMWAIT);
jumpr R31;
ENTRY(__vmyield)
trap1(#HVM_TRAP1_VMYIELD);
jumpr R31;
ENTRY(__vmstart)
trap1(#HVM_TRAP1_VMSTART);
jumpr R31;
ENTRY(__vmstop)
trap1(#HVM_TRAP1_VMSTOP);
jumpr R31;
ENTRY(__vmvpid)
trap1(#HVM_TRAP1_VMVPID);
jumpr R31;
/* Probably not actually going to use these; see vm_entry.S */
ENTRY(__vmsetregs)
trap1(#HVM_TRAP1_VMSETREGS);
jumpr R31;
ENTRY(__vmgetregs)
trap1(#HVM_TRAP1_VMGETREGS);
jumpr R31;
|
AirFortressIlikara/LS2K0300-linux-4.19
| 1,262
|
arch/hexagon/kernel/vm_vectors.S
|
/*
* Event jump tables
*
* Copyright (c) 2010-2012,2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <asm/hexagon_vm.h>
.text
/* This is registered early on to allow angel */
.global _K_provisional_vec
_K_provisional_vec:
jump 1f;
jump 1f;
jump 1f;
jump 1f;
jump 1f;
trap1(#HVM_TRAP1_VMRTE)
jump 1f;
jump 1f;
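/*
 * The installed table below has one jump per HVM event slot, in event
 * number order: 0 reset, 1 machine check, 2 general exception, 3 debug,
 * 4 reserved, 5 trap0, 6 reserved, 7 interrupt (see the per-slot
 * comments).
 */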
.global _K_VM_event_vector
_K_VM_event_vector:
1:
jump 1b; /* Reset */
jump _K_enter_machcheck;
jump _K_enter_genex;
jump _K_enter_debug;
jump 1b; /* 4 Rsvd */
jump _K_enter_trap0;
jump 1b; /* 6 Rsvd */
jump _K_enter_interrupt;
|