repo_id
stringlengths 5
115
| size
int64 590
5.01M
| file_path
stringlengths 4
212
| content
stringlengths 590
5.01M
|
|---|---|---|---|
AbsInt/CompCert
| 2,147
|
runtime/arm/i64_udiv.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Unsigned division
@ 64-bit unsigned division: returns N / D.
@ In:   N in Reg0 (r0,r1), D in Reg1 (r2,r3) -- same convention as
@       __compcert_i64_udivmod, which leaves the quotient in Reg2 (r4,r5).
@ Out:  quotient in Reg0 (r0,r1).
@ Note: FUNCTION/ENDFUNCTION, LMOV and the RegN aliases come from sysdeps.h.
@ If D == 0, udivmod returns immediately and the result is unspecified.
FUNCTION(__compcert_i64_udiv)
push {r4, r5, r6, r7, r8, lr} @ save udivmod's scratch regs (r4-r8) and lr
bl __compcert_i64_udivmod
LMOV(Reg0, Reg2) @ move quotient (r4,r5) into result regs (r0,r1)
pop {r4, r5, r6, r7, r8, lr}
bx lr
ENDFUNCTION(__compcert_i64_udiv)
|
AbsInt/CompCert
| 2,098
|
runtime/arm/i64_shl.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Shift left
@ 64-bit shift left: returns X << (amount & 63).
@ In:   X in Reg0 (r0,r1), shift amount in r2.
@ Out:  shifted value in Reg0; r3 is used as scratch by the LSHL macro.
@ Note: the LSHL macro (from sysdeps.h) performs the full 64-bit shift,
@ including the amount >= 32 case -- TODO confirm against sysdeps.h.
FUNCTION(__compcert_i64_shl)
AND r2, r2, #63 @ normalize amount to 0...63
LSHL(Reg0, Reg0, r2, r3)
bx lr
ENDFUNCTION(__compcert_i64_shl)
|
AbsInt/CompCert
| 3,399
|
runtime/arm/i64_udivmod.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Auxiliary function for division and modulus. Don't call from C
@ On entry: N = Reg0 (r0, r1) numerator D = Reg1 (r2, r3) divisor
@ On exit: Q = Reg2 (r4, r5) quotient R = Reg0 (r0, r1) remainder
@ Locals: TMP = Reg3 (r6, r7) temporary
@ COUNT = r8 round counter
@ Restoring shift-and-subtract division, in two phases:
@   phase 1 (label 1): double D while (signed)D >= 0 and D <= N,
@     counting the rounds in r8 (starts at 1);
@   phase 2 (label 3): one quotient bit per round -- shift Q left,
@     subtract D from N when possible (setting the low Q bit),
@     then halve D; repeat until the round count reaches 0.
@ The (signed)D >= 0 test in phase 1 stops doubling before D's top bit
@ would be shifted out.
FUNCTION(__compcert_i64_udivmod)
orrs r6, Reg1LO, Reg1HI @ is D == 0?
it eq @ Thumb-2 IT block guarding the conditional return below
bxeq lr @ if so, return with unspecified results
MOV Reg2LO, #0 @ Q = 0
MOV Reg2HI, #0
MOV r8, #1 @ round = 1
1: cmp Reg1HI, #0 @ while ((signed) D >= 0)
blt 3f
LSHL1(Reg1, Reg1) @ D = D << 1
LSUBS(Reg3, Reg0, Reg1) @ if N < D
blo 2f @ break and restore D to previous value
ADD r8, r8, #1 @ increment count
b 1b
2: LSHR1(Reg1, Reg1) @ D = D >> 1
3: LSHL1(Reg2, Reg2) @ Q = Q << 1
LSUBS(Reg3, Reg0, Reg1) @ TMP = N - D
blo 4f @ if N < D, leave N and Q unchanged
LMOV(Reg0, Reg3) @ N = N - D
ORR Reg2LO, Reg2LO, #1 @ Q = Q | 1
4: LSHR1(Reg1, Reg1) @ D = D >> 1
subs r8, r8, #1 @ decrement count
bne 3b @ repeat until count = 0
bx lr @ exit with Q = quotient (r4,r5), N = remainder (r0,r1)
ENDFUNCTION(__compcert_i64_udivmod)
|
AbsInt/CompCert
| 2,423
|
runtime/arm/i64_sar.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Shift right signed
@ 64-bit arithmetic (signed) shift right: returns X >> (amount & 63).
@ In:   X in Reg0 (r0,r1); shift amount in r2.
@ Out:  shifted value in Reg0; clobbers r2, r3, flags.
@ Two cases: amount < 32 combines both halves; amount >= 32 takes all
@ result bits from the high half and sign-fills the new high half.
FUNCTION(__compcert_i64_sar)
AND r2, r2, #63 @ normalize amount to 0...63
rsbs r3, r2, #32 @ r3 = 32 - amount
ble 1f @ branch if <= 0, namely if amount >= 32
LSR Reg0LO, Reg0LO, r2
LSL r3, Reg0HI, r3 @ bits of HI that cross into the low word
ORR Reg0LO, Reg0LO, r3 @ LO = (LO >> amount) | (HI << (32 - amount))
ASR Reg0HI, Reg0HI, r2 @ HI = HI >> amount (arithmetic: keeps sign)
bx lr
1:
@ amount >= 32: low result word comes entirely from HI; when amount is
@ exactly 32, the shift count below is 0 and LO = HI unchanged.
SUB r2, r2, #32
ASR Reg0LO, Reg0HI, r2 @ LO = HI >> (amount - 32), arithmetic
ASR Reg0HI, Reg0HI, #31 @ HI = sign extension of original HI
bx lr
ENDFUNCTION(__compcert_i64_sar)
|
AbsInt/CompCert
| 3,253
|
runtime/arm/i64_utof.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from unsigned 64-bit integer to single float
@ Convert unsigned 64-bit integer X to single-precision float.
@ In:   X in Reg0 (r0,r1).
@ Out:  result in s0; also copied to r0 under the EABI (softfp) ABI.
@ Strategy: convert via double. A double holds 53 mantissa bits, so for
@ X >= 2^53 the int->double step already rounds, and the later
@ double->single rounding could compound ("double rounding"). The fix
@ below forces bit 12 to reflect whether any of bits 0-11 were nonzero,
@ then clears bits 0-11: the classic "round to odd" sticky-bit technique.
FUNCTION(__compcert_i64_utof)
@ Check whether X < 2^53
lsrs r2, Reg0HI, #21 @ test if X >> 53 == 0
beq 1f
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
MOV r2, #0x700
ORR r2, r2, #0xFF @ r2 = 0x7FF
AND r3, Reg0LO, r2 @ extract bits 0 to 11 of X
ADD r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
@ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
@ bits 13-31 of r3 are 0
ORR Reg0LO, Reg0LO, r3 @ correct bit number 12 of X
BIC Reg0LO, Reg0LO, r2 @ set to 0 bits 0 to 11 of X
@ Convert to double
1: vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
vcvt.f64.u32 d1, s2 @ convert high half to double (unsigned)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
@ Round to single
vcvt.f32.f64 s0, d0
#ifdef ABI_eabi
@ Return result in r0
vmov r0, s0
#endif
bx lr
ENDFUNCTION(__compcert_i64_utof)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
|
AbsInt/CompCert
| 2,490
|
runtime/arm/i64_sdiv.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Signed division
@ 64-bit signed division: returns N / D (rounded toward zero).
@ In:   N in Reg0 (r0,r1), D in Reg1 (r2,r3).
@ Out:  quotient in Reg0 (r0,r1).
@ Implemented as |N| / |D| via the unsigned helper, then the quotient is
@ negated when N and D have opposite signs. LCONDOPP(dst, src, s)
@ conditionally negates: it yields -src when s < 0, src otherwise --
@ TODO confirm exact macro semantics against sysdeps.h.
FUNCTION(__compcert_i64_sdiv)
push {r4, r5, r6, r7, r8, r10, lr} @ save udivmod scratch + r10 + lr
ASR r4, Reg0HI, #31 @ r4 = sign of N
ASR r5, Reg1HI, #31 @ r5 = sign of D
EOR r10, r4, r5 @ r10 = sign of result
LCONDOPP(Reg0, Reg0, r4) @ take absolute value of N
LCONDOPP(Reg1, Reg1, r5) @ take absolute value of D
bl __compcert_i64_udivmod @ do unsigned division
LCONDOPP(Reg0, Reg2, r10) @ apply expected sign
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
ENDFUNCTION(__compcert_i64_sdiv)
|
AbsInt/CompCert
| 2,456
|
runtime/arm/i64_smod.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Signed modulus
@ 64-bit signed modulus: returns N % D, with the sign of N
@ (the C convention matching division rounded toward zero).
@ In:   N in Reg0 (r0,r1), D in Reg1 (r2,r3).
@ Out:  remainder in Reg0 (r0,r1).
@ Implemented as |N| % |D| via the unsigned helper (remainder comes back
@ in Reg0), then negated when N was negative. The sign of D does not
@ affect the result's sign.
FUNCTION(__compcert_i64_smod)
push {r4, r5, r6, r7, r8, r10, lr} @ save udivmod scratch + r10 + lr
ASR r10, Reg0HI, #31 @ r10 = sign of N = sign of result
ASR r5, Reg1HI, #31 @ r5 = sign of D
LCONDOPP(Reg0, Reg0, r10) @ take absolute value of N
LCONDOPP(Reg1, Reg1, r5) @ take absolute value of D
bl __compcert_i64_udivmod @ do unsigned division
LCONDOPP(Reg0, Reg0, r10) @ apply expected sign
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
ENDFUNCTION(__compcert_i64_smod)
|
AbsInt/CompCert
| 3,562
|
runtime/arm/i64_dtos.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from double float to signed 64-bit integer
@ Convert double-precision float to signed 64-bit integer (truncating).
@ In:   the double in d0 (hardfp) or in (r0,r1) under ABI_eabi (softfp).
@ Out:  integer in Reg0 (r0,r1).
@ Method: decode the IEEE-754 fields by hand -- extract the unbiased,
@ mantissa-aligned exponent, rebuild the 53-bit mantissa (implicit
@ leading 1), shift it into place, then apply the sign.
@ Out-of-range inputs saturate to MAX_SINT / MIN_SINT; |x| < 1 gives 0.
FUNCTION(__compcert_i64_dtos)
#ifndef ABI_eabi
vmov Reg0LO, Reg0HI, d0
#endif
ASR r12, Reg0HI, #31 @ save sign of result in r12
@ extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52) in r2
@ note: 1023 + 52 = 1075 = 1024 + 51
@ note: (HI & 0x7FF00000) >> 20 = (HI << 1) >> 21
LSL r2, Reg0HI, #1
LSR r2, r2, #21
SUB r2, r2, #51 @ subtract 1075 in two immediate-encodable steps
SUB r2, r2, #1024
@ check range of exponent
cmn r2, #52 @ if EXP < -52, |double| is < 1.0
blt 1f
cmp r2, #11 @ if EXP >= 63 - 52, |double| is >= 2^63
bge 2f
@ extract true mantissa
BIC Reg0HI, Reg0HI, #0xFF000000
BIC Reg0HI, Reg0HI, #0x00F00000 @ HI &= ~0xFFF00000
ORR Reg0HI, Reg0HI, #0x00100000 @ HI |= 0x00100000
@ shift it appropriately
cmp r2, #0
blt 3f
@ EXP >= 0: shift left by EXP. Note that EXP < 12
LSHL_small(Reg0, Reg0, r2, r3)
b 4f
@ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
3: RSB r2, r2, #0 @ r2 = -EXP = shift amount
LSHR(Reg0, Reg0, r2, r3)
@ apply sign to result
4: LCONDOPP(Reg0, Reg0, r12)
bx lr
@ special cases
1: MOV Reg0LO, #0 @ result is 0
MOV Reg0HI, #0
bx lr
@ overflow: NaNs and infinities also land here (exponent field all-ones)
2: cmp r12, #0 @ saturate according to the input's sign
blt 6f
mvn Reg0LO, #0 @ result is 0x7F....FF (MAX_SINT)
LSR Reg0HI, Reg0LO, #1 @ HI = 0xFFFFFFFF >> 1 = 0x7FFFFFFF
bx lr
6: MOV Reg0LO, #0 @ result is 0x80....00 (MIN_SINT)
MOV Reg0HI, #0x80000000
bx lr
ENDFUNCTION(__compcert_i64_dtos)
|
AbsInt/CompCert
| 3,207
|
runtime/arm/i64_smulh.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris
@
@ Copyright (c) 2016 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Multiply-high signed
@ Hacker's Delight section 8.3:
@ - compute high 64 bits of the unsigned product X * Y (see i64_umulh.S)
@ - subtract X if Y < 0
@ - subtract Y if X < 0
@ Signed multiply-high: returns bits 127-64 of the 128-bit product X * Y.
@ In:   X in Reg0 (r0,r1), Y in Reg1 (r2,r3).
@ Out:  high 64 bits of the signed product in Reg0 (r0,r1).
@ The carry chain below is load-bearing: each adds/adc(s) pair
@ propagates a carry from the previous accumulation step.
FUNCTION(__compcert_i64_smulh)
push {r4, r5, r6, r7}
@@@ r7:r6 accumulate bits 95-32 of the full product
umull r4, r6, Reg0LO, Reg1LO @ r6 = high half of XL.YL product
umull r4, r5, Reg0LO, Reg1HI @ r5:r4 = product XL.YH
adds r6, r6, r4
ADC r7, r5, #0 @ no carry out
umull r4, r5, Reg0HI, Reg1LO @ r5:r4 = product XH.YL
adds r6, r6, r4
adcs r7, r7, r5 @ carry out is possible
@@@ r6:r7 accumulate bits 127-64 of the full product
@ note the swapped pairing: r6 now holds bits 127-96, r7 bits 95-64;
@ the low word r6 from the previous phase is no longer needed
mov r6, #0
ADC r6, r6, #0 @ put carry out in bits 127-96
umull r4, r5, Reg0HI, Reg1HI @ r5:r4 = product XH.YH
adds r7, r7, r4
ADC r6, r6, r5
@@@ subtract X if Y < 0
@ (Hacker's Delight 8.3: signed high product = unsigned high product
@ minus each operand whose partner is negative)
cmp Reg1HI, #0
bge 1f
subs r7, r7, Reg0LO
sbcs r6, r6, Reg0HI
@@@ subtract Y if X < 0
1: cmp Reg0HI, #0
bge 2f
subs r7, r7, Reg1LO
sbcs r6, r6, Reg1HI
@@@ return result in Reg0 pair
2: mov Reg0LO, r7
mov Reg0HI, r6
pop {r4, r5, r6, r7}
bx lr
ENDFUNCTION(__compcert_i64_smulh)
|
AbsInt/CompCert
| 2,580
|
runtime/arm/i64_stod.S
|
@ *****************************************************************
@
@ The Compcert verified compiler
@
@ Xavier Leroy, INRIA Paris-Rocquencourt
@
@ Copyright (c) 2013 Institut National de Recherche en Informatique et
@ en Automatique.
@
@ Redistribution and use in source and binary forms, with or without
@ modification, are permitted provided that the following conditions are met:
@ * Redistributions of source code must retain the above copyright
@ notice, this list of conditions and the following disclaimer.
@ * Redistributions in binary form must reproduce the above copyright
@ notice, this list of conditions and the following disclaimer in the
@ documentation and/or other materials provided with the distribution.
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
@ HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
@ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
@ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
@ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
@ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@
@ *********************************************************************
@ Helper functions for 64-bit integer arithmetic. ARM version.
#include "sysdeps.h"
@@@ Conversion from signed 64-bit integer to double float
@ Convert signed 64-bit integer to double-precision float.
@ In:   X in Reg0 (r0,r1).
@ Out:  result in d0; also returned in (r0,r1) under ABI_eabi (softfp).
@ Exact: result = (double)(unsigned)LO + (double)(signed)HI * 2^32,
@ computed with a fused accumulate so no intermediate rounding occurs
@ beyond the final representable value.
@ Fix: removed the stray duplicate label `__compcert_i64_stod:` that
@ followed FUNCTION(...) -- the FUNCTION macro already emits the global
@ label (cf. every other function in this runtime), so redefining it
@ makes the assembler fail with "symbol already defined".
FUNCTION(__compcert_i64_stod)
vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
#ifdef ABI_eabi
vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
ENDFUNCTION(__compcert_i64_stod)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
|
AbsInt/CompCert
| 2,877
|
runtime/x86_32/i64_umulh.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Multiply-high unsigned
#define XL 12(%esp)
#define XH 16(%esp)
#define YL 20(%esp)
#define YH 24(%esp)
// X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
// Unsigned multiply-high: returns bits 127-64 of the 128-bit product
// X * Y.
// In (cdecl stack, after the two pushes below): XL/XH at 12/16(%esp),
// YL/YH at 20/24(%esp) -- see the #defines above.
// Out: EDX:EAX = high 64 bits of the product.
// EDI:ESI:ECX accumulate bits 127:32 of the four partial products;
// ECX (bits 63:32) is only needed for carry propagation and is
// ultimately discarded.
FUNCTION(__compcert_i64_umulh)
pushl %esi
pushl %edi
movl XL, %eax
mull YL // EDX:EAX = 64-bit product XL.YL
movl %edx, %ecx
xorl %esi, %esi
xorl %edi, %edi // EDI:ESI:ECX accumulate bits 127:32 of result
movl XH, %eax
mull YL // EDX:EAX = 64-bit product XH.YL
addl %eax, %ecx
adcl %edx, %esi
adcl $0, %edi // fold the carry into bits 127:96
movl YH, %eax
mull XL // EDX:EAX = 64-bit product YH.XL
addl %eax, %ecx
adcl %edx, %esi
adcl $0, %edi
movl XH, %eax
mull YH // EDX:EAX = 64-bit product XH.YH
addl %esi, %eax // EAX = bits 95:64 of the full product
adcl %edi, %edx // EDX = bits 127:96
popl %edi
popl %esi
ret
ENDFUNCTION(__compcert_i64_umulh)
|
AbsInt/CompCert
| 2,629
|
runtime/x86_32/i64_shr.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Shift right unsigned
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
// 64-bit logical shift right: returns X >> (amount & 63).
// In (cdecl stack): XL at 4(%esp), XH at 8(%esp), amount at 12(%esp).
// Out: EDX:EAX = shifted value.
// Since IA32 shifts take %cl mod 32, only bit 5 of the amount must be
// handled explicitly; bits 0-4 are applied by the shift itself.
FUNCTION(__compcert_i64_shr)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl // dispatch on bit 5 of the amount
jne 1f
// shift amount < 32
movl 4(%esp), %eax
movl 8(%esp), %edx
shrdl %cl, %edx, %eax // eax = low(XH:XL >> amount)
shrl %cl, %edx // edx = XH >> amount
ret
// shift amount >= 32
1: movl 8(%esp), %eax
shrl %cl, %eax // eax = XH >> (amount - 32), via %cl mod 32
xorl %edx, %edx // edx = 0
ret
ENDFUNCTION(__compcert_i64_shr)
|
AbsInt/CompCert
| 2,202
|
runtime/x86_32/i64_umod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Unsigned remainder
// unsigned long long __compcert_i64_umod(unsigned long long N, unsigned long long D)
// ABI:  IA32 cdecl; result (N mod D) in edx:eax
// The three pushes move the caller's arguments to 16(%esp)..28(%esp),
// which after the call's return address land at 20(%esp)..32(%esp) --
// exactly where __compcert_i64_udivmod expects N and D.
// udivmod returns the remainder in edx:eax already, so no result moves
// are needed; only the callee-saved registers it clobbers are restored.
FUNCTION(__compcert_i64_umod)
pushl %ebp
pushl %esi
pushl %edi
call GLOB(__compcert_i64_udivmod)
popl %edi
popl %esi
popl %ebp
ret
ENDFUNCTION(__compcert_i64_umod)
|
AbsInt/CompCert
| 3,222
|
runtime/x86_32/i64_dtou.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion float -> unsigned long
// unsigned long long __compcert_i64_dtou(double x)
// ABI:  IA32 cdecl; argument is the 8-byte double at 4(%esp) before the
//       subl (8(%esp) after); result in edx:eax.
// fistpll converts to *signed* int64, so arguments >= 2^63 are first
// offset down by 2^63, converted, then the result is offset back up.
FUNCTION(__compcert_i64_dtou)
subl $4, %esp
// Compare argument with 2^63
// After the two loads: ST0 = 2^63, ST1 = x; fucomp sets C0/C3 from
// ST0 vs ST1 and pops, leaving x on the FP stack.
fldl 8(%esp)
flds LC1
fucomp
fnstsw %ax
sahf
jbe 1f // branch if not (ARG < 2^63)
// Argument < 2^63: convert as is
// Change rounding mode to "round towards zero"
// (movb $12, %ah sets RC = 11b in the control-word copy; the original
//  word at 0(%esp) is kept so it can be restored afterwards)
fnstcw 0(%esp)
movw 0(%esp), %ax
movb $12, %ah
movw %ax, 2(%esp)
fldcw 2(%esp)
// Convert (stores the int64 over the argument's stack slot)
fistpll 8(%esp)
movl 8(%esp), %eax
movl 12(%esp), %edx
// Restore rounding mode
fldcw 0(%esp)
addl $4, %esp
ret
// Argument >= 2^63: offset ARG by -2^63, then convert, then offset RES by 2^63
1: fsubs LC1
// Change rounding mode to "round towards zero"
fnstcw 0(%esp)
movw 0(%esp), %ax
movb $12, %ah
movw %ax, 2(%esp)
fldcw 2(%esp)
// Convert
fistpll 8(%esp)
movl 8(%esp), %eax
movl 12(%esp), %edx
// Offset result by 2^63 (add 2^63 to the high word)
addl $0x80000000, %edx
// Restore rounding mode
fldcw 0(%esp)
addl $4, %esp
ret
.p2align 2
LC1: .long 0x5f000000 // 2^63 in single precision
ENDFUNCTION(__compcert_i64_dtou)
|
AbsInt/CompCert
| 2,247
|
runtime/x86_32/i64_stof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion signed long -> single-precision float
// float __compcert_i64_stof(long long x)
// ABI:  IA32 cdecl; argument at 4(%esp), result on the x87 stack (ST0)
FUNCTION(__compcert_i64_stof)
fildll 4(%esp)
// The TOS is in extended precision and therefore exact.
// Force rounding to single precision by a store/reload through the
// argument's stack slot (which is dead at this point).
fstps 4(%esp)
flds 4(%esp)
ret
ENDFUNCTION(__compcert_i64_stof)
|
AbsInt/CompCert
| 2,621
|
runtime/x86_32/i64_utod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion unsigned long -> double-precision float
// double __compcert_i64_utod(unsigned long long x)
// ABI:  IA32 cdecl; argument at 4(%esp)/8(%esp), result on the x87 stack
FUNCTION(__compcert_i64_utod)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63? (sign bit of high word)
jns 1f
fadds LC1 // adjust by 2^64 (signed read was x - 2^64)
1: ret
// The result is in extended precision (80 bits) and therefore
// exact (64 bits of mantissa). It will be rounded to double
// precision by the caller, when transferring the result
// to an XMM register or a 64-bit stack slot.
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
ENDFUNCTION(__compcert_i64_utod)
|
AbsInt/CompCert
| 3,712
|
runtime/x86_32/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. IA32 version
#include "sysdeps.h"
// typedef void * va_list;
// unsigned int __compcert_va_int32(va_list * ap);
// unsigned long long __compcert_va_int64(va_list * ap);
// double __compcert_va_float64(va_list * ap);
// unsigned int __compcert_va_int32(va_list * ap)
// Fetch the next 4-byte argument from *ap and advance *ap past it.
FUNCTION(__compcert_va_int32)
movl 4(%esp), %edx // %edx = address of the va_list (ap)
movl 0(%edx), %ecx // %ecx = current argument pointer *ap
movl 0(%ecx), %eax // result = the 32-bit value at *ap
addl $4, 0(%edx) // *ap += 4: consume the argument
ret
ENDFUNCTION(__compcert_va_int32)
// unsigned long long __compcert_va_int64(va_list * ap)
// Fetch the next 8-byte integer argument from *ap (result in edx:eax)
// and advance *ap past it.
FUNCTION(__compcert_va_int64)
movl 4(%esp), %ecx // %ecx = ap parameter
movl 0(%ecx), %edx // %edx = current argument pointer
movl 0(%edx), %eax // load the int64 value there
movl 4(%edx), %edx // high word overwrites the pointer in %edx...
addl $8, 0(%ecx) // ...so increment the argument pointer in memory
ret
ENDFUNCTION(__compcert_va_int64)
// double __compcert_va_float64(va_list * ap)
// Fetch the next 8-byte float argument from *ap (result on the x87
// stack) and advance *ap past it.
FUNCTION(__compcert_va_float64)
movl 4(%esp), %edx // %edx = address of the va_list (ap)
movl 0(%edx), %ecx // %ecx = current argument pointer *ap
fldl 0(%ecx) // push the float64 at *ap onto the FP stack
addl $8, 0(%edx) // *ap += 8: consume the argument
ret
ENDFUNCTION(__compcert_va_float64)
// void * __compcert_va_composite(va_list * ap, unsigned int sz)
// Returns the address of the next composite (struct/union) argument
// (eax = old *ap) and advances *ap by sz rounded up to a multiple of 4.
FUNCTION(__compcert_va_composite)
movl 4(%esp), %ecx // %ecx = ap parameter
movl 8(%esp), %edx // %edx = size of composite in bytes
movl 0(%ecx), %eax // %eax = current argument pointer (returned)
leal 3(%eax, %edx), %edx // advance by size, +3 for rounding
andl $0xfffffffc, %edx // and round up to multiple of 4
movl %edx, 0(%ecx) // update argument pointer
ret
ENDFUNCTION(__compcert_va_composite)
|
AbsInt/CompCert
| 2,248
|
runtime/x86_32/i64_udiv.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Unsigned division
// unsigned long long __compcert_i64_udiv(unsigned long long N, unsigned long long D)
// ABI:  IA32 cdecl; result (N / D) in edx:eax
// The three pushes move the caller's arguments to where
// __compcert_i64_udivmod expects them (20(%esp)..32(%esp) inside the
// call). udivmod returns the quotient in esi:edi, which is moved to
// the edx:eax return registers before restoring the saved registers.
FUNCTION(__compcert_i64_udiv)
pushl %ebp
pushl %esi
pushl %edi
call GLOB(__compcert_i64_udivmod)
movl %esi, %eax // quotient low word
movl %edi, %edx // quotient high word
popl %edi
popl %esi
popl %ebp
ret
ENDFUNCTION(__compcert_i64_udiv)
|
AbsInt/CompCert
| 2,622
|
runtime/x86_32/i64_shl.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Shift left
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
// unsigned long long __compcert_i64_shl(unsigned long long x, int amount)
// ABI:  IA32 cdecl; args on stack, result in edx:eax
// In:   4(%esp) = XL, 8(%esp) = XH, 12(%esp) = shift amount
// Out:  edx:eax = x << (amount & 63)
// Clobbers: eax, ecx, edx, flags (all caller-saved)
FUNCTION(__compcert_i64_shl)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl // bit 5 decides which 32-bit case applies
jne 1f
// shift amount < 32
movl 4(%esp), %eax
movl 8(%esp), %edx
shldl %cl, %eax, %edx // edx = high(XH:XL << amount)
shll %cl, %eax // eax = XL << amount
ret
// shift amount >= 32: result is XL << (amount - 32) in the high word.
// (shll uses cl mod 32, which equals amount - 32 here.)
1: movl 4(%esp), %edx
shll %cl, %edx // edx = XL << (amount - 32)
xorl %eax, %eax // eax = 0
ret
ENDFUNCTION(__compcert_i64_shl)
|
AbsInt/CompCert
| 4,761
|
runtime/x86_32/i64_udivmod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Division and remainder
// Auxiliary function, never called directly from C code
// Input: 20(esp), 24(esp) is dividend N
// 28(esp), 32(esp) is divisor D
// Output: esi:edi is quotient Q
// eax:edx is remainder R
// ebp is preserved
// Unsigned 64-bit division and remainder (internal helper).
// Input:  20(%esp), 24(%esp) = dividend N (low, high)
//         28(%esp), 32(%esp) = divisor D (low, high)
// Output: esi:edi = quotient Q (low, high); edx:eax would name the
//         pair backwards -- remainder R is in eax (low) and edx (high).
// ebp is preserved; esi, edi, eax, ecx, edx are clobbered.
// Traps (#DE) if D = 0, matching hardware division semantics.
FUNCTION(__compcert_i64_udivmod)
cmpl $0, 32(%esp) // single-word divisor? (DH = 0)
jne 1f
// Special case 64 bits divided by 32 bits:
// two chained 32-bit divisions, schoolbook style.
movl 28(%esp), %ecx // divide NH by DL
movl 24(%esp), %eax // (will trap if D = 0)
xorl %edx, %edx
divl %ecx // eax = quotient, edx = remainder
movl %eax, %edi // high word of quotient in edi
movl 20(%esp), %eax // divide rem : NL by DL
divl %ecx // eax = quotient, edx = remainder
movl %eax, %esi // low word of quotient in esi
movl %edx, %eax // low word of remainder in eax
xorl %edx, %edx // high word of remainder is 0, in edx
ret
// The general case: DH != 0, so the true quotient fits in 32 bits.
1: movl 28(%esp), %ecx // esi:ecx = D
movl 32(%esp), %esi
movl 20(%esp), %eax // edx:eax = N
movl 24(%esp), %edx
// Scale D and N down, giving D' and N', until D' fits in 32 bits
// (shrl/rcrl pairs perform 64-bit right shifts through the carry)
2: shrl $1, %esi // shift D' right by one
rcrl $1, %ecx
shrl $1, %edx // shift N' right by one
rcrl $1, %eax
testl %esi, %esi // repeat until D'H = 0
jnz 2b
// Divide N' by D' to get an approximate quotient
divl %ecx // eax = quotient, edx = remainder
movl %eax, %esi // save tentative quotient Q in esi
// Check for off by one quotient
// Compute Q * D (64-bit product: Q*DL plus Q*DH shifted into high word)
3: movl 32(%esp), %ecx
imull %esi, %ecx // ecx = Q * DH
movl 28(%esp), %eax
mull %esi // edx:eax = Q * DL
add %ecx, %edx // edx:eax = Q * D
jc 5f // overflow in addition means Q is too high
// Compare Q * D with N, computing the remainder in the process
movl %eax, %ecx
movl 20(%esp), %eax
subl %ecx, %eax
movl %edx, %ecx
movl 24(%esp), %edx
sbbl %ecx, %edx // edx:eax = N - Q * D
jnc 4f // no carry: N >= Q * D, we are fine
decl %esi // carry: N < Q * D, adjust Q down by 1
addl 28(%esp), %eax // and remainder up by D
adcl 32(%esp), %edx
// Finished
4: xorl %edi, %edi // high half of quotient is 0 (DH != 0 here)
ret
// Special case when Q * D overflows 64 bits: Q is certainly too high
5: decl %esi // adjust Q down by 1
jmp 3b // and redo check & computation of remainder
ENDFUNCTION(__compcert_i64_udivmod)
|
AbsInt/CompCert
| 2,668
|
runtime/x86_32/i64_sar.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Shift right signed
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
// long long __compcert_i64_sar(long long x, int amount)
// ABI:  IA32 cdecl; args on stack, result in edx:eax
// In:   4(%esp) = XL, 8(%esp) = XH, 12(%esp) = shift amount
// Out:  edx:eax = x >> (amount & 63), arithmetic (sign-filling) shift
// Clobbers: eax, ecx, edx, flags (all caller-saved)
FUNCTION(__compcert_i64_sar)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl // bit 5 decides which 32-bit case applies
jne 1f
// shift amount < 32
movl 4(%esp), %eax
movl 8(%esp), %edx
shrdl %cl, %edx, %eax // eax = low(XH:XL >> amount)
sarl %cl, %edx // edx = XH >> amount (signed)
ret
// shift amount >= 32: low word is XH >> (amount - 32),
// high word is the sign of X replicated into all 32 bits.
1: movl 8(%esp), %eax
movl %eax, %edx
sarl %cl, %eax // eax = XH >> (amount - 32)
sarl $31, %edx // edx = sign of X
ret
ENDFUNCTION(__compcert_i64_sar)
|
AbsInt/CompCert
| 2,522
|
runtime/x86_32/i64_utof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion unsigned long -> single-precision float
// float __compcert_i64_utof(unsigned long long x)
// ABI:  IA32 cdecl; argument at 4(%esp)/8(%esp), result on the x87 stack
FUNCTION(__compcert_i64_utof)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63? (sign bit of high word)
jns 1f
fadds LC1 // adjust by 2^64 (signed read was x - 2^64)
// The TOS is in extended precision and therefore exact.
// Force rounding to single precision by a store/reload through the
// argument's (now dead) stack slot.
1: fstps 4(%esp)
flds 4(%esp)
ret
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
ENDFUNCTION(__compcert_i64_utof)
|
AbsInt/CompCert
| 2,983
|
runtime/x86_32/i64_sdiv.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Signed division
// long long __compcert_i64_sdiv(long long N, long long D)
// ABI:  IA32 cdecl; result (N / D, truncated toward zero) in edx:eax
// Strategy: take absolute values of N and D (in their stack slots),
// call the unsigned helper, then negate the quotient if exactly one
// operand was negative. The negl/adcl/negl sequences below are the
// standard 64-bit two's-complement negation of a low:high pair.
FUNCTION(__compcert_i64_sdiv)
pushl %ebp
pushl %esi
pushl %edi
movl 20(%esp), %esi // esi = NH
movl %esi, %ebp // save sign of N in ebp
testl %esi, %esi
jge 1f // if N < 0,
negl 16(%esp) // N = -N (negate low word; CF = (low != 0))
adcl $0, %esi
negl %esi // high word = -(NH + carry)
movl %esi, 20(%esp)
1: movl 28(%esp), %esi // esi = DH
xorl %esi, %ebp // sign of result in ebp (sign bit of NH ^ DH)
testl %esi, %esi
jge 2f // if D < 0,
negl 24(%esp) // D = -D
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
2: call GLOB(__compcert_i64_udivmod) // quotient |N|/|D| in esi:edi
testl %ebp, %ebp // apply sign to result
jge 3f
negl %esi // negate the 64-bit quotient edi:esi
adcl $0, %edi
negl %edi
3: movl %esi, %eax
movl %edi, %edx
popl %edi
popl %esi
popl %ebp
ret
ENDFUNCTION(__compcert_i64_sdiv)
|
AbsInt/CompCert
| 2,894
|
runtime/x86_32/i64_smod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Signed remainder
// long long __compcert_i64_smod(long long N, long long D)
// ABI:  IA32 cdecl; result (N mod D) in edx:eax
// The remainder takes the sign of the dividend N (C semantics), so
// only NH's sign is saved in ebp; D's sign does not affect the result.
// udivmod returns the remainder |N| mod |D| in edx:eax directly.
FUNCTION(__compcert_i64_smod)
pushl %ebp
pushl %esi
pushl %edi
movl 20(%esp), %esi // esi = NH
movl %esi, %ebp // save sign of result (= sign of N) in ebp
testl %esi, %esi
jge 1f // if N < 0,
negl 16(%esp) // N = -N (64-bit negation of the stack pair)
adcl $0, %esi
negl %esi
movl %esi, 20(%esp)
1: movl 28(%esp), %esi // esi = DH
testl %esi, %esi
jge 2f // if D < 0,
negl 24(%esp) // D = -D
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
2: call GLOB(__compcert_i64_udivmod) // remainder in edx:eax
testl %ebp, %ebp // apply sign to result
jge 3f
negl %eax // negate the 64-bit remainder edx:eax
adcl $0, %edx
negl %edx
3: popl %edi
popl %esi
popl %ebp
ret
ENDFUNCTION(__compcert_i64_smod)
|
AbsInt/CompCert
| 2,467
|
runtime/x86_32/i64_dtos.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion float -> signed long
// long long __compcert_i64_dtos(double x)
// Convert a double-precision float to a signed 64-bit integer,
// truncating towards zero (C semantics), using the x87 FPU.
// In:  argument at 8(%esp) (after the subl below).
// Out: result in EDX:EAX.
FUNCTION(__compcert_i64_dtos)
subl $4, %esp
// Change rounding mode to "round towards zero"
fnstcw 0(%esp)
movw 0(%esp), %ax
movb $12, %ah
movw %ax, 2(%esp)
fldcw 2(%esp)
// Convert
fldl 8(%esp)
fistpll 8(%esp)
// Restore rounding mode
fldcw 0(%esp)
// Load result in edx:eax
movl 8(%esp), %eax
movl 12(%esp), %edx
addl $4, %esp
ret
ENDFUNCTION(__compcert_i64_dtos)
|
AbsInt/CompCert
| 3,661
|
runtime/x86_32/i64_smulh.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Multiply-high signed
#define XL 12(%esp)
#define XH 16(%esp)
#define YL 20(%esp)
#define YH 24(%esp)
// Hacker's Delight section 8.3:
// - compute high 64 bits of the unsigned product X * Y (see i64_umulh.S)
// - subtract X if Y < 0
// - subtract Y if X < 0
// long long __compcert_i64_smulh(long long X, long long Y)
// Return the high 64 bits of the signed 128-bit product X * Y.
// In:  X and Y on the stack (XL/XH/YL/YH macros above).
// Out: result in EDX:EAX.
// Clobbers: ECX; ESI and EDI are saved and restored.
FUNCTION(__compcert_i64_smulh)
pushl %esi
pushl %edi
movl XL, %eax
mull YL // EDX:EAX = 64-bit product XL.YL
movl %edx, %ecx
xorl %esi, %esi
xorl %edi, %edi // EDI:ESI:ECX accumulates bits 127:32 of result
movl XH, %eax
mull YL // EDX:EAX = 64-bit product XH.YL
addl %eax, %ecx
adcl %edx, %esi
adcl $0, %edi // propagate carry into top word
movl YH, %eax
mull XL // EDX:EAX = 64-bit product YH.XL
addl %eax, %ecx
adcl %edx, %esi
adcl $0, %edi
movl XH, %eax
mull YH // EDX:EAX = 64-bit product XH.YH
addl %eax, %esi
adcl %edx, %edi
// Here, EDI:ESI is the high 64 bits of the unsigned product X.Y
xorl %eax, %eax
xorl %edx, %edx
cmpl $0, XH
cmovl YL, %eax
cmovl YH, %edx // EDX:EAX = Y if X < 0, = 0 if X >= 0
subl %eax, %esi
sbbl %edx, %edi // EDI:ESI -= Y if X < 0
xorl %eax, %eax
xorl %edx, %edx
cmpl $0, YH
cmovl XL, %eax
cmovl XH, %edx // EDX:EAX = X if Y < 0, = 0 if Y >= 0
subl %eax, %esi
sbbl %edx, %edi // EDI:ESI -= X if Y < 0
// Now EDI:ESI contains the high 64 bits of the signed product X.Y
movl %esi, %eax
movl %edi, %edx
popl %edi
popl %esi
ret
ENDFUNCTION(__compcert_i64_smulh)
|
AbsInt/CompCert
| 2,346
|
runtime/x86_32/i64_stod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. IA32 version.
#include "sysdeps.h"
// Conversion signed long -> double-precision float
// double __compcert_i64_stod(long long x)
// Convert a signed 64-bit integer to double-precision float.
// In:  argument at 4(%esp).  Out: result in st(0).
FUNCTION(__compcert_i64_stod)
fildll 4(%esp)
ret
// The result is in extended precision (80 bits) and therefore
// exact (64 bits of mantissa). It will be rounded to double
// precision by the caller, when transferring the result
// to an XMM register or a 64-bit stack slot.
ENDFUNCTION(__compcert_i64_stod)
|
AbsInt/CompCert
| 2,422
|
runtime/x86_64/i64_dtou.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. x86_64 version.
#include "sysdeps.h"
// Conversion float -> unsigned long
// unsigned long long __compcert_i64_dtou(double x)
// Convert a double-precision float to an unsigned 64-bit integer,
// truncating towards zero.
// In:  FP_ARG_1.  Out: INT_RES (macros from sysdeps.h).
// cvttsd2siq only handles the signed range, so inputs >= 2^63 are
// biased down by 2^63 before conversion and the result is rebiased
// by adding 2^63 as an integer.
FUNCTION(__compcert_i64_dtou)
ucomisd .LC1(%rip), FP_ARG_1 // compare argument against 2^63
jnb 1f // branch if argument >= 2^63 (or unordered)
cvttsd2siq FP_ARG_1, INT_RES // small case: fits in signed range, convert directly
ret
1: subsd .LC1(%rip), FP_ARG_1 // large case: x - 2^63 now fits in signed range
cvttsd2siq FP_ARG_1, INT_RES
addq .LC2(%rip), INT_RES // re-add 2^63 with integer arithmetic
ret
ENDFUNCTION(__compcert_i64_dtou)
RODATA
.p2align 3
.LC1: .quad 0x43e0000000000000 // 2^63 in double precision
.LC2: .quad 0x8000000000000000 // 2^63 as an integer
|
AbsInt/CompCert
| 2,650
|
runtime/x86_64/i64_utod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. x86_64 version.
#include "sysdeps.h"
// Conversion unsigned long -> double-precision float
// double __compcert_i64_utod(unsigned long long x)
// Convert an unsigned 64-bit integer to double-precision float.
// In:  INT_ARG_1.  Out: FP_RES (macros from sysdeps.h).
// cvtsi2sdq only handles signed inputs, so values >= 2^63 are halved
// with a round-to-odd adjustment, converted, then doubled.
FUNCTION(__compcert_i64_utod)
testq INT_ARG_1, INT_ARG_1
js 1f
pxor FP_RES, FP_RES // if < 2^63,
cvtsi2sdq INT_ARG_1, FP_RES // convert as if signed
ret
1: // if >= 2^63, use round-to-odd trick
movq INT_ARG_1, %rax
shrq %rax
andq $1, INT_ARG_1
orq INT_ARG_1, %rax // (arg >> 1) | (arg & 1): keeps rounding correct
pxor FP_RES, FP_RES // break false dependency on FP_RES
cvtsi2sdq %rax, FP_RES // convert as if signed
addsd FP_RES, FP_RES // multiply result by 2.0
ret
ENDFUNCTION(__compcert_i64_utod)
|
AbsInt/CompCert
| 8,459
|
runtime/x86_64/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. x86_64 version.
#include "sysdeps.h"
// ELF ABI
#if defined(SYS_linux) || defined(SYS_bsd) || defined(SYS_macos)
// typedef struct {
// unsigned int gp_offset;
// unsigned int fp_offset;
// void *overflow_arg_area;
// void *reg_save_area;
// } va_list[1];
// The va_start macro initializes the structure as follows:
// - reg_save_area: The element points to the start of the register save area.
// - overflow_arg_area: This pointer is used to fetch arguments passed on
// the stack. It is initialized with the address of the first argument
// passed on the stack, if any, and then always updated to point to the
// start of the next argument on the stack.
// - gp_offset: The element holds the offset in bytes from reg_save_area
// to the place where the next available general purpose argument
// register is saved. In case all argument registers have been
// exhausted, it is set to the value 48 (6 * 8).
// - fp_offset: The element holds the offset in bytes from reg_save_area
// to the place where the next available floating point argument
// register is saved. In case all argument registers have been
// exhausted, it is set to the value 176 (6 * 8 + 8 * 16).
// unsigned int __compcert_va_int32(va_list ap);
// unsigned long long __compcert_va_int64(va_list ap);
// double __compcert_va_float64(va_list ap);
// unsigned int __compcert_va_int32(va_list ap)
// Fetch the next 32-bit integer argument and advance ap.
// In:  rdi = ap (pointer to the va_list structure described above).
// Out: eax = argument value.
FUNCTION(__compcert_va_int32)
movl 0(%rdi), %edx // edx = gp_offset
cmpl $48, %edx // 48 = 6 * 8: all six GP argument registers consumed?
jae 1f
// next argument is in gp reg area
movq 16(%rdi), %rsi // rsi = reg_save_area
movl 0(%rsi, %rdx, 1), %eax // next integer argument
addl $8, %edx
movl %edx, 0(%rdi) // increment gp_offset by 8
ret
// next argument is in overflow arg area
1: movq 8(%rdi), %rsi // rsi = overflow_arg_area
movq 0(%rsi), %rax // next integer argument
addq $8, %rsi
movq %rsi, 8(%rdi) // increment overflow_arg_area by 8
ret
ENDFUNCTION(__compcert_va_int32)
// unsigned long long __compcert_va_int64(va_list ap)
// Fetch the next 64-bit integer argument and advance ap.
// In:  rdi = ap (pointer to the va_list structure described above).
// Out: rax = argument value.
FUNCTION(__compcert_va_int64)
movl 0(%rdi), %edx // edx = gp_offset
cmpl $48, %edx // 48 = 6 * 8: all six GP argument registers consumed?
jae 1f
// next argument is in gp reg area
movq 16(%rdi), %rsi // rsi = reg_save_area
movq 0(%rsi, %rdx, 1), %rax // next integer argument
addl $8, %edx
movl %edx, 0(%rdi) // increment gp_offset by 8
ret
// next argument is in overflow arg area
1: movq 8(%rdi), %rsi // rsi = overflow_arg_area
movq 0(%rsi), %rax // next integer argument
addq $8, %rsi
movq %rsi, 8(%rdi) // increment overflow_arg_area by 8
ret
ENDFUNCTION(__compcert_va_int64)
// double __compcert_va_float64(va_list ap)
// Fetch the next double-precision FP argument and advance ap.
// In:  rdi = ap (pointer to the va_list structure described above).
// Out: xmm0 = argument value.
FUNCTION(__compcert_va_float64)
movl 4(%rdi), %edx // edx = fp_offset
cmpl $176, %edx // 176 = 6 * 8 + 8 * 16: all eight FP argument registers consumed?
jae 1f
// next argument is in fp reg area
movq 16(%rdi), %rsi // rsi = reg_save_area
movsd 0(%rsi, %rdx, 1), %xmm0 // next floating-point argument
addl $16, %edx // FP save slots are 16 bytes wide
movl %edx, 4(%rdi) // increment fp_offset by 16
ret
// next argument is in overflow arg area
1: movq 8(%rdi), %rsi // rsi = overflow_arg_area
movsd 0(%rsi), %xmm0 // next floating-point argument
addq $8, %rsi // stack slots are 8 bytes wide
movq %rsi, 8(%rdi) // increment overflow_arg_area by 8
ret
ENDFUNCTION(__compcert_va_float64)
// Fetch the next composite (struct/union) argument, passed by reference:
// the vararg slot holds a pointer, so fetching it is the same as
// fetching a 64-bit integer.  Tail call keeps rdi = ap intact.
FUNCTION(__compcert_va_composite)
jmp GLOB(__compcert_va_int64) // by-ref convention, FIXME
ENDFUNCTION(__compcert_va_composite)
// Save integer and FP registers at beginning of vararg function
// r10 points to register save area
// al contains number of FP arguments passed in registers
// The register save area has the following shape:
// 0, 8, ..., 40 -> 6 x 8-byte slots for saving rdi, rsi, rdx, rcx, r8, r9
// 48, 64, ... 160 -> 8 x 16-byte slots for saving xmm0...xmm7
// Spill all argument-passing registers into the register save area
// at the start of a variadic function (layout described above).
// In:  r10 = start of register save area; al = number of FP args
//      passed in registers (the XMM saves are skipped when al == 0).
FUNCTION(__compcert_va_saveregs)
movq %rdi, 0(%r10)
movq %rsi, 8(%r10)
movq %rdx, 16(%r10)
movq %rcx, 24(%r10)
movq %r8, 32(%r10)
movq %r9, 40(%r10)
testb %al, %al // any FP arguments in registers?
je 1f
movaps %xmm0, 48(%r10) // 16-byte slots; r10 assumed suitably aligned
movaps %xmm1, 64(%r10)
movaps %xmm2, 80(%r10)
movaps %xmm3, 96(%r10)
movaps %xmm4, 112(%r10)
movaps %xmm5, 128(%r10)
movaps %xmm6, 144(%r10)
movaps %xmm7, 160(%r10)
1: ret
ENDFUNCTION(__compcert_va_saveregs)
#endif
// Windows ABI
#if defined(SYS_cygwin)
// typedef void * va_list;
// unsigned int __compcert_va_int32(va_list * ap);
// unsigned long long __compcert_va_int64(va_list * ap);
// double __compcert_va_float64(va_list * ap);
// unsigned int __compcert_va_int32(va_list * ap)
// Windows: va_list is just a pointer into the argument area;
// load the next 32-bit value and bump the pointer by one 8-byte slot.
FUNCTION(__compcert_va_int32) // %rcx = pointer to argument pointer
movq 0(%rcx), %rdx // %rdx = current argument pointer
movl 0(%rdx), %eax // load the int32 value there
addq $8, %rdx // increment argument pointer by 8
movq %rdx, 0(%rcx)
ret
ENDFUNCTION(__compcert_va_int32)
// unsigned long long __compcert_va_int64(va_list * ap)
// Windows: load the next 64-bit value and bump the argument pointer.
FUNCTION(__compcert_va_int64) // %rcx = pointer to argument pointer
movq 0(%rcx), %rdx // %rdx = current argument pointer
movq 0(%rdx), %rax // load the int64 value there
addq $8, %rdx // increment argument pointer by 8
movq %rdx, 0(%rcx)
ret
ENDFUNCTION(__compcert_va_int64)
// double __compcert_va_float64(va_list * ap)
// Windows: load the next double from the argument area into xmm0
// and bump the argument pointer.
FUNCTION(__compcert_va_float64) // %rcx = pointer to argument pointer
movq 0(%rcx), %rdx // %rdx = current argument pointer
movsd 0(%rdx), %xmm0 // load the float64 value there
addq $8, %rdx // increment argument pointer by 8
movq %rdx, 0(%rcx)
ret
ENDFUNCTION(__compcert_va_float64)
// Fetch the next composite argument, passed by reference: the slot
// holds a pointer, so this is the same as fetching a 64-bit integer.
// Tail call keeps rcx (pointer to argument pointer) intact.
FUNCTION(__compcert_va_composite)
jmp GLOB(__compcert_va_int64) // by-ref convention, FIXME
ENDFUNCTION(__compcert_va_composite)
// Save arguments passed in register in the stack at beginning of vararg
// function. The caller of the vararg function reserved 32 bytes of stack
// just for this purpose.
// FP arguments are passed both in FP registers and integer registers,
// so it's enough to save the integer registers used for parameter passing.
// Spill the four Windows integer argument registers into the caller's
// 32-byte home area (which sits just above this function's return
// address, hence the 16-byte bias on the offsets).
FUNCTION(__compcert_va_saveregs)
movq %rcx, 16(%rsp) // 16 = 8 (our return addr) + 8 (caller's first home slot offset)
movq %rdx, 24(%rsp)
movq %r8, 32(%rsp)
movq %r9, 40(%rsp)
ret
ENDFUNCTION(__compcert_va_saveregs)
#endif
|
AbsInt/CompCert
| 2,650
|
runtime/x86_64/i64_utof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. x86_64 version.
#include "sysdeps.h"
// Conversion unsigned long -> single-precision float
// float __compcert_i64_utof(unsigned long long x)
// Convert an unsigned 64-bit integer to single-precision float.
// In:  INT_ARG_1.  Out: FP_RES (macros from sysdeps.h).
// cvtsi2ssq only handles signed inputs, so values >= 2^63 are halved
// with a round-to-odd adjustment, converted, then doubled.
FUNCTION(__compcert_i64_utof)
testq INT_ARG_1, INT_ARG_1
js 1f
pxor FP_RES, FP_RES // if < 2^63,
cvtsi2ssq INT_ARG_1, FP_RES // convert as if signed
ret
1: // if >= 2^63, use round-to-odd trick
movq INT_ARG_1, %rax
shrq %rax
andq $1, INT_ARG_1
orq INT_ARG_1, %rax // (arg >> 1) | (arg & 1): keeps rounding correct
pxor FP_RES, FP_RES // break false dependency on FP_RES
cvtsi2ssq %rax, FP_RES // convert as if signed
addss FP_RES, FP_RES // multiply result by 2.0
ret
ENDFUNCTION(__compcert_i64_utof)
|
AbsInt/CompCert
| 2,912
|
runtime/powerpc/i64_umulh.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Unsigned multiply-high
// X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
.balign 16
.globl __compcert_i64_umulh
// unsigned long long __compcert_i64_umulh(unsigned long long X,
//                                         unsigned long long Y)
// Return the high 64 bits of the unsigned 128-bit product X * Y.
// In:  r3:r4 = X (high:low), r5:r6 = Y (high:low).
// Out: r3:r4 = result (high:low).  Clobbers r0, r7, r8, r9.
__compcert_i64_umulh:
// r7:r8:r9 accumulate bits 127:32 of the full product
mulhwu r9, r4, r6 // r9 = high half of XL.YL
mullw r0, r4, r5 // r0 = low half of XL.YH
addc r9, r9, r0
mulhwu r0, r4, r5 // r0 = high half of XL.YH
addze r8, r0 // r8 = high half of XL.YH + carry
mullw r0, r3, r6 // r0 = low half of XH.YL
addc r9, r9, r0
mulhwu r0, r3, r6 // r0 = high half of XH.YL
adde r8, r8, r0
li r7, 0
addze r7, r7 // r7 = carry out of bits 95:64
mullw r0, r3, r5 // r0 = low half of XH.YH
addc r4, r8, r0 // result low word
mulhwu r0, r3, r5 // r0 = high half of XH.YH
adde r3, r7, r0 // result high word
blr
.type __compcert_i64_umulh, @function
.size __compcert_i64_umulh, .-__compcert_i64_umulh
|
AbsInt/CompCert
| 2,847
|
runtime/powerpc/i64_shr.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Shift right unsigned
.balign 16
.globl __compcert_i64_shr
// unsigned long long __compcert_i64_shr(unsigned long long X, unsigned amount)
// Logical (unsigned) right shift by amount mod 64.
// In:  r3:r4 = X (high:low), r5 = shift amount.
// Out: r3:r4 = result (high:low).  Clobbers r0, r6, r7.
__compcert_i64_shr:
// On PowerPC, shift instructions with amount mod 64 >= 32 return 0
// lo = (lo >> amount) | (hi << (32 - amount)) | (hi >> (amount - 32))
// hi = hi >> amount
// if 0 <= amount < 32:
// (amount - 32) mod 64 >= 32, hence hi >> (amount - 32) == 0
// if 32 <= amount < 64:
// hi >> amount == 0
// (32 - amount) mod 64 >= 32, hence hi << (32 - amount) == 0
andi. r5, r5, 63 // take amount modulo 64
subfic r6, r5, 32 // r6 = 32 - amount
addi r7, r5, -32 // r7 = amount - 32
srw r4, r4, r5
slw r0, r3, r6
or r4, r4, r0
srw r0, r3, r7
or r4, r4, r0
srw r3, r3, r5
blr
.type __compcert_i64_shr, @function
.size __compcert_i64_shr, .-__compcert_i64_shr
|
AbsInt/CompCert
| 2,199
|
runtime/powerpc/i64_umod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Unsigned modulus
.balign 16
.globl __compcert_i64_umod
// unsigned long long __compcert_i64_umod(unsigned long long N,
//                                        unsigned long long D)
// Unsigned 64-bit modulus: tail call to the combined
// division/modulus helper, which leaves the remainder in the
// result registers expected by this entry point.
__compcert_i64_umod:
b __compcert_i64_udivmod
.type __compcert_i64_umod, @function
.size __compcert_i64_umod, .-__compcert_i64_umod
|
AbsInt/CompCert
| 3,783
|
runtime/powerpc/i64_dtou.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from double float to unsigned long
.balign 16
.globl __compcert_i64_dtou
// unsigned long long __compcert_i64_dtou(double f)
// Convert a double-precision float to an unsigned 64-bit integer,
// truncating towards zero, by direct bit manipulation of the IEEE-754
// representation (no FP conversion instruction is used).
// In:  f1 = argument.  Out: r3:r4 = result (high:low).
// Negative inputs and inputs < 1.0 produce 0; inputs >= 2^64 produce
// 0xFFFF_FFFF_FFFF_FFFF.  Clobbers r0, r5, r6, r7 and 16 stack bytes.
__compcert_i64_dtou:
stfdu f1, -16(r1) // extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
addi r1, r1, 16 // pop the temporary stack slot
cmpwi r3, 0 // is double < 0? (sign bit is the top bit of HI)
blt 1f // then it converts to 0
// extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52)
rlwinm r5, r3, 12, 21, 31
addi r5, r5, -1075
// check range of exponent
cmpwi r5, -52 // if EXP < -52, double is < 1.0
blt 1f
cmpwi r5, 12 // if EXP >= 64 - 52, double is >= 2^64
bge 2f
// extract true mantissa (clear exponent field, restore implicit leading 1)
rlwinm r3, r3, 0, 12, 31 // HI &= ~0xFFF00000
oris r3, r3, 0x10 // HI |= 0x00100000
// shift it appropriately
cmpwi r5, 0
blt 3f
// if EXP >= 0, shift left by EXP. Note that EXP < 12.
subfic r6, r5, 32 // r6 = 32 - EXP
slw r3, r3, r5
srw r0, r4, r6
or r3, r3, r0
slw r4, r4, r5
blr
// if EXP < 0, shift right by -EXP. Note that -EXP <= 52 but can be >= 32.
3: subfic r5, r5, 0 // r5 = -EXP = shift amount
subfic r6, r5, 32 // r6 = 32 - amount
addi r7, r5, -32 // r7 = amount - 32 (see i64_shr.S)
srw r4, r4, r5
slw r0, r3, r6
or r4, r4, r0
srw r0, r3, r7
or r4, r4, r0
srw r3, r3, r5
blr
// Special cases
1: li r3, 0 // result is 0
li r4, 0
blr
2: li r3, -1 // result is MAX_UINT
li r4, -1
blr
.type __compcert_i64_dtou, @function
.size __compcert_i64_dtou, .-__compcert_i64_dtou
|
AbsInt/CompCert
| 3,078
|
runtime/powerpc/i64_stof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from signed long to single float
.balign 16
.globl __compcert_i64_stof
// float __compcert_i64_stof(signed long long x)
// Convert a signed 64-bit integer (r3:r4 = high:low) to single float (f1),
// going through double precision while avoiding double rounding.
__compcert_i64_stof:
mflr r9 // save return address (we call stod below)
// Check whether -2^53 <= X < 2^53
srawi r5, r3, 21 // r5 = high 32 bits of X >> 53
// -2^53 <= X < 2^53 iff r5 is -1 or 0, that is, iff r5 + 1 is 0 or 1
addi r5, r5, 1
cmplwi r5, 2
blt 1f
// X is large enough that double rounding can occur.
// Avoid it by nudging X away from the points where double rounding
// occurs (the "round to odd" technique)
rlwinm r5, r4, 0, 21, 31 // extract bits 0 to 11 of X
addi r5, r5, 0x7FF // r5 = (X & 0x7FF) + 0x7FF
// bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
// bits 13-31 of r5 are 0
or r4, r4, r5 // correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 // set to 0 bits 0 to 11 of X
// Convert to double, then round to single
1: bl __compcert_i64_stod
mtlr r9 // restore return address
frsp f1, f1 // final rounding to single precision
blr
.type __compcert_i64_stof, @function
.size __compcert_i64_stof, .-__compcert_i64_stof
|
AbsInt/CompCert
| 2,876
|
runtime/powerpc/i64_utod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from unsigned long to double float
.balign 16
.globl __compcert_i64_utod
// double __compcert_i64_utod(unsigned long long x)
// Convert an unsigned 64-bit integer (r3:r4 = high:low) to a double (f1)
// using the exponent trick: build 2^52 + XL and 2^84 + XH*2^32 as doubles,
// subtract the biases, and add the two halves.
__compcert_i64_utod:
addi r1, r1, -16 // carve a 16-byte scratch area on the stack
lis r5, 0x4330 // high word of 2^52 as an IEEE double
li r6, 0
stw r5, 0(r1)
stw r4, 4(r1) // 0(r1) = 2^52 + (double) XL
stw r5, 8(r1)
stw r6, 12(r1) // 8(r1) = 2^52
lfd f1, 0(r1)
lfd f2, 8(r1)
fsub f1, f1, f2 // f1 is (double) XL
lis r5, 0x4530 // high word of 2^84 as an IEEE double
stw r5, 0(r1) // 0(r1) = 2^84 + (double) XH * 2^32
stw r3, 4(r1)
stw r5, 8(r1) // 8(r1) = 2^84
lfd f2, 0(r1)
lfd f3, 8(r1)
fsub f2, f2, f3 // f2 is XH * 2^32 as a double
fadd f1, f1, f2 // add both to get result
addi r1, r1, 16 // release scratch area
blr
.type __compcert_i64_utod, @function
.size __compcert_i64_utod, .-__compcert_i64_utod
|
AbsInt/CompCert
| 6,889
|
runtime/powerpc/vararg.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for variadic functions <stdarg.h>. PowerPC version
// typedef struct {
// unsigned char ireg; // index of next integer register
// unsigned char freg; // index of next FP register
// char * stk; // pointer to next argument in stack
// struct {
// int iregs[8];
// double fregs[8];
// } * regs; // pointer to saved register area
// } va_list[1];
//
// unsigned int __compcert_va_int32(va_list ap);
// unsigned long long __compcert_va_int64(va_list ap);
// double __compcert_va_float64(va_list ap);
#include "sysdeps.h"
.text
.balign 16
.globl __compcert_va_int32
// unsigned int __compcert_va_int32(va_list ap)
// Fetch the next 32-bit integer argument of a vararg function.
__compcert_va_int32:
// r3 = ap = address of va_list structure
lbz r4, 0(r3) // r4 = ap->ireg = next integer register
cmplwi r4, 8
bge 1f // all 8 arg registers consumed -> argument is on stack
// Next argument was passed in an integer register
lwz r5, 8(r3) // r5 = ap->regs = base of saved register area
rlwinm r6, r4, 2, 0, 29 // r6 = r4 * 4
addi r4, r4, 1 // increment ap->ireg
stb r4, 0(r3)
lwzx r3, r5, r6 // load argument in r3
blr
// Next argument was passed on stack
1: lwz r5, 4(r3) // r5 = ap->stk = next argument passed on stack
addi r5, r5, 4 // advance ap->stk by 4
stw r5, 4(r3)
lwz r3, -4(r5) // load argument in r3
blr
.type __compcert_va_int32, @function
.size __compcert_va_int32, .-__compcert_va_int32
.balign 16
.globl __compcert_va_int64
// unsigned long long __compcert_va_int64(va_list ap)
// Fetch the next 64-bit integer argument of a vararg function.
// Such arguments occupy an even/odd register pair or an 8-aligned stack slot.
__compcert_va_int64:
// r3 = ap = address of va_list structure
lbz r4, 0(r3) // r4 = ap->ireg = next integer register
cmplwi r4, 7
bge 1f // fewer than 2 registers left -> argument is on stack
// Next argument was passed in two consecutive integer registers
lwz r5, 8(r3) // r5 = ap->regs = base of saved register area
addi r4, r4, 3 // round r4 up to an even number and add 2
rlwinm r4, r4, 0, 0, 30 // (clear low bit)
rlwinm r6, r4, 2, 0, 29 // r6 = r4 * 4
add r5, r5, r6 // r5 = address of argument + 8
stb r4, 0(r3) // update ap->ireg
lwz r3, -8(r5) // load argument in r3:r4
lwz r4, -4(r5)
blr
// Next argument was passed on stack
1: lwz r5, 4(r3) // r5 = ap->stk = next argument passed on stack
li r4, 8
stb r4, 0(r3) // set ap->ireg = 8 so that no ireg is left
addi r5, r5, 15 // round r5 to a multiple of 8 and add 8
rlwinm r5, r5, 0, 0, 28
stw r5, 4(r3) // update ap->stk
lwz r3, -8(r5) // load argument in r3:r4
lwz r4, -4(r5)
blr
.type __compcert_va_int64, @function
.size __compcert_va_int64, .-__compcert_va_int64
.balign 16
.globl __compcert_va_float64
// double __compcert_va_float64(va_list ap)
// Fetch the next double-precision FP argument of a vararg function.
__compcert_va_float64:
// r3 = ap = address of va_list structure
lbz r4, 1(r3) // r4 = ap->freg = next float register
cmplwi r4, 8
bge 1f // all 8 FP arg registers consumed -> argument is on stack
// Next argument was passed in a FP register
lwz r5, 8(r3) // r5 = ap->regs = base of saved register area
rlwinm r6, r4, 3, 0, 28 // r6 = r4 * 8
add r5, r5, r6
lfd f1, 32(r5) // load argument in f1 (fregs start 32 bytes into area)
addi r4, r4, 1 // increment ap->freg
stb r4, 1(r3)
blr
// Next argument was passed on stack
1: lwz r5, 4(r3) // r5 = ap->stk = next argument passed on stack
addi r5, r5, 15 // round r5 to a multiple of 8 and add 8
rlwinm r5, r5, 0, 0, 28
lfd f1, -8(r5) // load argument in f1
stw r5, 4(r3) // update ap->stk
blr
.type __compcert_va_float64, @function
// Fixed: size was previously computed as ".-__compcert_va_int64", i.e.
// relative to the wrong symbol, yielding a bogus ELF symbol size.
.size __compcert_va_float64, .-__compcert_va_float64
.balign 16
.globl __compcert_va_composite
// Fetch the next composite (struct/union) vararg argument.
// Delegates to __compcert_va_int32, so the argument is presumably
// passed as a pointer-sized value — confirm against the compiler's
// calling-convention for composites.
__compcert_va_composite:
b __compcert_va_int32 // tail-branch: reuse the 32-bit fetcher
.type __compcert_va_composite, @function
.size __compcert_va_composite, .-__compcert_va_composite
// Save integer and FP registers at beginning of vararg function
// Spills the 8 integer argument registers r3-r10 and, when CR bit 6
// is set by the caller, the 8 FP argument registers f1-f8 into a
// 96-byte register save area at the top of the caller's frame.
.balign 16
.globl __compcert_va_saveregs
__compcert_va_saveregs:
lwz r11, 0(r1) // r11 points to top of our frame (via back-chain)
stwu r3, -96(r11) // register save area is 96 bytes below
stw r4, 4(r11) // (stwu updated r11 to the base of the area)
stw r5, 8(r11)
stw r6, 12(r11)
stw r7, 16(r11)
stw r8, 20(r11)
stw r9, 24(r11)
stw r10, 28(r11)
bf 6, 1f // don't save FP regs if CR6 bit is clear
stfd f1, 32(r11)
stfd f2, 40(r11)
stfd f3, 48(r11)
stfd f4, 56(r11)
stfd f5, 64(r11)
stfd f6, 72(r11)
stfd f7, 80(r11)
stfd f8, 88(r11)
1: blr
.type __compcert_va_saveregs, @function
.size __compcert_va_saveregs, .-__compcert_va_saveregs
|
AbsInt/CompCert
| 2,473
|
runtime/powerpc/i64_udiv.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Unsigned division
// unsigned long long __compcert_i64_udiv(unsigned long long n,
//                                        unsigned long long d)
// N in r3:r4, D in r5:r6 (high:low); quotient returned in r3:r4.
// Thin wrapper over __compcert_i64_udivmod, which returns the quotient
// in r5:r6 and the remainder in r3:r4.
.balign 16
.globl __compcert_i64_udiv
__compcert_i64_udiv:
mflr r0
stw r0, 4(r1) // save return address in caller's frame
bl __compcert_i64_udivmod // unsigned divide
lwz r0, 4(r1)
mtlr r0 // restore return address
mr r3, r5 // result = quotient
mr r4, r6
blr
.type __compcert_i64_udiv, @function
.size __compcert_i64_udiv, .-__compcert_i64_udiv
|
AbsInt/CompCert
| 2,835
|
runtime/powerpc/i64_shl.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Shift left
// unsigned long long __compcert_i64_shl(unsigned long long x, int amount)
// X in r3:r4 (high:low), shift amount in r5 (taken modulo 64);
// result in r3:r4.
.balign 16
.globl __compcert_i64_shl
__compcert_i64_shl:
// On PowerPC, shift instructions with amount mod 64 >= 32 return 0
// hi = (hi << amount) | (lo >> (32 - amount)) | (lo << (amount - 32))
// lo = lo << amount
// if 0 <= amount < 32:
// (amount - 32) mod 64 >= 32, hence lo << (amount - 32) == 0
// if 32 <= amount < 64:
// lo << amount == 0
// (32 - amount) mod 64 >= 32, hence lo >> (32 - amount) == 0
andi. r5, r5, 63 // take amount modulo 64
subfic r6, r5, 32 // r6 = 32 - amount
addi r7, r5, -32 // r7 = amount - 32
slw r3, r3, r5 // hi << amount
srw r0, r4, r6 // lo >> (32 - amount)
or r3, r3, r0
slw r0, r4, r7 // lo << (amount - 32)
or r3, r3, r0
slw r4, r4, r5 // lo << amount
blr
.type __compcert_i64_shl, @function
.size __compcert_i64_shl, .-__compcert_i64_shl
|
AbsInt/CompCert
| 8,671
|
runtime/powerpc/i64_udivmod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Unsigned division and modulus
// This function computes both the quotient and the remainder of two
// unsigned 64-bit integers.
// Input: numerator N in (r3,r4), divisor D in (r5,r6)
// Output: quotient Q in (r5,r6), remainder R in (r3,r4)
// Destroys: all integer caller-save registers
// Internal calling convention (not standard ABI): also uses the
// callee-saved r28-r31, which are saved/restored in the frame below.
.globl __compcert_i64_udivmod
.balign 16
__compcert_i64_udivmod:
cmplwi r5, 0 // DH == 0 ?
stwu r1, -32(r1) // allocate frame, keep back-chain
mflr r0
stw r0, 8(r1) // save return address
stw r31, 12(r1)
beq 1f // divisor fits in 32 bits: special cases below
// The general case
stw r30, 16(r1)
stw r29, 20(r1)
stw r28, 24(r1)
mr r28, r3 // Save N in (r28, r29)
mr r29, r4
mr r30, r5 // Save D in (r30, r31)
mr r31, r6
// Scale N and D down, giving N' and D', such that 2^31 <= D' < 2^32
cntlzw r7, r5 // r7 = leading zeros in DH = 32 - shift amount
subfic r8, r7, 32 // r8 = shift amount
slw r0, r3, r7 // N' = N >> shift amount
srw r3, r3, r8
srw r4, r4, r8
or r4, r4, r0
slw r0, r5, r7 // D' = D >> shift amount
srw r6, r6, r8
or r5, r6, r0
// Divide N' by D' to get an approximate quotient Q
bl __compcert_i64_udiv6432 // r3 = quotient, r4 = remainder
mr r6, r3 // low half of quotient Q
li r5, 0 // high half of quotient is 0
// Tentative quotient is either correct or one too high
// Compute Q * D in (r7, r8)
4: mullw r7, r6, r30 // r7 = Q * DH
mullw r8, r6, r31 // r8 = low 32 bits of Q * DL
mulhwu r0, r6, r31 // r0 = high 32 bits of Q * DL
addc r7, r7, r0
subfe. r0, r0, r0 // test carry: EQ iff carry
beq 2f // handle overflow case
// Compute R = N - Q * D, with borrow
subfc r4, r8, r29
subfe r3, r7, r28
subfe. r0, r0, r0 // test borrow: EQ iff no borrow
beq 3f // no borrow: N >= Q * D, we are good
addi r6, r6, -1 // borrow: adjust Q down by 1
addc r4, r4, r31 // and R up by D
adde r3, r3, r30
// Finished
3: lwz r0, 8(r1)
mtlr r0
lwz r31, 12(r1)
lwz r30, 16(r1)
lwz r29, 20(r1)
lwz r28, 24(r1)
addi r1, r1, 32
blr
// Special case when Q * D overflows
2: addi r6, r6, -1 // adjust Q down by 1
b 4b // and redo computation and check of remainder
.balign 16
// Special case 64 bits divided by 32 bits
1: cmplwi r3, 0 // NH == 0?
beq 4f
divwu r31, r3, r6 // Divide NH by DL, quotient QH in r31
mullw r0, r31, r6
subf r3, r0, r3 // NH is remainder of this division
mr r5, r6
bl __compcert_i64_udiv6432 // divide NH : NL by DL
mr r5, r31 // high word of quotient
mr r6, r3 // low word of quotient
// r4 contains low word of remainder
li r3, 0 // high word of remainder = 0
lwz r0, 8(r1)
mtlr r0
lwz r31, 12(r1)
addi r1, r1, 32
blr
.balign 16
// Special case 32 bits divided by 32 bits
4: mr r0, r6 // keep DL, divwu overwrites r6
divwu r6, r4, r6 // low word of quotient
li r5, 0 // high word of quotient is 0
mullw r0, r6, r0
subf r4, r0, r4 // low word of remainder
li r3, 0 // high word of remainder is 0
addi r1, r1, 32
blr
.type __compcert_i64_udivmod, @function
.size __compcert_i64_udivmod, .-__compcert_i64_udivmod
// Auxiliary division function: 64 bit integer divided by 32 bit integer
// Not exported
// Input: numerator N in (r3,r4), divisor D in r5
// Output: quotient Q in r3, remainder R in r4
// Destroys: all integer caller-save registers
// Assumes: high word of N is less than D
// The interleaved C comments are the reference pseudo-code; b = 2^16.
.balign 16
__compcert_i64_udiv6432:
// Algorithm 9.3 from Hacker's Delight, section 9.4
// Initially: u1 in r3, u0 in r4, v in r5
// s = __builtin_clz(v);
cntlzw r6, r5 // s in r6
// v = v << s;
slw r5, r5, r6
// vn1 = v >> 16; # vn1 in r7
srwi r7, r5, 16
// vn0 = v & 0xFFFF; # vn0 in r8
rlwinm r8, r5, 0, 16, 31
// un32 = (u1 << s) | (u0 >> 32 - s);
subfic r0, r6, 32
srw r0, r4, r0
slw r3, r3, r6 // u1 dies, un32 in r3
or r3, r3, r0
// un10 = u0 << s;
slw r4, r4, r6 // u0 dies, un10 in r4
// un1 = un10 >> 16;
srwi r9, r4, 16 // un1 in r9
// un0 = un10 & 0xFFFF;
rlwinm r4, r4, 0, 16, 31 // un10 dies, un0 in r4
// q1 = un32/vn1;
divwu r10, r3, r7 // q in r10
// rhat = un32 - q1*vn1;
mullw r0, r10, r7
subf r11, r0, r3 // rhat in r11
// again1:
1:
// if (q1 >= b || q1*vn0 > b*rhat + un1) {
cmplwi r10, 0xFFFF
bgt 2f
mullw r0, r10, r8
slwi r12, r11, 16
add r12, r12, r9
cmplw r0, r12
ble 3f
2:
// q1 = q1 - 1;
addi r10, r10, -1
// rhat = rhat + vn1;
add r11, r11, r7
// if (rhat < b) goto again1;}
cmplwi r11, 0xFFFF
ble 1b
3:
// un21 = un32*b + un1 - q1*v;
slwi r0, r3, 16 // un32 dies
add r9, r0, r9 // un1 dies
mullw r0, r10, r5
subf r9, r0, r9 // un21 in r9
// q0 = un21/vn1;
divwu r3, r9, r7 // q0 in r3
// rhat = un21 - q0*vn1;
mullw r0, r3, r7
subf r11, r0, r9 // rhat in r11
// again2:
4:
// if (q0 >= b || q0*vn0 > b*rhat + un0) {
cmplwi r3, 0xFFFF
bgt 5f
mullw r0, r3, r8
slwi r12, r11, 16
add r12, r12, r4
cmplw r0, r12
ble 6f
5:
// q0 = q0 - 1;
addi r3, r3, -1
// rhat = rhat + vn1;
add r11, r11, r7
// if (rhat < b) goto again2;}
cmplwi r11, 0xFFFF
ble 4b
6:
// remainder = (un21*b + un0 - q0*v) >> s;
slwi r0, r9, 16
add r4, r0, r4 // un0 dies, remainder in r4
mullw r0, r3, r5
subf r4, r0, r4
srw r4, r4, r6
// quotient = q1*b + q0;
slwi r0, r10, 16
add r3, r0, r3
blr
.type __compcert_i64_udiv6432, @function
.size __compcert_i64_udiv6432,.-__compcert_i64_udiv6432
|
AbsInt/CompCert
| 2,721
|
runtime/powerpc/i64_sar.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Shift right signed
// long long __compcert_i64_sar(long long x, int amount)
// X in r3:r4 (high:low), shift amount in r5 (taken modulo 64);
// result in r3:r4.
.balign 16
.globl __compcert_i64_sar
__compcert_i64_sar:
andi. r5, r5, 63 // take amount modulo 64
cmpwi r5, 32
bge 1f // branch if amount >= 32
subfic r6, r5, 32 // r6 = 32 - amount
srw r4, r4, r5 // RL = XL >>u amount
slw r0, r3, r6 //    | XH << (32 - amount)
or r4, r4, r0
sraw r3, r3, r5 // RH = XH >>s amount
blr
1: addi r6, r5, -32 // amount >= 32
sraw r4, r3, r6 // RL = XH >>s (amount - 32)
srawi r3, r3, 31 // RH = sign extension of XH
blr
.type __compcert_i64_sar, @function
.size __compcert_i64_sar, .-__compcert_i64_sar
|
AbsInt/CompCert
| 2,952
|
runtime/powerpc/i64_utof.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from unsigned long to single float
// float __compcert_i64_utof(unsigned long long x)
// X in r3:r4 (high:low), result in f1. Goes through double precision,
// using "round to odd" to avoid double rounding when X >= 2^53.
.balign 16
.globl __compcert_i64_utof
__compcert_i64_utof:
mflr r9 // save return address (we call utod below)
// Check whether X < 2^53
andis. r0, r3, 0xFFE0 // test bits 53...63 of X
beq 1f
// X is large enough that double rounding can occur.
// Avoid it by nudging X away from the points where double rounding
// occurs (the "round to odd" technique)
rlwinm r5, r4, 0, 21, 31 // extract bits 0 to 11 of X
addi r5, r5, 0x7FF // r5 = (X & 0x7FF) + 0x7FF
// bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
// bits 13-31 of r5 are 0
or r4, r4, r5 // correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 // set to 0 bits 0 to 11 of X
// Convert to double, then round to single
1: bl __compcert_i64_utod
mtlr r9 // restore return address
frsp f1, f1 // final rounding to single precision
blr
.type __compcert_i64_utof, @function
.size __compcert_i64_utof, .-__compcert_i64_utof
|
AbsInt/CompCert
| 3,195
|
runtime/powerpc/i64_sdiv.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Signed division
// long long __compcert_i64_sdiv(long long n, long long d)
// N in r3:r4, D in r5:r6 (high:low); quotient returned in r3:r4.
// Computes |N| / |D| with __compcert_i64_udivmod, then negates the
// quotient if N and D have opposite signs.
.balign 16
.globl __compcert_i64_sdiv
__compcert_i64_sdiv:
mflr r0
stw r0, 4(r1) // save return address in caller's frame
xor r0, r3, r5 // compute sign of result (top bit)
mtctr r0 // stash it in CTR, which udivmod leaves untouched
srawi r0, r3, 31 // take absolute value of N
xor r4, r4, r0 // (i.e. N = N ^ r0 - r0,
xor r3, r3, r0 // where r0 = 0 if N >= 0 and r0 = -1 if N < 0)
subfc r4, r0, r4
subfe r3, r0, r3
srawi r0, r5, 31 // take absolute value of D
xor r6, r6, r0 // (same trick)
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
bl __compcert_i64_udivmod // do unsigned division
lwz r0, 4(r1)
mtlr r0 // restore return address
mfctr r0
srawi r0, r0, 31 // apply expected sign to quotient
xor r6, r6, r0 // RES = Q if CTR >= 0, -Q if CTR < 0
xor r5, r5, r0
subfc r4, r0, r6
subfe r3, r0, r5
blr
.type __compcert_i64_sdiv, @function
.size __compcert_i64_sdiv, .-__compcert_i64_sdiv
|
AbsInt/CompCert
| 3,141
|
runtime/powerpc/i64_smod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Signed remainder
// Signed 64-bit remainder: N % D (C semantics: result sign follows N)
// In:  r3:r4 = N (HI:LO), r5:r6 = D (HI:LO)
// Out: r3:r4 = remainder (HI:LO)
// Clobbers r0, CTR, LR-save slot, plus whatever __compcert_i64_udivmod uses.
.balign 16
.globl __compcert_i64_smod
__compcert_i64_smod:
mflr r0
stw r0, 4(r1) // save return address in caller's frame
mtctr r3 // save sign of result in CTR (sign of N)
srawi r0, r3, 31 // take absolute value of N
xor r4, r4, r0 // (i.e. N = N ^ r0 - r0,
xor r3, r3, r0 // where r0 = 0 if N >= 0 and r0 = -1 if N < 0)
subfc r4, r0, r4 // 64-bit subtract: low word first (sets carry),
subfe r3, r0, r3 // then high word with borrow
srawi r0, r5, 31 // take absolute value of D
xor r6, r6, r0 // (same trick)
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
bl __compcert_i64_udivmod // do unsigned division; remainder comes back in r3:r4
lwz r0, 4(r1)
mtlr r0 // restore return address
mfctr r0
srawi r0, r0, 31 // apply expected sign to remainder
xor r4, r4, r0 // RES = R if CTR >= 0, -R if CTR < 0
xor r3, r3, r0 // (same conditional-negate trick as above)
subfc r4, r0, r4
subfe r3, r0, r3
blr
.type __compcert_i64_smod, @function
.size __compcert_i64_smod, .-__compcert_i64_smod
|
AbsInt/CompCert
| 4,108
|
runtime/powerpc/i64_dtos.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from double float to signed long
// In:  f1 = double to convert
// Out: r3:r4 = HI:LO words of the 64-bit signed result
// |d| < 1 yields 0; out-of-range values saturate to MAX_SINT / MIN_SINT.
// BUGFIX: label 2 previously read `li r4, -1` / `bge 5f`; CR0 there still
// held the stale `cmpwi r5, 11` result (always >=), so `bge 5f` was always
// taken and positive overflow wrongly produced MIN_SINT. The intended test
// is on the saved sign in r10: `cmpwi r10, 0` / `blt 5f`.
.balign 16
.globl __compcert_i64_dtos
__compcert_i64_dtos:
stfdu f1, -16(r1) // extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
addi r1, r1, 16
srawi r10, r3, 31 // save sign of double in r10
// extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52)
rlwinm r5, r3, 12, 21, 31
addi r5, r5, -1075
// check range of exponent
cmpwi r5, -52 // if EXP < -52, abs(double) is < 1.0
blt 1f
cmpwi r5, 11 // if EXP >= 63 - 52, abs(double) is >= 2^63
bge 2f
// extract true mantissa
rlwinm r3, r3, 0, 12, 31 // HI &= ~0xFFF00000
oris r3, r3, 0x10 // HI |= 0x00100000 (implicit leading 1 bit)
// shift it appropriately
cmpwi r5, 0
blt 3f
// if EXP >= 0, shift left by EXP. Note that EXP < 11.
subfic r6, r5, 32 // r6 = 32 - EXP
slw r3, r3, r5
srw r0, r4, r6
or r3, r3, r0
slw r4, r4, r5
b 4f
// if EXP < 0, shift right by -EXP. Note that -EXP <= 52 but can be >= 32.
3: subfic r5, r5, 0 // r5 = -EXP = shift amount
subfic r6, r5, 32 // r6 = 32 - amount
addi r7, r5, -32 // r7 = amount - 32 (see i64_shr.s)
srw r4, r4, r5
slw r0, r3, r6
or r4, r4, r0
srw r0, r3, r7
or r4, r4, r0
srw r3, r3, r5
// apply sign to result (conditionally negate r3:r4 by the sign mask in r10)
4: xor r4, r4, r10
xor r3, r3, r10
subfc r4, r10, r4
subfe r3, r10, r3
blr
// Special cases
1: li r3, 0 // result is 0
li r4, 0
blr
2: cmpwi r10, 0 // result is MAX_SINT or MIN_SINT
blt 5f // depending on sign
li r4, -1 // result is MAX_SINT = 0x7FFF_FFFF
srwi r3, r4, 1
blr
5: lis r3, 0x8000 // result is MIN_SINT = 0x8000_0000
li r4, 0
blr
.type __compcert_i64_dtos, @function
.size __compcert_i64_dtos, .-__compcert_i64_dtos
|
AbsInt/CompCert
| 3,593
|
runtime/powerpc/i64_smulh.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris
//
// Copyright (c) 2016 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Signed multiply-high
// Hacker's Delight section 8.3:
// - compute high 64 bits of the unsigned product X * Y (see i64_umulh.S)
// - subtract X if Y < 0
// - subtract Y if X < 0
// In:  r3:r4 = X (HI:LO), r5:r6 = Y (HI:LO)
// Out: r3:r4 = high 64 bits of the signed 128-bit product (HI:LO)
.balign 16
.globl __compcert_i64_smulh
__compcert_i64_smulh:
// r7:r8:r9 accumulate bits 127:32 of the full unsigned product
mulhwu r9, r4, r6 // r9 = high half of XL.YL
mullw r0, r4, r5 // r0 = low half of XL.YH
addc r9, r9, r0
mulhwu r0, r4, r5 // r0 = high half of XL.YH
addze r8, r0 // r8 = r0 + carry out of previous addc
mullw r0, r3, r6 // r0 = low half of XH.YL
addc r9, r9, r0
mulhwu r0, r3, r6 // r0 = high half of XH.YL
adde r8, r8, r0
li r7, 0
addze r7, r7 // capture carry out of r8 into r7
mullw r0, r3, r5 // r0 = low half of XH.YH
addc r8, r8, r0
mulhwu r0, r3, r5 // r0 = high half of XH.YH
adde r7, r7, r0
// Here r7:r8 contains the high 64 bits of the unsigned product.
// Now, test signs and subtract if needed
srawi r0, r3, 31 // r0 = -1 if X < 0, r0 = 0 if X >= 0
srawi r9, r5, 31 // r9 = -1 if Y < 0, r9 = 0 if Y >= 0
and r3, r3, r9 // set X = 0 if Y >= 0
and r4, r4, r9
and r5, r5, r0 // set Y = 0 if X >= 0
and r6, r6, r0
subfc r8, r4, r8 // subtract X (a no-op if it was masked to 0)
subfe r7, r3, r7
subfc r4, r6, r8 // subtract Y; result lands directly in r3:r4
subfe r3, r5, r7
blr
.type __compcert_i64_smulh, @function
.size __compcert_i64_smulh, .-__compcert_i64_smulh
|
AbsInt/CompCert
| 2,987
|
runtime/powerpc/i64_stod.S
|
// *****************************************************************
//
// The Compcert verified compiler
//
// Xavier Leroy, INRIA Paris-Rocquencourt
//
// Copyright (c) 2013 Institut National de Recherche en Informatique et
// en Automatique.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT
// HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// *********************************************************************
// Helper functions for 64-bit integer arithmetic. PowerPC version.
#include "sysdeps.h"
.text
// Conversion from signed long to double float
// In:  r3:r4 = X (HI:LO), a 64-bit signed integer
// Out: f1 = (double) X
// Uses the classic magic-constant technique: materialize 2^52 + LO and
// 2^84 + (HI ^ 2^31) * 2^32 as raw IEEE-754 bit patterns, subtract the
// biases in FP, then add the two partial values.
.balign 16
.globl __compcert_i64_stod
__compcert_i64_stod:
addi r1, r1, -16 // carve 16-byte scratch area on the stack
lis r5, 0x4330 // 0x4330_0000_0000_0000 = bit pattern of 2^52
li r6, 0
stw r5, 0(r1)
stw r4, 4(r1) // 0(r1) = 2^52 + (double) XL
stw r5, 8(r1)
stw r6, 12(r1) // 8(r1) = 2^52
lfd f1, 0(r1)
lfd f2, 8(r1)
fsub f1, f1, f2 // f1 is XL (unsigned) as a double
lis r5, 0x4530 // 0x4530_0000_0000_0000 = bit pattern of 2^84
lis r6, 0x8000
stw r5, 0(r1) // 0(r1) = 2^84 + ((double)XH - 2^31) * 2^32
add r3, r3, r6 // flip sign bit of XH (biases signed HI by 2^31)
stw r3, 4(r1)
stw r5, 8(r1) // 8(r1) = 2^84 + 2^31 * 2^32
stw r6, 12(r1)
lfd f2, 0(r1)
lfd f3, 8(r1)
fsub f2, f2, f3 // f2 is XH (signed) * 2^32 as a double
fadd f1, f1, f2 // add both to get result
addi r1, r1, 16 // release scratch area
blr
.type __compcert_i64_stod, @function
.size __compcert_i64_stod, .-__compcert_i64_stod
|
acangiano/ruby-benchmark-suite
| 1,127
|
benchmarks/rdoc/ruby_trunk/ia64.s
|
// rb_ia64_flushrs and rb_ia64_bsp is written in IA64 assembly language
// because Intel Compiler for IA64 doesn't support inline assembly.
//
// This file is based on following C program compiled by gcc.
//
// void rb_ia64_flushrs(void) { __builtin_ia64_flushrs(); }
// void *rb_ia64_bsp(void) { return __builtin_ia64_bsp(); }
//
// Note that rb_ia64_flushrs and rb_ia64_bsp works in its own stack frame.
// It's because BSP is updated by br.call/brl.call (not alloc instruction).
// So rb_ia64_flushrs flushes stack frames including caller's one.
// rb_ia64_bsp returns the address next to caller's register stack frame.
//
// See also
// Intel Itanium Architecture Software Developer's Manual
// Volume 2: System Architecture.
//
.file "ia64.c"
.text
.align 16
.global rb_ia64_flushrs#
.proc rb_ia64_flushrs#
// void rb_ia64_flushrs(void)
// Flushes dirty register-stack frames (including the caller's, per the
// header comment) to the backing store.
rb_ia64_flushrs:
.prologue
.body
flushrs // flush dirty RSE frames to the backing store
;;
nop.i 0
br.ret.sptk.many b0 // return to caller
.endp rb_ia64_flushrs#
.align 16
.global rb_ia64_bsp#
.proc rb_ia64_bsp#
// void *rb_ia64_bsp(void)
// Returns the current backing-store pointer (address just past the
// caller's register stack frame — see header comment).
rb_ia64_bsp:
.prologue
.body
nop.m 0
;;
mov r8 = ar.bsp // r8 = return value: current BSP
br.ret.sptk.many b0
.endp rb_ia64_bsp#
.ident "GCC: (GNU) 3.3.5 (Debian 1:3.3.5-13)"
|
accel-sim/gpu-app-collection
| 1,351
|
src/cuda/rodinia/3.1/cuda/mummergpu/experiments/tests.regs.s
|
CONTROL,16,17
Q,15,18
QR,15,18
QRT,15,17
QRTm,15,18
QRTmn,15,18
QRTmr,16,18
QRTmrn,16,18
QRTmrt,17,20
QRTmrtn,17,20
QRTmt,16,20
QRTmtn,16,20
QRTn,15,17
QRTr,16,17
QRTrn,16,17
QRTrt,16,19
QRTrtn,16,19
QRTt,15,19
QRTtn,15,19
QRm,15,18
QRmn,15,18
QRmr,16,18
QRmrn,16,18
QRmrt,18,20
QRmrtn,18,20
QRmt,18,20
QRmtn,18,20
QRn,15,18
QRr,16,18
QRrn,16,18
QRrt,16,18
QRrtn,16,18
QRt,15,18
QRtn,15,18
QT,15,17
QTm,15,18
QTmn,15,18
QTmr,16,18
QTmrn,16,18
QTmrt,16,20
QTmrtn,16,20
QTmt,16,20
QTmtn,16,20
QTn,15,17
QTr,16,17
QTrn,16,17
QTrt,16,19
QTrtn,16,19
QTt,15,19
QTtn,15,19
Qm,15,18
Qmn,15,18
Qmr,16,18
Qmrn,16,18
Qmrt,16,20
Qmrtn,16,20
Qmt,15,20
Qmtn,15,20
Qn,15,18
Qr,16,18
Qrn,16,18
Qrt,16,18
Qrtn,16,18
Qt,15,18
Qtn,15,18
R,15,17
RT,15,16
RTm,15,18
RTmn,15,18
RTmr,16,18
RTmrn,16,18
RTmrt,16,20
RTmrtn,16,20
RTmt,16,20
RTmtn,16,20
RTn,15,16
RTr,16,16
RTrn,16,16
RTrt,16,19
RTrtn,16,19
RTt,15,19
RTtn,15,19
Rm,15,16
Rmn,15,16
Rmr,16,16
Rmrn,16,16
Rmrt,16,18
Rmrtn,16,18
Rmt,15,18
Rmtn,15,18
Rn,15,17
Rr,16,17
Rrn,16,17
Rrt,16,17
Rrtn,16,17
Rt,15,17
Rtn,15,17
T,15,16
Tm,15,18
Tmn,15,18
Tmr,16,18
Tmrn,16,18
Tmrt,16,20
Tmrtn,16,20
Tmt,15,20
Tmtn,15,20
Tn,15,16
Tr,16,16
Trn,16,16
Trt,16,19
Trtn,16,19
Tt,15,19
Ttn,15,19
m,15,16
mn,15,16
mr,16,16
mrn,16,16
mrt,16,18
mrtn,16,18
mt,15,18
mtn,15,18
n,16,17
r,16,17
rn,16,17
rt,16,17
rtn,16,17
t,16,17
tn,16,17
|
accel-sim/gpu-app-collection
| 1,351
|
src/cuda/rodinia/2.0-ft/no-ft-impl/mummergpu/experiments/tests.regs.s
|
CONTROL,16,17
Q,15,18
QR,15,18
QRT,15,17
QRTm,15,18
QRTmn,15,18
QRTmr,16,18
QRTmrn,16,18
QRTmrt,17,20
QRTmrtn,17,20
QRTmt,16,20
QRTmtn,16,20
QRTn,15,17
QRTr,16,17
QRTrn,16,17
QRTrt,16,19
QRTrtn,16,19
QRTt,15,19
QRTtn,15,19
QRm,15,18
QRmn,15,18
QRmr,16,18
QRmrn,16,18
QRmrt,18,20
QRmrtn,18,20
QRmt,18,20
QRmtn,18,20
QRn,15,18
QRr,16,18
QRrn,16,18
QRrt,16,18
QRrtn,16,18
QRt,15,18
QRtn,15,18
QT,15,17
QTm,15,18
QTmn,15,18
QTmr,16,18
QTmrn,16,18
QTmrt,16,20
QTmrtn,16,20
QTmt,16,20
QTmtn,16,20
QTn,15,17
QTr,16,17
QTrn,16,17
QTrt,16,19
QTrtn,16,19
QTt,15,19
QTtn,15,19
Qm,15,18
Qmn,15,18
Qmr,16,18
Qmrn,16,18
Qmrt,16,20
Qmrtn,16,20
Qmt,15,20
Qmtn,15,20
Qn,15,18
Qr,16,18
Qrn,16,18
Qrt,16,18
Qrtn,16,18
Qt,15,18
Qtn,15,18
R,15,17
RT,15,16
RTm,15,18
RTmn,15,18
RTmr,16,18
RTmrn,16,18
RTmrt,16,20
RTmrtn,16,20
RTmt,16,20
RTmtn,16,20
RTn,15,16
RTr,16,16
RTrn,16,16
RTrt,16,19
RTrtn,16,19
RTt,15,19
RTtn,15,19
Rm,15,16
Rmn,15,16
Rmr,16,16
Rmrn,16,16
Rmrt,16,18
Rmrtn,16,18
Rmt,15,18
Rmtn,15,18
Rn,15,17
Rr,16,17
Rrn,16,17
Rrt,16,17
Rrtn,16,17
Rt,15,17
Rtn,15,17
T,15,16
Tm,15,18
Tmn,15,18
Tmr,16,18
Tmrn,16,18
Tmrt,16,20
Tmrtn,16,20
Tmt,15,20
Tmtn,15,20
Tn,15,16
Tr,16,16
Trn,16,16
Trt,16,19
Trtn,16,19
Tt,15,19
Ttn,15,19
m,15,16
mn,15,16
mr,16,16
mrn,16,16
mrt,16,18
mrtn,16,18
mt,15,18
mtn,15,18
n,16,17
r,16,17
rn,16,17
rt,16,17
rtn,16,17
t,16,17
tn,16,17
|
Acedio/snes-forth
| 1,561
|
init.s
|
.p816
.i16
.a16
.segment "HEADERNAME"
.byte "SUPER SOKONYAN"
.segment "ROMINFO"
.byte $30 ; Fast LoROM
.byte 0 ; ROM-only cart
.byte $07 ; 128K ROM
.byte 0,0,0,0 ; No RAM, Japan, Homebrew, Version 0
.word $FFFF,$0000 ; dummy checksums
.segment "VECTORS"
.addr 0,0,0,0,0,nmi,0,0
.addr 0,0,0,0,0,0,reset,0
.segment "UNSIZED"
.include "preamble.inc"
; Reset handler: enter native mode, long-jump into the FastROM bank,
; initialize PPU/CPU registers and the Forth stacks, then run _SNES_MAIN.
; BUGFIX: the #$8F write targeted $4100, which is not a SNES register
; (open bus, so the store had no effect). The canonical target for #$8F
; is INIDISP ($2100): force blanking on while the rest of init runs.
reset:
clc ; native mode
xce
rep #$30 ; A/X/Y 16-bit
; We'll start in the 0th bank, but doing a long jump to `fastrom` (which ld65
; will put into the $80th bank, fast rom) will switch us.
jml fastrom
fastrom:
; Clear PPU registers
ldx #$33
@loop: stz $2100,x
stz $4200,x
dex
bpl @loop
lda #$8F
sta $2100 ; INIDISP: forced blank (was a stray write to open-bus $4100)
ldx #RETURN_STACK_ADDR
txs
ldx #DATA_STACK_ADDR
; Set Data Bank to the current (fast) Program Bank.
phk
plb
.import _SNES_MAIN
jsr _SNES_MAIN
forever:
jmp forever
.export not_implemented
; Trap for unimplemented words: spin forever so the failure is observable
; in a debugger rather than falling through into arbitrary code.
not_implemented:
jmp not_implemented
; NMI vector: runs in bank 0 (hardware vectors live there), so long-jump
; into the FastROM mirror before doing any work.
nmi:
jml nmiFast
nmiFast:
; Thanks to Oziphantom (https://www.youtube.com/watch?v=rPcwGeX_hLs) for the NMI overview :)
; = Save registers =
; First save the data page and set it to the current code page (so we know that
; $4210 is indeed the ACK register).
phb
phk
plb
; ACK NMI
A8
bit $4210 ; reading RDNMI acknowledges the NMI
; Make sure we save all 16 bits of each register.
A16
pha
phx
phy
phd
; Set direct page to 0 (Forth expects no offset).
lda #0000
tcd
; Call Forth NMI handler.
.import _SNES_NMI
jsr _SNES_NMI
; Restore registers.
; NOTE: must mirror the save order above exactly (D, Y, X, A, then DB).
A16
pld
ply
plx
pla
plb
rti
|
Acedio/snes-forth
| 40,346
|
tad-audio.s
|
;; Terrific Audio Driver ca65 API
; This file MUST be recompiled if the memory map changes.
;
; SPDX-FileCopyrightText: © 2024 Marcus Rowe <undisbeliever@gmail.com>
; SPDX-License-Identifier: Zlib
;
; Copyright © 2024 Marcus Rowe <undisbeliever@gmail.com>
;
; This software is provided 'as-is', without any express or implied warranty. In
; no event will the authors be held liable for any damages arising from the use of
; this software.
;
; Permission is granted to anyone to use this software for any purpose, including
; commercial applications, and to alter it and redistribute it freely, subject to
; the following restrictions:
;
; 1. The origin of this software must not be misrepresented; you must not
; claim that you wrote the original software. If you use this software in
; a product, an acknowledgment in the product documentation would be
; appreciated but is not required.
;
; 2. Altered source versions must be plainly marked as such, and must not be
; misrepresented as being the original software.
;
; 3. This notice may not be removed or altered from any source distribution.
.setcpu "65816"
.smart
; Ensure autoimport is disabled
.autoimport -
.export Tad_Init : far, Tad_Process : far, Tad_FinishLoadingData : far
.export Tad_QueueCommand, Tad_QueueCommandOverride
.export Tad_QueuePannedSoundEffect, Tad_QueueSoundEffect
.export Tad_LoadSong, Tad_LoadSongIfChanged, Tad_GetSong, Tad_ReloadCommonAudioData
.export Tad_SongsStartImmediately, Tad_SongsStartPaused
.export Tad_GlobalVolumesResetOnSongStart, Tad_GlobalVolumesPersist
.export Tad_SetTransferSize
.export Tad_IsLoaderActive, Tad_IsSongLoaded, Tad_IsSfxPlaying, Tad_IsSongPlaying
.exportzp Tad_sfxQueue_sfx, Tad_sfxQueue_pan
.export Tad_flags, Tad_audioMode
;; =======
;; DEFINES
;; =======
;; Memory Map
;; ----------
;;
;; `tad-audio.s` requires either a `LOROM` or `HIROM` symbol to determine the memory map used by the ROM.
.if .defined(LOROM) && .defined(HIROM)
.error "Cannot use HIROM and LOROM at the same time"
.endif
.if ! (.defined(LOROM) || .defined(HIROM))
.error "Unknown memory map: Missing LOROM or HIROM define"
.endif
;; Segments
;; --------
;;
;; The following optional defines are used to determine the segment to place the code in.
;;
;; * `TAD_PROCESS_SEGMENT` defines the segment to store the subroutines that processes the queues
;; and loads data into Audio-RAM (`Tad_Init`, `Tad_Process`, `Tad_FinishLoadingData`).
;; * The exported subroutines in this segment are called using `JSL` long addressing.
;; * If `TAD_PROCESS_SEGMENT` is undefined, `TAD_CODE_SEGMENT` is used.
;;
;; * `TAD_CODE_SEGMENT` defines the segment to store the remaining subroutines.
;; * The subroutines in this segment are called using `JSR` absolute addressing.
;; * If `TAD_CODE_SEGMENT` is undefined, "CODE" will be used.
;;
;;
;; NOTE: Because ca65 only allows numbers in `-D name=value` command line arguments, the only
;; way to set these defines is to create a new source file that defines `TAD_CODE_SEGMENT`
;; and/or `TAD_PROCESS_SEGMENT` and then includes `tad-audio.s`.
;;
;; For example:
;;
;; .define TAD_CODE_SEGMENT "CODE1"
;; .define TAD_PROCESS_SEGMENT "CODE3"
;;
;; .include "../terrific-audio-driver/audio-driver/ca65-api/tad-audio.s"
;;
.if .not .match({TAD_CODE_SEGMENT}, {""})
.define TAD_CODE_SEGMENT "CODE"
.endif
.if .not .match({TAD_PROCESS_SEGMENT}, {""})
.define TAD_PROCESS_SEGMENT TAD_CODE_SEGMENT
.endif
;; ===========
;; Binary Data
;; ===========
;;
;; These 3 files MUST be embedded (using `.incbin`) into the ROM if the developer uses a custom
;; `LoadAudioData` callback.
;;
;; Terrific Audio Driver spc700 Loader (loader.bin)
.import Tad_Loader_Bin
.importzp Tad_Loader_SIZE
;; Terrific Audio Driver spc700 driver (audio-driver.bin)
.import Tad_AudioDriver_Bin, Tad_AudioDriver_SIZE
.assert Tad_Loader_SIZE > 64 && Tad_Loader_SIZE < 128, lderror, "Invalid Tad_Loader_Bin size"
.assert .bankbyte(Tad_Loader_Bin) = .bankbyte(Tad_Loader_Bin + Tad_Loader_SIZE), lderror, "Tad_Loader_Bin does not fit inside a single bank"
.assert Tad_AudioDriver_SIZE > $600 && Tad_AudioDriver_SIZE < $d00, lderror, "Invalid Tad_AudioDriver_Bin size"
; `Tad_AudioDriver_Bin` can cross bank boundaries
;; =========
;; CALLBACKS
;; =========
;; LoadAudioData callback
;;
;; IN: A = 0 - Common audio data (MUST return carry set)
;; IN: A >= 1 - Song data (might be invalid)
;; OUT: Carry set if input (`A`) was valid
;; OUT: A:X = far address
;; OUT: Y = size
;;
;; Called with JSL long addressing (returns with RTL).
.a8
.i16
;; DB access registers
.import LoadAudioData: Far
;; =========
;; CONSTANTS
;; =========
;; Address to store the loader (in Audio-RAM).
;; Address (in Audio-RAM) to execute after loading the Loader.
;; MUST match LOADER_ADDR in `audio-driver/src/common_memmap.wiz`.
TAD_LOADER_ARAM_ADDR = $0200
;; Minimum transfer size accepted by `Tad_SetTransferSize`
;;
;; MUST BE > 0
TAD_MIN_TRANSFER_PER_FRAME = 32
;; Maximum transfer size accepted by `Tad_SetTransferSize`
;;
;; The loader can transfer ~849 bytes per 60Hz frame SlowROM or FastROM
TAD_MAX_TRANSFER_PER_FRAME = 800
;; ========
;; IO Ports
;; ========
;; IO communication protocol version.
;;
;; Used by `tad-compiler ca65-export` to verify the IO protocol in `tad-audio.s` matches the audio-driver.
;;
;; This constant MUST be increased if `LOADER_ADDR` or the IO Communication protocol changes.
.export TAD_IO_VERSION : abs = 20
; MUST match `audio-driver/src/io-commands.wiz`
.enum TadCommand
PAUSE = 0
PAUSE_MUSIC_PLAY_SFX = 2
UNPAUSE = 4
PLAY_SOUND_EFFECT = 6
STOP_SOUND_EFFECTS = 8
SET_MAIN_VOLUME = 10
SET_MUSIC_CHANNELS = 12
SET_SONG_TIMER = 14
SET_GLOBAL_MUSIC_VOLUME = 16
SET_GLOBAL_SFX_VOLUME = 18
SET_GLOBAL_VOLUMES = 20
.endenum
TAD_MAX_PAN = 128
TAD_CENTER_PAN = TAD_MAX_PAN / 2
;; MUST match `audio-driver/src/io-commands.wiz`
.scope TadIO_ToDriver
;; The command to execute.
;;
;; iiicccci
;; cccc = command
;; i = command id, MUST be different on every command.
;; Used to detect when a new command has been sent to the driver.
;;
;; NOTES:
;; * The command will only be execute if the `command` byte has changed.
;; * This value MUST be written last.
;; * The command and parameter bytes MUST NOT change unless the previous command
;; has been acknowledged.
COMMAND_PORT = $2140 ; APUIO0
COMMAND_MASK = %00011110
COMMAND_I_MASK = %11100001
;; The first command parameter port
PARAMETER0_PORT = $2141 ; APUIO1
;; The second command parameter port
PARAMETER1_PORT = $2142 ; APUIO2
;; Writing `SWITCH_TO_LOADER` to this port should stop execution and start the loader.
;;
;; If the audio-driver is running; if the `SWITCH_TO_LOADER_BIT` is set,
;; the audio driver will stop and execute the loader.
;;
;; If the loader is in the middle of a transfer and both the `SWITCH_TO_LOADER_BIT`
;; and MSB (bit 7) bits are set, the loader will restart.
SWITCH_TO_LOADER_PORT = $2143 ; APUIO3
SWITCH_TO_LOADER_BIT = 5
SWITCH_TO_LOADER = $80 | (1 << SWITCH_TO_LOADER_BIT)
.endscope
;; MUST match `audio-driver/src/io-commands.wiz`
.scope TadIO_ToScpu
;; Audio driver command acknowledgment.
;;
;; Acknowledgment of the `ToDriver.command` byte. Not used in the loader.
;;
;; After the command has been processed, the `IO.ToDriver.command` value will be written to this port.
COMMAND_ACK_PORT = $2140 ; APUIO0
;; The mode the S-SMP is currently executing.
;;
;; Used by both the loader and the audio-driver.
;;
;; NOTE: The IPL sets this value after at has cleared the zero-page.
;; Do not read this value immediately after reset.
;; Make sure enough time has passed for the IPL to set IO Port 1
;; to $bb before reading this port.
MODE_PORT = $2141 ; APUIO1
;; The S-SMP is at the start of the IPL, waiting for the ready signal.
MODE_IPL = $bb
;; The S-SMP is running the loader.
MODE_LOADER = $4c ; 'L', Loader.LOADER_READY_L
;; The S-SMP is running the audio-driver.
MODE_AUDIO_DRIVER = $61 ; 'a'
.endscope
;; MUST match `audio-driver/src/io-commands.wiz`
.scope TadLoaderDataType
CODE = 0
COMMON_DATA = 1
SONG_DATA_FLAG = 1 << 7
PLAY_SONG_FLAG = 1 << 6
RESET_GLOBAL_VOLUMES_FLAG = 1 << 5
STEREO_FLAG = 1 << 1
SURROUND_FLAG = 1 << 0
.endscope
;; MUST match `audio-driver/src/io-commands.wiz`
.scope TadIO_Loader_Init
LOADER_DATA_TYPE_PORT = $2141 ; APUIO1
READY_PORT_L = $2142 ; APUIO2
READY_PORT_H = $2143 ; APUIO3
READY_PORT_HL = $2142 ; APUIO2 & APUIO3
LOADER_READY_L = %01001100 ; 'L'
LOADER_READY_H = %01000100 ; 'D'
LOADER_READY_HL = LOADER_READY_L | (LOADER_READY_H << 8)
.endscope
;; MUST match `audio-driver/src/io-commands.wiz`
.scope TadIO_Loader
DATA_PORT_L = $2141 ; APUIO1
DATA_PORT_H = $2142 ; APUIO2
SPINLOCK_PORT = $2143 ; APUIO3
;; The spinlock value when the audio driver starts playing a song
SPINLOCK_INIT_VALUE = 0
;; Only the lower 4 bits of the spinlock should be set while sending data to the loader
SPINLOCK_MASK = $0f
;; Signal to the loader that the transfer has completed.
SPINLOCK_COMPLETE = $80
;; If this value is written to the spinlock, the loader will restart;
SPINLOCK_SWITCH_TO_LOADER = TadIO_ToDriver::SWITCH_TO_LOADER
.endscope
.enum TadState
NULL = $00
;; Waiting for loader to send the ready signal before loading common-audio-data
WAITING_FOR_LOADER_COMMON = $7b
;; Waiting for loader to send the ready signal before loading song data
WAITING_FOR_LOADER_SONG = $7c
;; Loading common audio data.
LOADING_COMMON_AUDIO_DATA = $7d
;; Loading a song and the TadLoaderDataType::PLAY_SONG_FLAG was clear.
LOADING_SONG_DATA_PAUSED = $7e
;; Loading a song and the TadLoaderDataType::PLAY_SONG_FLAG was set.
LOADING_SONG_DATA_PLAY = $7f
;; Song is loaded into Audio-RAM and the audio driver is paused.
;; No play-sound-effect commands will be sent when the driver is paused.
PAUSED = $80
;; Song is loaded into Audio-RAM and the audio driver is playing sfx (song paused).
PLAYING_SFX = $81
;; Song is loaded into Audio-RAM and the audio driver is playing the song.
PLAYING = $82
.endenum
TAD__FIRST_WAITING_STATE = TadState::WAITING_FOR_LOADER_COMMON
TAD__FIRST_LOADING_STATE = TadState::LOADING_COMMON_AUDIO_DATA
TAD__FIRST_LOADING_SONG_STATE = TadState::LOADING_SONG_DATA_PAUSED
.scope TadFlags
RELOAD_COMMON_AUDIO_DATA = 1 << 7
PLAY_SONG_IMMEDIATELY = 1 << 6
RESET_GLOBAL_VOLUMES_ON_SONG_START = 1 << 5
;; A mask for the flags that are sent to the loader
_ALL_FLAGS = RELOAD_COMMON_AUDIO_DATA | PLAY_SONG_IMMEDIATELY | RESET_GLOBAL_VOLUMES_ON_SONG_START
.endscope
.enum TadAudioMode
MONO = 0
STEREO = 1
SURROUND = 2
.endenum
TAD_N_AUDIO_MODES = 3
;; Default values
;; ==============
; Using a single symbol to enable custom defaults as I am unable to detect if a `.define`
; exists using an if statement.
;
; I recommend using a `.define` for custom defaults so `TadFlags` and `TadAudioMode` values
; can be referenced before they are defined.
.ifndef TAD_CUSTOM_DEFAULTS
;; Default TAD flags
;; MUST NOT set RELOAD_COMMON_AUDIO_DATA
TAD_DEFAULT_FLAGS = TadFlags::PLAY_SONG_IMMEDIATELY
;; Starting audio mode
TAD_DEFAULT_AUDIO_MODE = TadAudioMode::MONO
;; Default number of bytes to transfer to Audio-RAM per `Tad_Process` call.
;;
;; MUST be between the TAD_MIN_TRANSFER_PER_FRAME and TAD_MAX_TRANSFER_PER_FRAME
TAD_DEFAULT_TRANSFER_PER_FRAME = 256
.endif
;; =========
;; Variables
;; =========
.bss
;; The current audio driver state
;; (`TadState` enum)
TadPrivate_state: .res 1
;; `TadFlags` bitfield
;; (see `TadFlags` namespace)
Tad_flags: .res 1
;; Mono/Stereo/Surround audio mode
;; (`TadAudioMode` enum)
Tad_audioMode: .res 1
;; Number of bytes to transfer per `Tad_Process` call
;;
;; MUST be > 0
TadPrivate_bytesToTransferPerFrame: .res 2
;; The previous `TadIO_ToScpu::COMMAND_PORT` sent to the S-SMP audio driver.
TadPrivate_previousCommand: .res 1
;; ---------------------------------------------------
;; Queue 1 - remaining data to transfer into Audio-RAM
;; ---------------------------------------------------
.bss
;; A far pointer to the remaining data to transfer
TadPrivate_dataToTransfer_addr: .res 2
TadPrivate_dataToTransfer_bank: .res 1
;; The remaining number of bytes to transfer
TadPrivate_dataToTransfer_size: .res 2
;; The previous value written to the loader spinLock
TadPrivate_dataToTransfer_prevSpinLock: .res 1
;; ----------------------------------------------
;; Queue 2 - The next song to load into Audio-RAM
;; ----------------------------------------------
.bss
;; The next song to load into Audio-RAM
;; Used by the `WAITING_FOR_LOADER_*` states
;; If this value is 0 or an invalid song, a blank silent song will be loaded instead.
TadPrivate_nextSong: .res 1
;; ------------------------------------------------------
;; Queue 3 - The next command to send to the audio driver
;; ------------------------------------------------------
.bss
;; The next `TadCommand` to send to the audio driver.
;; If this value is negative, the queue is empty.
TadPrivate_nextCommand_id: .res 1
;; The two parameters of the next command (if any)
TadPrivate_nextCommand_parameter0: .res 1
TadPrivate_nextCommand_parameter1: .res 1
;; ---------------------------------------
;; Queue 4 - The next sound effect to play
;; ---------------------------------------
.zeropage
;; see tad-audio.inc
Tad_sfxQueue_sfx: .res 1
Tad_sfxQueue_pan: .res 1
;; Memory Map Asserts
;; ==================
.bss
.assert ((* > $100) && (* < $2000)) || ((* > $7e0100) && (* < $7e2000)), lderror, ".bss is not in lowram"
;; ==================
;; Loader subroutines
;; ==================
.segment TAD_PROCESS_SEGMENT
;; Transfer and execute Loader using the IPL
;;
;; Implements the S-SMP IPL ROM upload handshake: wait for $bbaa, send the
;; destination address and start command, stream Tad_Loader_Bin one byte at
;; a time, then issue an execute command at TAD_LOADER_ARAM_ADDR.
;;
;; REQUIRES: S-SMP reset and no data has been written to it yet
;;
;; This macro MUST only be called once. There is no way to reset the S-SMP and restart the IPL.
;;
;; A8
;; I16
;; DB access registers
.macro TadPrivate_Loader_TransferLoaderViaIpl
.assert .asize = 8, error
.assert .isize = 16, error
APUIO0 = $2140
APUIO1 = $2141
APUIO2 = $2142
APUIO3 = $2143
; Clear start command port (just in case APUIO0 has $cc in it)
; SOURCE: `blarggapu.s` from lorom-template, originally written by blargg (Shay Green)
stz APUIO0
; Wait for ready signal ($aa in APUIO0, $bb in APUIO1, read as one 16-bit value)
ldy #$bbaa
:
cpy APUIO0
bne :-
ldx #TAD_LOADER_ARAM_ADDR
lda #$cc
stx APUIO2 ; destination ARAM address
sta APUIO1 ; non-zero = write data to address
sta APUIO0 ; New data command (non-zero and APUIO0 + more than 2, or $cc on the first transfer)
; Wait for a response from the IPL
:
cmp APUIO0
bne :-
; Transfer the data
.assert Tad_Loader_SIZE < $ff, error, "Cannot fit Tad_Loader_SIZE in an 8 bit index"
sep #$30
.i8
ldx #0
@IplLoop:
; Send the next byte to the IPL
lda f:Tad_Loader_Bin,x
sta APUIO1
; Tell the IPL the next byte is ready
stx APUIO0
; Wait for a response from the IPL
:
cpx APUIO0
bne :-
inx
cpx #Tad_Loader_SIZE
bcc @IplLoop
rep #$10
.i16
; Send an execute program command to the IPL
ldx #TAD_LOADER_ARAM_ADDR
stx APUIO2 ; A-RAM address
stz APUIO1 ; zero = execute program at A-RAM address
lda #Tad_Loader_SIZE + 2
sta APUIO0 ; New data command (must be +2 the previous APUIO0 write)
.endmacro
;; Sends a TadLoaderDataType byte to the loader if the loader is ready
;;
;; Assumes loader just started OR a `SWITCH_TO_LOADER` message was sent to the audio driver/loader.
;;
;; IN: A = TadLoaderDataType value
;; OUT: carry = loader is ready and TadLoaderDataType sent
;;
.a8
.i16
;; DB access registers
.proc TadPrivate_Loader_CheckReadyAndSendLoaderDataType
; Test if the loader is ready
; (16-bit compare of both READY ports against the "ready" handshake value)
ldx #TadIO_Loader_Init::LOADER_READY_HL
cpx TadIO_Loader_Init::READY_PORT_HL
bne ReturnFalse
; Send the ready signal and the TadLoaderDataType
sta TadIO_Loader_Init::LOADER_DATA_TYPE_PORT
lda #TadIO_Loader_Init::LOADER_READY_L
sta TadIO_Loader_Init::READY_PORT_L
lda #TadIO_Loader_Init::LOADER_READY_H
sta TadIO_Loader_Init::READY_PORT_H
; The S-CPU must wait for the loader to write 0 to the spinlock before transferring data.
stz TadPrivate_dataToTransfer_prevSpinLock
; return true
sec
rts
ReturnFalse:
; Loader not ready: return false (carry clear)
clc
rts
.endproc
;; Set the data transfer queue
;;
;; IN: A:X = far address
;; IN: Y = size
.a8
.i16
;; DB access registers
.proc TadPrivate_Loader_SetDataToTransfer
; Record the far address (A:X) and size (Y) of the next block of data
; to stream to Audio-RAM via TadPrivate_Loader_TransferData.
stx TadPrivate_dataToTransfer_addr
sta TadPrivate_dataToTransfer_bank
sty TadPrivate_dataToTransfer_size
rts
.endproc
;; Transfer data to the audio loader.
;;
;; ASSUMES: `check_ready_and_send_loader_data_type` and `set_data_to_transfer` were previously called.
;;
;; NOTE: This function may read one byte past the end of the transfer queue.
;;
;; OUT: carry set if all data in the transfer queue was sent to Audio-RAM.
;;
.a8
.i16
;; DB access lowram
.proc TadPrivate_Loader_TransferData
; Early exit if the loader is not ready
;
; This test doubles as a lock for the previous transfer.
;
; This also prevents a freeze in `process()` if the loader has crashed/glitched.
; (`finish_loading_data()` will freeze if the loader has crashed/glitched.)
lda TadPrivate_dataToTransfer_prevSpinLock
cmp f:TadIO_Loader::SPINLOCK_PORT
bne @ReturnFalse
; Save D and DB; both are modified below.
phd
phb
rep #$30
.a16
; Calculate number of words to read
; (clamp transfer size to the per-frame bandwidth budget)
lda TadPrivate_dataToTransfer_size
cmp TadPrivate_bytesToTransferPerFrame
bcc :+
lda TadPrivate_bytesToTransferPerFrame
:
inc ; required (round byte count up to a whole number of words)
lsr
; Prevent corrupting all of Audio-RAM if number of words == 0
bne :+
inc
:
; Store word to read in X
tax
; Reverse subtract TadPrivate_dataToTransfer_size (with clamping)
asl ; convert number of words to number of bytes
eor #$ffff
sec
adc TadPrivate_dataToTransfer_size
bcs :+
lda #0 ; clamp remaining size to 0 on underflow
:
sta TadPrivate_dataToTransfer_size
; Point Direct Page at the APU I/O ports so the loop can use fast `z:` addressing.
lda #$2100
tcd
; D = $2100
sep #$20
.a8
lda TadPrivate_dataToTransfer_bank
ldy TadPrivate_dataToTransfer_addr
pha
plb
; DB = TadPrivate_dataToTransfer_bank
@Loop:
; x = number of words remaining
; y = data address (using y to force addr,y addressing mode)
lda a:0,y
sta z:.lobyte(TadIO_Loader::DATA_PORT_L)
; The bank overflow test must be done here as `TadPrivate_dataToTransfer_addr` might point to an odd memory address.
iny
beq @BankOverflow_1
@BankOverflow_1_Resume:
lda a:0,y
sta z:.lobyte(TadIO_Loader::DATA_PORT_H)
; Increment this spinlock value
;
; The upper 4 bits of the spinlock must be clear.
; Cannot be 0. Zero is used to spinlock the loader init before this loop starts
; (see Loader Step 3 in `terrific-audio-driver/audio-driver/src/io-commands.wiz`)
.assert ($ffff & 7) + 1 < TadIO_Loader::SPINLOCK_MASK, error
tya ; y = address of data, it should always increment by 2
and #7
inc ; spinlock value = (low bits of address) + 1, always in 1..8
sta z:.lobyte(TadIO_Loader::SPINLOCK_PORT)
iny
beq @BankOverflow_2
@BankOverflow_2_Resume:
dex
beq @EndLoop
; Spinloop until the S-SMP has acknowledged the data
; (A still holds the spinlock value just written)
:
cmp z:.lobyte(TadIO_Loader::SPINLOCK_PORT)
bne :-
bra @Loop
@EndLoop:
plb
pld
; DB restored
; D = 0
sty TadPrivate_dataToTransfer_addr
sta TadPrivate_dataToTransfer_prevSpinLock
ldy TadPrivate_dataToTransfer_size
bne @ReturnFalse
; End of data transfer
; Wait for Loader to acknowledge the last write
:
cmp f:TadIO_Loader::SPINLOCK_PORT
bne :-
; No more data to transfer
lda #TadIO_Loader::SPINLOCK_COMPLETE
sta f:TadIO_Loader::SPINLOCK_PORT
sec
rts
@ReturnFalse:
; Transfer incomplete (or loader busy): return false (carry clear)
clc
rts
@BankOverflow_1:
jsr TadPrivate_Loader_GotoNextBank
bra @BankOverflow_1_Resume
@BankOverflow_2:
; Must save/restore A, it holds the spinlock
pha
jsr TadPrivate_Loader_GotoNextBank
pla
bra @BankOverflow_2_Resume
.endproc
;; Advance to the next bank
;;
;; MUST only be called by TadPrivate_Loader_TransferData
;;
;; ASSUMES: Y = 0 (Y addr overflowed to 0)
;;
;; IN: Y = 0
;; IN: DB = TadPrivate_dataToTransfer_bank
;;
;; OUT: Y = new address
;; OUT: DB = new bank
;;
;; KEEP: X
.a8
.i16
;; DB = TadPrivate_dataToTransfer_bank
.proc TadPrivate_Loader_GotoNextBank
; Read the current data bank (DB) into A, advance it by one.
phb
pla
inc
sta f:TadPrivate_dataToTransfer_bank
pha
plb
; DB = new TadPrivate_dataToTransfer_bank value
; MUST NOT CHANGE X
; Y = 0
.if .defined(LOROM)
; LoROM: banks $7e/$7f are Work-RAM (start at 0); all other banks map ROM at $8000.
and #$fe
cmp #$7e
beq :+
; Bank is not Work-RAM
ldy #$8000
:
.elseif .defined(HIROM)
; HiROM: banks $00-$3f (mirrored) contain registers in the lower half;
; skip to the ROM half of the bank.
and #$7f
cmp #$40
bcs :+
; Bank is a register bank
; set Y to the first ROM address
ldy #$8000
:
.else
.error "Unknown memory map"
.endif
; Y = 0 or $8000
rts
.endproc
;; OUT: carry set if state is LOADING_*
;; A8
.macro TadPrivate_IsLoaderActive
.assert .asize = 8, error
; All non-loading states must compare below TAD__FIRST_LOADING_STATE
; (the $7f mask strips the "driver active" high bit from PAUSED/PLAYING).
.assert TadState::NULL < TAD__FIRST_LOADING_STATE, error
.assert TadState::WAITING_FOR_LOADER_COMMON < TAD__FIRST_LOADING_STATE, error
.assert TadState::WAITING_FOR_LOADER_SONG < TAD__FIRST_LOADING_STATE, error
.assert (TadState::PAUSED & $7f) < TAD__FIRST_LOADING_STATE, error
.assert (TadState::PLAYING & $7f) < TAD__FIRST_LOADING_STATE, error
lda TadPrivate_state
and #$7f
cmp #TAD__FIRST_LOADING_STATE
.endmacro
;; ==========
;; Public API
;; ==========
;; -------------------------------
;; TAD_PROCESS_SEGMENT subroutines
;; -------------------------------
.segment TAD_PROCESS_SEGMENT
; JSL/RTL subroutine
.a8
.i16
; DB unknown
.proc Tad_Init : far
; Switch DB to bank $80 so APU registers are accessible.
phb
lda #$80
pha
plb
; DB = $80
TadPrivate_Loader_TransferLoaderViaIpl
; Set default settings
.assert (TAD_DEFAULT_FLAGS) & TadFlags::RELOAD_COMMON_AUDIO_DATA = 0, error, "RELOAD_COMMON_AUDIO_DATA flag must not be use in TAD_DEFAULT_FLAGS"
.assert (TAD_DEFAULT_FLAGS) & TadFlags::_ALL_FLAGS = (TAD_DEFAULT_FLAGS), error, "Invalid TAD_DEFAULT_FLAGS"
.assert (TAD_DEFAULT_AUDIO_MODE) >= 0 && (TAD_DEFAULT_AUDIO_MODE) < TAD_N_AUDIO_MODES, error, "Invalid TAD_DEFAULT_AUDIO_MODE"
; Write flags and audioMode with a single 16-bit store (they are adjacent bytes).
.assert Tad_flags + 1 = Tad_audioMode, error
ldx #(TAD_DEFAULT_FLAGS) | ((TAD_DEFAULT_AUDIO_MODE) << 8)
stx Tad_flags
ldx #TAD_DEFAULT_TRANSFER_PER_FRAME
stx TadPrivate_bytesToTransferPerFrame
; Queue the audio driver binary for transfer to Audio-RAM.
lda #.bankbyte(Tad_AudioDriver_Bin)
ldx #.loword(Tad_AudioDriver_Bin)
ldy #Tad_AudioDriver_SIZE
jsr TadPrivate_Loader_SetDataToTransfer
; Mark the command queue and SFX queue as empty ($ff = empty).
lda #$ff
sta TadPrivate_nextCommand_id
sta Tad_sfxQueue_sfx
stz TadPrivate_nextSong
; Busy-wait until the loader is ready, then stream the driver code.
@DataTypeLoop:
lda #TadLoaderDataType::CODE
jsr TadPrivate_Loader_CheckReadyAndSendLoaderDataType
bcc @DataTypeLoop
@TransferLoop:
jsr TadPrivate_Loader_TransferData
bcc @TransferLoop
lda #TadState::WAITING_FOR_LOADER_COMMON
sta TadPrivate_state
plb
; DB restored
rtl
.endproc
;; Sends a command to the audio driver.
;;
;; REQUIRES: state == PAUSED or state == PLAYING.
;; REQUIRES: The previous command has been processed by the audio-driver.
;; REQUIRES: `TadPrivate_nextCommand_id` is not a play-sound-effect command.
;; REQUIRES: `TadPrivate_nextCommand_id` is a valid command.
;;
;; IN: Y = TadPrivate_nextCommand_id
.a8
.i8
;; DB access lowram
.macro TadPrivate_Process_SendCommand
.assert .asize = 8, error
.assert .isize = 8, error
; Write the two command parameters first; the driver reads them when it sees
; the COMMAND_PORT change.
lda TadPrivate_nextCommand_parameter0
sta f:TadIO_ToDriver::PARAMETER0_PORT
lda TadPrivate_nextCommand_parameter1
sta f:TadIO_ToDriver::PARAMETER1_PORT
lda TadPrivate_previousCommand
and #TadIO_ToDriver::COMMAND_I_MASK ; Clear the non i bits of the command
eor #TadIO_ToDriver::COMMAND_I_MASK ; Flip the i bits
ora TadPrivate_nextCommand_id ; Set the c bits
sta f:TadIO_ToDriver::COMMAND_PORT
sta TadPrivate_previousCommand
cpy #TadCommand::UNPAUSE + 1
bcs @NotPauseOrPlay
; Change state if the command is a pause or play command
; (state is derived arithmetically from the command id; the asserts pin the mapping)
.assert TadCommand::PAUSE = 0, error
.assert TadCommand::PAUSE_MUSIC_PLAY_SFX = 2, error
.assert TadCommand::UNPAUSE = 4, error
.assert (TadCommand::PAUSE >> 1) & 3 | $80 = TadState::PAUSED, error
.assert (TadCommand::PAUSE_MUSIC_PLAY_SFX >> 1) & 3 | $80 = TadState::PLAYING_SFX, error
.assert (TadCommand::UNPAUSE >> 1) & 3 | $80 = TadState::PLAYING, error
lsr
and #3
ora #$80
sta TadPrivate_state
@NotPauseOrPlay:
; Reset command queue ($ff = empty)
lda #$ff
sta TadPrivate_nextCommand_id
.endmacro
;; Send a play-sound-effect command to the audio driver.
;;
;; REQUIRES: state == PLAYING
;; REQUIRES: The previous command has been processed by the audio-driver.
;;
;; IN: A = Tad_sfxQueue_sfx
;;
;; A8
;; I8
;; DB access lowram
.macro TadPrivate_Process_SendSfxCommand
.assert .asize = 8, error
.assert .isize = 8, error
; parameter 0 = sfx_id
sta f:TadIO_ToDriver::PARAMETER0_PORT
; parameter 1 = pan (out-of-range pan values fall back to center)
lda Tad_sfxQueue_pan
cmp #TAD_MAX_PAN + 1
bcc :+
lda #TAD_CENTER_PAN
:
sta f:TadIO_ToDriver::PARAMETER1_PORT
; Send play-sound-effect command
lda TadPrivate_previousCommand
and #TadIO_ToDriver::COMMAND_I_MASK ; Clear the non i bits of the command
eor #TadIO_ToDriver::COMMAND_I_MASK ; Flip the i bits
ora #TadCommand::PLAY_SOUND_EFFECT ; Set the c bits
sta f:TadIO_ToDriver::COMMAND_PORT
sta TadPrivate_previousCommand
; Reset the SFX queue ($ff = empty / lowest priority)
ldy #$ff
sty Tad_sfxQueue_sfx
sty Tad_sfxQueue_pan
.endmacro
; JSL/RTL subroutine
.a8
.i16
; DB access lowram
.proc Tad_Process : far
; Per-frame driver pump: dispatch on TadPrivate_state.
; Negative states ($80+) mean the driver is loaded (paused/playing).
.assert TadState::PAUSED = $80, error
.assert TadState::PLAYING > $80, error
lda TadPrivate_state
bpl @NotLoaded
; Playing or paused state
sep #$10
.i8
tax
; Only send a new command once the driver has acknowledged the previous one.
lda TadPrivate_previousCommand
cmp f:TadIO_ToScpu::COMMAND_ACK_PORT
bne @Return_I8
; Previous command has been processed
; Check command queue (id is negative when the queue is empty)
ldy TadPrivate_nextCommand_id
bpl @SendCommand
; X = TadPrivate_state
; dex/bpl: only states >= $81 (PLAYING / PLAYING_SFX) may send sound effects.
.assert TadState::PAUSED < $81, error
.assert TadState::PLAYING >= $81, error
.assert TadState::PLAYING_SFX >= $81, error
dex
bpl @Return_I8
; Playing state
lda Tad_sfxQueue_sfx
cmp #$ff
beq @Return_I8
TadPrivate_Process_SendSfxCommand
@Return_I8:
rep #$10
.i16
rtl
.a8
.i8
@SendCommand:
TadPrivate_Process_SendCommand
rep #$10
.i16
rtl
@NotLoaded:
; Song is not loaded into Audio-RAM
; Test if state is WAITING_FOR_LOADER_* or LOADING_*
.assert TAD__FIRST_LOADING_STATE > TAD__FIRST_WAITING_STATE, error
.assert TAD__FIRST_LOADING_STATE = TadState::WAITING_FOR_LOADER_SONG + 1, error
cmp #TAD__FIRST_LOADING_STATE
bcs TadPrivate_Process_Loading
cmp #TAD__FIRST_WAITING_STATE
bcs TadPrivate_Process_WaitingForLoader
; TadState is null
rtl
.endproc
;; Process the WAITING_FOR_LOADER_* states
;;
;; return using RTL
.a8
.i16
;; DB access lowram
.proc TadPrivate_Process_WaitingForLoader ; RTL
phb
; Setting DB to access registers as it:
; * Simplifies `TadPrivate_Loader_CheckReadyAndSendLoaderDataType`
; * Ensures `LoadAudioData` is called with a fixed data bank
; (NOTE: `LoadAudioData` is tagged `DB access registers`)
lda #$80
pha
plb
; DB = $80
lda TadPrivate_state
cmp #TadState::WAITING_FOR_LOADER_COMMON
bne @SongData
; Common audio data
lda #TadLoaderDataType::COMMON_DATA
jsr TadPrivate_Loader_CheckReadyAndSendLoaderDataType
bcc @Return
; Push the next state; it is popped and stored after LoadAudioData returns.
lda #TadState::LOADING_COMMON_AUDIO_DATA
pha
lda #0 ; id 0 = common audio data
bra @LoadData
@SongData:
; Songs
; Tad_flags MUST NOT have the stereo/surround loader flag set
.assert TadFlags::_ALL_FLAGS & TadLoaderDataType::STEREO_FLAG = 0, error
.assert TadFlags::_ALL_FLAGS & TadLoaderDataType::SURROUND_FLAG = 0, error
; SONG_DATA_FLAG must always be sent and it also masks the RELOAD_COMMON_AUDIO_DATA flag in TadLoaderDataType
.assert TadFlags::RELOAD_COMMON_AUDIO_DATA = TadLoaderDataType::SONG_DATA_FLAG, error, "Cannot hide RELOAD_COMMON_AUDIO_DATA TadFlag with SONG_DATA_FLAG"
.assert TadFlags::PLAY_SONG_IMMEDIATELY = TadLoaderDataType::PLAY_SONG_FLAG, error
.assert TadFlags::RESET_GLOBAL_VOLUMES_ON_SONG_START = TadLoaderDataType::RESET_GLOBAL_VOLUMES_FLAG, error
; Clear unused TAD flags
lda #$ff ^ TadFlags::_ALL_FLAGS
trb Tad_flags
; Convert `Tad_audioMode` to TadLoaderDataType and combine with TadFlags
.assert ((0 + 1) & 3) = TadLoaderDataType::SURROUND_FLAG, error ; mono
.assert ((1 + 1) & 3) = TadLoaderDataType::STEREO_FLAG, error ; stereo
.assert ((2 + 1) & 3) = TadLoaderDataType::STEREO_FLAG | TadLoaderDataType::SURROUND_FLAG, error ; surround
lda Tad_audioMode
inc
and #3
ora Tad_flags
ora #TadLoaderDataType::SONG_DATA_FLAG
jsr TadPrivate_Loader_CheckReadyAndSendLoaderDataType
bcc @Return
; Determine next state
; (shift PLAY_SONG_IMMEDIATELY ($40) into carry, then add it to the PAUSED state)
.assert TadFlags::PLAY_SONG_IMMEDIATELY = $40, error
.assert TadState::LOADING_SONG_DATA_PAUSED + 1 = TadState::LOADING_SONG_DATA_PLAY, error
lda Tad_flags
asl
asl
lda #0
; carry = PLAY_SONG_IMMEDIATELY flag
adc #TadState::LOADING_SONG_DATA_PAUSED
pha
; Load next song
lda TadPrivate_nextSong
beq @UseBlankSong
@LoadData:
jsl LoadAudioData
bcs :+
; LoadAudioData returned false
@UseBlankSong:
; The blank song is a single zero byte.
; ::HACK use the 3rd byte of `ldy #1` (which is `0x00`) for the blank song data::
ldy #1
@_BlankSongData = * - 1
lda #.bankbyte(@_BlankSongData)
ldx #.loword(@_BlankSongData)
:
; STACK holds next state
; A:X = data address
; Y = data size
jsr TadPrivate_Loader_SetDataToTransfer
; Must set state AFTER the `LoadAudioData` call.
; `LoadAudioData` might call `Tad_FinishLoadingData`.
pla
sta TadPrivate_state
@Return:
plb
; DB restored
rtl
.endproc
;; Process the LOADING_* states
;;
;; return using RTL
.a8
.i16
;; DB access lowram
.proc TadPrivate_Process_Loading ; RTL
; Continue streaming the queued data to Audio-RAM; returns early if incomplete.
jsr TadPrivate_Loader_TransferData
bcc @Return
; Data loaded successfully
lda TadPrivate_state
cmp #TadState::LOADING_COMMON_AUDIO_DATA
bne @Song
; Common audio data was just transferred
; Loader is still active
lda #TadState::WAITING_FOR_LOADER_SONG
bra @EndIf
@Song:
; song data was loaded into Audio-RAM
; Loader has finished, audio driver is now active
stz TadPrivate_previousCommand
; Reset command and SFX queues ($ff = empty)
lda #$ff
sta TadPrivate_nextCommand_id
sta Tad_sfxQueue_sfx
sta Tad_sfxQueue_pan
; Use `TadPrivate_state` to determine if the song is playing or paused.
; Cannot use `Tad_flags` as it may have changed after the `TadLoaderDataType` was sent to
; the loader (while the song was loaded).
.assert ((TadState::LOADING_SONG_DATA_PAUSED & 1) << 1) | $80 = TadState::PAUSED, error
.assert ((TadState::LOADING_SONG_DATA_PLAY & 1) << 1) | $80 = TadState::PLAYING, error
lda TadPrivate_state
and #1
asl
ora #$80
; A = new state
@EndIf:
sta TadPrivate_state
@Return:
rtl
.endproc
; JSL/RTL subroutine
.a8
.i16
; DB access lowram
.proc Tad_FinishLoadingData : far
; Block until all queued data has been transferred to Audio-RAM.
; NOTE: this will freeze if the loader has crashed/glitched (see TransferData).
@Loop:
TadPrivate_IsLoaderActive
bcc @EndLoop
jsl TadPrivate_Process_Loading
bra @Loop
@EndLoop:
rtl
.endproc
;; ----------------------------
;; TAD_CODE_SEGMENT subroutines
;; ----------------------------
.segment TAD_CODE_SEGMENT
; IN: A = command
; IN: X = first parameter
; IN: Y = second parameter
; OUT: Carry set if command added to queue
.a8
; I unknown
; DB access lowram
.proc Tad_QueueCommand
; bit sets N from the high bit of nextCommand_id; negative = queue empty.
bit TadPrivate_nextCommand_id
bpl ReturnFalse
; command queue is empty
WriteCommand:
; (entry point for Tad_QueueCommandOverride: overwrite unconditionally)
and #TadIO_ToDriver::COMMAND_MASK
sta TadPrivate_nextCommand_id
txa
sta TadPrivate_nextCommand_parameter0
tya
sta TadPrivate_nextCommand_parameter1
; return true
sec
rts
ReturnFalse:
; Queue occupied: command dropped, return false (carry clear)
clc
rts
.endproc
; IN: A = command
; IN: X = first parameter
; IN: Y = second parameter
.a8
; I unknown
; DB access lowram
Tad_QueueCommandOverride := Tad_QueueCommand::WriteCommand
; IN: A = sfx id
; IN: X = pan
.a8
; I unknown
; DB access lowram
; KEEP: X, Y
.proc Tad_QueuePannedSoundEffect
; Lower sfx ids have higher priority; only replace the queued sfx if
; the new id is strictly lower (the empty queue holds $ff).
cmp Tad_sfxQueue_sfx
bcs @EndIf
sta Tad_sfxQueue_sfx
txa
sta Tad_sfxQueue_pan
@EndIf:
rts
.endproc
; IN: A = sfx_id
.a8
; I unknown
; DB access lowram
; KEEP: X, Y
.proc Tad_QueueSoundEffect
; Same priority rule as Tad_QueuePannedSoundEffect, with a centered pan.
cmp Tad_sfxQueue_sfx
bcs @EndIf
sta Tad_sfxQueue_sfx
lda #TAD_CENTER_PAN
sta Tad_sfxQueue_pan
@EndIf:
rts
.endproc
; IN: A = song_id
.a8
; I unknown
; DB access lowram
.proc Tad_LoadSong
; All states that may safely be interrupted by a song request sort below
; TAD__FIRST_LOADING_SONG_STATE.
.assert TAD__FIRST_LOADING_SONG_STATE > TadState::NULL, error
.assert TAD__FIRST_LOADING_SONG_STATE > TadState::WAITING_FOR_LOADER_COMMON, error
.assert TAD__FIRST_LOADING_SONG_STATE > TadState::WAITING_FOR_LOADER_SONG, error
.assert TAD__FIRST_LOADING_SONG_STATE > TadState::LOADING_COMMON_AUDIO_DATA, error
sta TadPrivate_nextSong
; trb clears RELOAD_COMMON_AUDIO_DATA and sets Z from whether it was set.
lda #TadFlags::RELOAD_COMMON_AUDIO_DATA
trb Tad_flags
beq @SongRequested
; Common audio data requested
lda #TadState::WAITING_FOR_LOADER_COMMON
bra @SetStateAndSwitchToLoader
@SongRequested:
lda TadPrivate_state
cmp #TAD__FIRST_LOADING_SONG_STATE
bcc @Return
; TadState is not NULL, WAITING_FOR_LOADER_* or LOADING_COMMON_AUDIO_DATA
lda #TadState::WAITING_FOR_LOADER_SONG
@SetStateAndSwitchToLoader:
sta TadPrivate_state
; Assert it is safe to send a switch-to-loader command when the loader is waiting for a READY signal
.assert TadIO_ToDriver::SWITCH_TO_LOADER <> TadIO_Loader_Init::LOADER_READY_H, error
.assert TadIO_ToDriver::SWITCH_TO_LOADER_PORT = TadIO_Loader_Init::READY_PORT_H, error
; Send a *switch-to-loader* command to the audio-driver or loader
lda #TadIO_ToDriver::SWITCH_TO_LOADER
sta f:TadIO_ToDriver::SWITCH_TO_LOADER_PORT
@Return:
rts
.endproc
; IN: A = song_id
; OUT: carry set if `Tad_LoadSong` was called
.a8
; I unknown
; DB access lowram
.proc Tad_LoadSongIfChanged
; Only reload when the requested song differs from the current one.
cmp TadPrivate_nextSong
beq :+
jsr Tad_LoadSong
sec
rts
:
; Song unchanged: return false (carry clear)
clc
rts
.endproc
;; OUT: A = The song_id used in the last `Tad_LoadSong` call.
.a8
; I unknown
; DB access lowram
.proc Tad_GetSong
; `TadPrivate_nextSong` is only written to in `Tad_Init` and `Tad_LoadSong`.
lda TadPrivate_nextSong
rts
.endproc
.a8
; I unknown
; DB access lowram
;; Request a reload of the common audio data on the next `Tad_LoadSong` call.
.proc Tad_ReloadCommonAudioData
lda #TadFlags::RELOAD_COMMON_AUDIO_DATA
tsb Tad_flags ; set flag
rts
.endproc
.a8
; I unknown
; DB access lowram
;; Newly loaded songs will start playing immediately.
.proc Tad_SongsStartImmediately
lda #TadFlags::PLAY_SONG_IMMEDIATELY
tsb Tad_flags ; set flag
rts
.endproc
.a8
; I unknown
; DB access lowram
;; Newly loaded songs will start in the paused state.
.proc Tad_SongsStartPaused
lda #TadFlags::PLAY_SONG_IMMEDIATELY
trb Tad_flags ; clear flag
rts
.endproc
.a8
; I unknown
; DB access lowram
;; Global volumes will be reset when a new song starts.
.proc Tad_GlobalVolumesResetOnSongStart
lda #TadFlags::RESET_GLOBAL_VOLUMES_ON_SONG_START
tsb Tad_flags ; set flag
rts
.endproc
.a8
; I unknown
; DB access lowram
;; Global volumes will persist across song changes.
.proc Tad_GlobalVolumesPersist
lda #TadFlags::RESET_GLOBAL_VOLUMES_ON_SONG_START
trb Tad_flags ; clear flag
rts
.endproc
; IN: X = new `TadPrivate_bytesToTransferPerFrame` value
; A unknown
.i16
; DB access lowram
.proc Tad_SetTransferSize
; Clamp the requested per-frame transfer size to [MIN, MAX].
cpx #TAD_MAX_TRANSFER_PER_FRAME
bcc :+
ldx #TAD_MAX_TRANSFER_PER_FRAME
:
cpx #TAD_MIN_TRANSFER_PER_FRAME
bcs :+
ldx #TAD_MIN_TRANSFER_PER_FRAME
:
stx TadPrivate_bytesToTransferPerFrame
rts
.endproc
; OUT: carry set if state is LOADING_*
.a8
; I unknown
; DB access lowram
.proc Tad_IsLoaderActive
; Thin wrapper: the macro leaves the result in the carry flag.
TadPrivate_IsLoaderActive
rts
.endproc
; OUT: carry set if state is PAUSED, PLAYING_SFX or PLAYING
.a8
; I unknown
; DB access lowram
.proc Tad_IsSongLoaded
.assert TadState::PLAYING_SFX > TadState::PAUSED, error
.assert TadState::PLAYING > TadState::PAUSED, error
; Assumes PLAYING is the last state
; (cmp sets carry when state >= PAUSED)
lda TadPrivate_state
cmp #TadState::PAUSED
rts
.endproc
; OUT: carry set if state is PLAYING_SFX or PLAYING
.a8
; I unknown
; DB access lowram
.proc Tad_IsSfxPlaying
.assert TadState::PLAYING > TadState::PLAYING_SFX, error
; Assumes PLAYING is the last state
lda TadPrivate_state
cmp #TadState::PLAYING_SFX
rts
.endproc
; OUT: carry set if state is PLAYING
.a8
; I unknown
; DB access lowram
.proc Tad_IsSongPlaying
; Assumes PLAYING is the last state
lda TadPrivate_state
cmp #TadState::PLAYING
rts
.endproc
|
achilleasa/bare-metal-gophers
| 5,211
|
arch/x86/asm/rt0.s
|
; vim: set ft=nasm :
section .bss
align 4
; Reserve 16K for our stack. Stacks should be aligned to 16 byte boundaries.
stack_bottom:
resb 16384 ; 16 KiB
stack_top:
; According to the "ELF handling for TLS" document section 4.3.2
; (https://www.akkadia.org/drepper/tls.pdf) for the GNU variant of the IA-32 ABI,
; gs:0x00 contains a pointer to the TCB. Variables in the TLS are stored
; before the TCB and are accessed using negative offsets from the TCB address.
g0_ptr: resd 1
tcb_ptr: resd 1
section .text
bits 32
align 4
MULTIBOOT_MAGIC equ 0x36d76289
; Offsets into runtime.g0 (must match the g struct in src/runtime/runtime2.go)
G_STACK_LO equ 0x0
G_STACK_HI equ 0x4
G_STACKGUARD0 equ 0x8
; NOTE(review): this string lives in .text (before the entry label, so never executed);
; confirm that is intentional rather than placing it in .rodata.
err_unsupported_bootloader db '[rt0] kernel not loaded by multiboot-compliant bootloader', 0
;------------------------------------------------------------------------------
; Kernel arch-specific entry point
;
; The boot loader will jump to this symbol after setting up the CPU according
; to the multiboot standard. At this point:
; - A20 is enabled
; - The CPU is using 32-bit protected mode
; - Interrupts are disabled
; - Paging is disabled
; - EAX contains the magic value ‘0x36d76289’; the presence of this value indicates
; to the operating system that it was loaded by a Multiboot-compliant boot loader
; - EBX contains the 32-bit physical address of the Multiboot information structure
;------------------------------------------------------------------------------
global _rt0_entry
_rt0_entry:
; Verify we were loaded by a multiboot2-compliant bootloader (magic in EAX).
cmp eax, MULTIBOOT_MAGIC
jne unsupported_bootloader
; Initialize our stack by pointing ESP to the BSS-allocated stack. In x86,
; stack grows downwards so we need to point ESP to stack_top
mov esp, stack_top
; Enable SSE/AVX
call _rt0_enable_sse
; Load initial GDT
call _rt0_load_gdt
; init g0 so we can invoke Go functions. For now we use hardcoded offsets
; that correspond to the g struct definition in src/runtime/runtime2.go
extern runtime.g0
mov dword [runtime.g0 + G_STACK_LO], stack_bottom
mov dword [runtime.g0 + G_STACK_HI], stack_top
mov dword [runtime.g0 + G_STACKGUARD0], stack_bottom
mov dword [g0_ptr], runtime.g0
; jump into the go code
extern main.main
call main.main
; Main should never return; halt the CPU
halt:
cli
hlt
unsupported_bootloader:
; Print an error message and halt (interrupts are still disabled).
mov edi, err_unsupported_bootloader
call write_string
jmp halt
.end:
;------------------------------------------------------------------------------
; Write the NULL-terminated string contained in edi to the screen using white
; text on red background. Assumes that text-mode is enabled and that its
; physical address is 0xb8000.
;------------------------------------------------------------------------------
write_string:
push eax
push ebx
; ebx = VGA text-mode framebuffer; ah = attribute byte (white on red)
mov ebx,0xb8000
mov ah, 0x4F
next_char:
; Copy one char + attribute word per iteration until the NUL terminator.
mov al, byte[edi]
test al, al
jz done
mov word [ebx], ax
add ebx, 2
inc edi
jmp next_char
done:
pop ebx
pop eax
ret
;------------------------------------------------------------------------------
; Load GDT and flush CPU caches
;------------------------------------------------------------------------------
_rt0_load_gdt:
push eax
push ebx
; Store the address to the TCB in tcb_ptr
; (per the IA-32 TLS ABI, gs:0x00 must point at the TCB itself)
; and set up gs base address to it
mov eax, tcb_ptr
mov [tcb_ptr], eax
; Patch base bits 0-23 of the GS descriptor in place.
; NOTE(review): base bits 24-31 (descriptor byte 7) are never written —
; this assumes tcb_ptr resides below 16 MiB; confirm with the linker script.
mov ebx, gdt0_gs_seg
mov [ebx+2], al
mov [ebx+3], ah
shr eax, 16
mov [ebx+4], al
lgdt [gdt0_desc]
; GDT has been loaded but the CPU still has the previous GDT data in cache.
; We need to manually update the descriptors and use a JMP command to set
; the CS segment descriptor
jmp CS_SEG:update_descriptors
update_descriptors:
mov ax, DS_SEG
mov ds, ax
mov es, ax
mov fs, ax
mov ss, ax
mov ax, GS_SEG
mov gs, ax
pop ebx
pop eax
ret
;------------------------------------------------------------------------------
; GDT definition
;------------------------------------------------------------------------------
%include "gdt.inc"
align 2
gdt0:
gdt0_nil_seg: GDT_ENTRY_32 0x00, 0x0, 0x0, 0x0 ; nil descriptor (not used by CPU but required by some emulators)
gdt0_cs_seg: GDT_ENTRY_32 0x00, 0xFFFFF, SEG_EXEC | SEG_R, SEG_GRAN_4K_PAGE ; code descriptor
gdt0_ds_seg: GDT_ENTRY_32 0x00, 0xFFFFF, SEG_NOEXEC | SEG_W, SEG_GRAN_4K_PAGE ; data descriptor
gdt0_gs_seg: GDT_ENTRY_32 0x00, 0xFFFFF, SEG_NOEXEC | SEG_W, SEG_GRAN_BYTE ; TLS descriptor (required in order to use go segmented stacks)
gdt0_desc:
dw gdt0_desc - gdt0 - 1 ; gdt size should be 1 byte less than actual length
dd gdt0
; Segment selectors = byte offsets of each descriptor within the GDT.
NULL_SEG equ gdt0_nil_seg - gdt0
CS_SEG equ gdt0_cs_seg - gdt0
DS_SEG equ gdt0_ds_seg - gdt0
GS_SEG equ gdt0_gs_seg - gdt0
;------------------------------------------------------------------------------
; Enable SSE support. Code taken from:
; http://wiki.osdev.org/SSE#Checking_for_SSE
;------------------------------------------------------------------------------
_rt0_enable_sse:
push eax
; check for SSE (CPUID.1:EDX bit 25)
mov eax, 0x1
cpuid
test edx, 1<<25
jz .no_sse
; enable SSE
mov eax, cr0
and ax, 0xFFFB ; clear coprocessor emulation CR0.EM
or ax, 0x2 ; set coprocessor monitoring CR0.MP
mov cr0, eax
mov eax, cr4
or ax, 3 << 9 ; set CR4.OSFXSR and CR4.OSXMMEXCPT at the same time
mov cr4, eax
pop eax
ret
.no_sse:
; No SSE support: nothing we can do — halt with interrupts disabled.
cli
hlt
|
achilleasa/bare-metal-gophers
| 1,040
|
arch/x86/asm/multiboot_header.s
|
; vim: set ft=nasm :
section .multiboot_header
MAGIC equ 0xe85250d6
ARCH equ 0x0
; Define the multiboot header (multiboot 1.6)
; http://nongnu.askapache.com/grub/phcoder/multiboot.pdf
header_start:
dd MAGIC ; magic number
dd ARCH ; i386 protected mode
dd header_end - header_start ; header length
; The field ‘checksum’ is a 32-bit unsigned value which, when added to the other
; magic fields (i.e. ‘magic’, ‘architecture’ and ‘header_length’), must have a
; 32-bit unsigned sum of zero.
; (NASM evaluates this in 64 bits; truncating to a dword yields the two's
; complement of the sum, i.e. the required checksum.)
dd (1 << 32) - (MAGIC + ARCH + (header_end - header_start))
align 8 ; tags should be 64-bit aligned
; Define graphics mode tag (currently disabled)
;dw 5 ; type
;dw 0 ; flags
;dd 20 ; size
;dd 80 ; width (pixels or chars)
;dd 25 ; height (pixels or chars)
;dd 0 ; bpp (0 for text mode)
align 8 ; tags should be 64-bit aligned
; According to page 6 of the spec, the tag list is terminated by a tag with
; type 0 and size 8
dd 0 ; type & flag = 0
dd 8 ; size
header_end:
|
acguardia/AGraber-30i
| 3,201
|
Firmware/Marlin-2.0.8.2/buildroot/share/PlatformIO/scripts/exc.S
|
/* *****************************************************************************
* The MIT License
*
* Copyright (c) 2010 Perry Hung.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
* ****************************************************************************/
# On an exception, push a fake stack thread mode stack frame and redirect
# thread execution to a thread mode error handler
# From RM008:
# The SP is decremented by eight words by the completion of the stack push.
# Figure 5-1 shows the contents of the stack after an exception pre-empts the
# current program flow.
#
# Old SP--> <previous>
# xPSR
# PC
# LR
# r12
# r3
# r2
# r1
# SP--> r0
.text
.globl __exc_nmi
.weak __exc_nmi
.globl __exc_hardfault
.weak __exc_hardfault
.globl __exc_memmanage
.weak __exc_memmanage
.globl __exc_busfault
.weak __exc_busfault
.globl __exc_usagefault
.weak __exc_usagefault
.code 16
@ Each stub loads a numeric exception id into r0 and falls into the common handler.
.thumb_func
__exc_nmi:
mov r0, #1
b __default_exc
.thumb_func
__exc_hardfault:
mov r0, #2
b __default_exc
.thumb_func
__exc_memmanage:
mov r0, #3
b __default_exc
.thumb_func
__exc_busfault:
mov r0, #4
b __default_exc
.thumb_func
__exc_usagefault:
mov r0, #5
b __default_exc
@ Common handler: fake a thread-mode stack frame and return into __error.
.thumb_func
__default_exc:
ldr r2, NVIC_CCR @ Enable returning to thread mode even if there are
mov r1 ,#1 @ pending exceptions. See flag NONBASETHRDENA.
str r1, [r2]
cpsid i @ Disable global interrupts
ldr r2, SYSTICK_CSR @ Disable systick handler
mov r1, #0
str r1, [r2]
ldr r1, CPSR_MASK @ Set default CPSR
push {r1}
ldr r1, TARGET_PC @ Set target pc
push {r1}
sub sp, sp, #24 @ Don't care (r0-r3, r12, lr slots of the fake frame)
ldr r1, EXC_RETURN @ Return to thread mode
mov lr, r1
bx lr @ Exception exit
.align 4
CPSR_MASK: .word 0x61000000
EXC_RETURN: .word 0xFFFFFFF9
TARGET_PC: .word __error
NVIC_CCR: .word 0xE000ED14 @ NVIC configuration control register
SYSTICK_CSR: .word 0xE000E010 @ Systick control register
|
acguardia/AGraber-30i
| 2,348
|
Firmware/Marlin-2.0.8.2/buildroot/share/PlatformIO/variants/marlin_CHITU_F103/wirish/start.S
|
/******************************************************************************
* The MIT License
*
* Copyright (c) 2011 LeafLabs, LLC.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*****************************************************************************/
/*
* This file is a modified version of a file obtained from
* CodeSourcery Inc. (now part of Mentor Graphics Corp.), in which the
* following text appeared:
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.text
.code 16
.thumb_func
.globl __start__
.type __start__, %function
@ Reset entry point: set the main stack pointer, then tail-jump into start_c.
__start__:
.fnstart
ldr r1,=__msp_init
mov sp,r1
ldr r1,=start_c
bx r1
.pool
.cantunwind
.fnend
|
acguardia/AGraber-30i
| 2,348
|
Firmware/Marlin-2.0.8.2/buildroot/share/PlatformIO/variants/marlin_MEEB_3DP/wirish/start.S
|
/******************************************************************************
* The MIT License
*
* Copyright (c) 2011 LeafLabs, LLC.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*****************************************************************************/
/*
* This file is a modified version of a file obtained from
* CodeSourcery Inc. (now part of Mentor Graphics Corp.), in which the
* following text appeared:
*
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
* notice is included verbatim in any distributions. No written agreement,
* license, or royalty fee is required for any of the authorized uses.
* Modifications to this software may be copyrighted by their authors
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
*/
.text
.code 16
.thumb_func
.globl __start__
.type __start__, %function
@ Reset entry point: set the main stack pointer, then tail-jump into start_c.
__start__:
.fnstart
ldr r1,=__msp_init
mov sp,r1
ldr r1,=start_c
bx r1
.pool
.cantunwind
.fnend
|
acidanthera/WhateverGreen
| 1,255
|
WhateverGreen/kern_ngfx_asm.S
|
//
// kern_ngfx_asm.S
// WhateverGreen
//
// Copyright © 2018 vit9696. All rights reserved.
//
// Saves all System V caller-saved integer registers, calls routine `y`
// with the argument taken from register `x` (moved into %rdi, the first
// C argument), restores the registers and returns to the patched caller.
// NOTE(review): %rax is deliberately not preserved -- it carries the
// routing selector on entry (see the trampoline below) and the callee's
// return value on exit.  Only /* */ comments are legal inside the macro
// because // would swallow the line-continuation backslash.
#define PRESUBMIT_FROM_REG(x, y) \
push %rdi; \
push %rsi; \
push %rdx; \
push %rcx; \
push %r8; \
push %r9; \
push %r10; \
push %r11; \
mov x, %rdi; /* first SysV argument = value from x */ \
call y; \
pop %r11; \
pop %r10; \
pop %r9; \
pop %r8; \
pop %rcx; \
pop %rdx; \
pop %rsi; \
pop %rdi; \
ret;
.text
// Dispatch trampoline: %al holds a routing selector installed by the
// patcher.  0 = plain tail-jump into the C++ wrapper; 1..3 = the submit
// argument lives in a callee-saved register (%rbx/%r13/%r12) at the
// patched call site and must be marshalled via PRESUBMIT_FROM_REG.
.globl _wrapVaddrPreSubmitTrampoline
_wrapVaddrPreSubmitTrampoline:
// Choose the routing
test %al, %al
// Standard routing (for normal calls)
jz __ZN4NGFX18wrapVaddrPreSubmitEPv
// Wrapped routing (for patched calls)
cmp $1,%al
jz handle_rbx_off
cmp $2,%al
jz handle_r13_off
cmp $3,%al
jz handle_r12_off
// Do we need more registers?
ud2
// Each handler expands to a full save/call/restore/ret sequence, so
// there is no fall-through between the labels below.
handle_rbx_off:
PRESUBMIT_FROM_REG(%rbx, __ZN4NGFX18wrapVaddrPreSubmitEPv)
handle_r13_off:
PRESUBMIT_FROM_REG(%r13, __ZN4NGFX18wrapVaddrPreSubmitEPv)
handle_r12_off:
PRESUBMIT_FROM_REG(%r12, __ZN4NGFX18wrapVaddrPreSubmitEPv)
// Replays the function prologue that the patch overwrote, then jumps to
// the saved continuation address stored in _orgVaddrPreSubmit.
.globl _orgVaddrPresubmitTrampoline
_orgVaddrPresubmitTrampoline:
// This is the prologue we patched out
push %rbp
mov %rsp, %rbp
// Jump to the original code
mov _orgVaddrPreSubmit(%rip), %rax
jmp *%rax
.data
// 8 zero bytes: storage for the original continuation pointer, filled
// in at runtime by the patcher.
.globl _orgVaddrPreSubmit
_orgVaddrPreSubmit:
.rept 8
.byte 0
.endr
|
acidanthera/VirtualSMC
| 2,307
|
VirtualSMC/kern_handler.S
|
//
// kern_handler.S
// VirtualSMC
//
// Copyright © 2017 vit9696. All rights reserved.
//
#if defined(__x86_64__)
// This routine shims the control to a C function after being jumped
// right after performing SMC I/O from kernelspace.
// It is assumed that the source index is pushed on stack.
// It is assumed process_io_result has the following prototype:
// mach_vm_address_t process_io_result(size_t index);
// Where index is the passed index, and the return address is the
// address we need to jump to once we finish restoring the registers.
// _______ _______ _______
// -0x50 | idx | | idx | | sub |
// -0x48 | | | flags | | flags |
// -0x40 | | | rax | | rax |
// -0x38 | | | rdi | | rdi |
// -0x30 | | pre call | rsi | after call | rsi |
// -0x28 | | ----------> | rdx | ------------> | rdx |
// -0x20 | | | rcx | | rcx |
// -0x18 | | | r8 | | r8 |
// -0x10 | | | r9 | | r9 |
// -0x08 | | | r10 | | r10 |
// -0x0 | | | r11 | | r11 |
// '-------' '-------' '-------'
//
.globl _ioTrapHandler
_ioTrapHandler:
// 1. Save the registers, the following assumptions are made:
//    - xmm/ymm registers are unused (kexts do not use it atm)
//    - hardware fpu is unused (kexts/kernel do not use it atm)
pushfq
push %rax
push %rdi
push %rsi
push %rdx
push %rcx
push %r8
push %r9
push %r10
push %r11
// 2. Get the index
// 10 slots (flags + 9 GPRs) were pushed above, so the caller-pushed
// index sits at rsp+0x50 (matches the diagram above).
mov 0x50(%rsp), %rdi
// 3. Align the stack and call the procedure.
// This is required, because even though the stack will always be at least 8-byte aligned,
// we may need 16-byte alignment for SSE (e.g. CoreCrypto uses it).
// NOTE(review): %rax written here is never read before the call
// clobbers it -- looks vestigial; confirm before removing.
mov %rsp, %rax
// Push the pre-alignment rsp twice; after "and" rounds rsp down, a copy
// of the saved rsp is always reachable at 0x8(%rsp) regardless of
// whether alignment consumed the extra slot.
push %rsp
push (%rsp)
andq $ -0x10, %rsp
call __ZN4NGFX18wrapVaddrPreSubmitEPv
#endif
|
acidanthera/VirtualSMC
| 2,101
|
Sensors/SMCDellSensors/bzh_dell_smm_lowlevel.S
|
// lowlevel.asm - low level hardware access for x64 systems
//
// FanIO driver V2.3
// Copyright(c) 2001-2007 Christian Diefer
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
.intel_syntax noprefix
.globl _dell_smm_lowlevel
// rdi:SMBIOS_PKG* cmd
// Loads the four 32-bit SMBIOS_PKG fields into eax/ebx/ecx/edx, raises
// an SMI by writing port 0xB2 (NOTE(review): classic APM/SMI control
// port -- confirm for the target chipset), then decides success by
// whether the SMM handler modified ANY of the registers.  All four
// unchanged, or ax == 0xFFFF, means the call failed (returns -1);
// otherwise the updated registers are written back and 0 is returned.
_dell_smm_lowlevel:
# Frame offsets relative to rbp for the saved input snapshot.
stat2 = -0x50
stat1 = -0x48
data = -0x40
cmd = -0x38
sptr = -0x30
push rbp
mov rbp, rsp
push rcx
push rdx
push r8
push r9
push rbx                           # rbx is callee-saved in SysV; the rest are saved defensively
sub rsp, 0x60
mov qword ptr [rbp+sptr],rdi # sptr
mov r8,rdi
xor rax,rax
mov eax,dword ptr [r8+0]
mov dword ptr [rbp+cmd],eax # cmd
xor rbx,rbx
mov ebx,dword ptr [r8+4]
mov dword ptr [rbp+data],ebx # data
xor rcx,rcx
mov ecx,dword ptr [r8+8]
mov dword ptr [rbp+stat1],ecx # stat1
xor rdx,rdx
mov edx,dword ptr [r8+12]
mov dword ptr [rbp+stat2],edx # stat2
out 0xb2,al                        # trigger the SMI; handler rewrites eax/ebx/ecx/edx
out 0x84,al                        # write to the POST/delay port
# Any register differing from its snapshot proves the handler ran.
cmp eax,dword ptr [rbp+cmd] # cmd
jne cmdok
cmp ebx,dword ptr [rbp+data] # data
jne cmdok
cmp ecx,dword ptr [rbp+stat1] # stat1
jne cmdok
cmp edx,dword ptr [rbp+stat2] # stat2
je error                           # nothing changed at all -> SMI not handled
cmdok:
# Write the handler's register values back into the caller's package.
mov r8,qword ptr[rbp+sptr] # sptr
mov dword ptr [r8+0],eax # cmd
mov dword ptr [r8+4],ebx # data
mov dword ptr [r8+8],ecx # stat1
mov dword ptr [r8+12],edx # stat2
and eax,0xffff
cmp eax,0xffff                     # ax == 0xFFFF is the handler's failure marker
jne ok
error:
xor rax,rax // Error (rax=-1)
dec rax
jmp ende
ok:
xor rax,rax // Success (rax=0)
ende:
add rsp, 0x60
pop rbx
pop r9
pop r8
pop rdx
pop rcx
pop rbp
RET
|
acidanthera/Lilu
| 3,305
|
Lilu/Sources/kern_efi_trampoline_i386.s
|
//
// kern_efi_trampoline_i386.s
// Lilu
//
// Copyright © 2021 Goldfish64. All rights reserved.
//
#if defined(__i386__)
#define KERNEL32_CS 0x08 /* kernel 32-bit code for 32-bit kernel */
#define KERNEL64_CS 0x80 /* kernel 64-bit code for 32-bit kernel */
/*
* Copy "count" bytes from "src" to %esp, using
* "tmpindex" for a scratch counter and %eax
*/
// Copies `count` bytes from `src` to the stack top in 32-bit words.
// Clobbers %eax and `tmpindex`.  NOTE(review): assumes count > 0 and a
// multiple of 4 -- count == 0 would copy one word and then loop nearly
// forever before wrapping; confirm callers guarantee this.
#define COPY_STACK(src, count, tmpindex) \
mov $0, tmpindex /* initial scratch counter */ ; \
1: \
mov 0(src,tmpindex,1), %eax /* copy one 32-bit word from source... */ ; \
mov %eax, 0(%esp,tmpindex,1) /* ... to stack */ ; \
add $4, tmpindex /* increment counter */ ; \
cmp count, tmpindex /* exit if stack has been copied */ ; \
jne 1b
/*
* Long jump to 64-bit space from 32-bit compatibility mode.
*/
// Hand-encoded far jump (opcode 0xEA: ptr16:32) through the 64-bit code
// selector; execution resumes at local label 1 already in long mode.
#define ENTER_64BIT_MODE() \
.code32 ;\
.byte 0xea /* far jump longmode */ ;\
.long 1f ;\
.word KERNEL64_CS ;\
.code64 ;\
1:
/*
* Long jump to 32-bit compatibility mode from 64-bit space.
*/
// Memory-indirect far jump: (%rip) points at the ptr16:32 operand
// (.long offset + .word selector) emitted immediately after the
// instruction; lands at local label 4 in 32-bit compatibility mode.
#define ENTER_COMPAT_MODE() \
ljmp *(%rip) ;\
.long 4f ;\
.word KERNEL32_CS ;\
.code32 ;\
4:
/**
* This code is a slightly modified pal_efi_call_in_64bit_mode_asm function.
*
* Switch from compatibility mode to long mode, and
* then execute the function pointer with the specified
* register and stack contents (based at %rsp). Afterwards,
* collect the return value, restore the original state,
* and return.
*/
// cdecl args (32-bit frame): func pointer is a 64-bit value occupying
// 8..15(%ebp); efi_reg at 16(%ebp); stack_contents at 20(%ebp);
// stack_contents_size at 24(%ebp).  %esi survives the mode switch and
// is re-read by its low half below because %rsi's high bits are not
// guaranteed after re-entering compatibility mode.
.globl _performEfiCallAsm64
_performEfiCallAsm64:
pushl %ebp;
movl %esp, %ebp
/* save non-volatile registers */
push %ebx
push %esi
push %edi
sub $12, %esp /* align to 16-byte boundary */
mov 16(%ebp), %esi /* load efi_reg into %esi */
mov 20(%ebp), %edx /* load stack_contents into %edx */
mov 24(%ebp), %ecx /* load s_c_s into %ecx */
sub %ecx, %esp /* make room for stack contents */
COPY_STACK(%edx, %ecx, %edi)
ENTER_64BIT_MODE()
/* load efi_reg into real registers (Microsoft x64 arg registers) */
mov 0(%rsi), %rcx
mov 8(%rsi), %rdx
mov 16(%rsi), %r8
mov 24(%rsi), %r9
mov 32(%rsi), %rax
mov 8(%rbp), %rdi /* load 64-bit func pointer */
call *%rdi /* call EFI runtime */
mov 16(%rbp), %esi /* reload efi_reg into %esi */
mov %rax, 32(%rsi) /* save RAX back */
ENTER_COMPAT_MODE()
add 24(%ebp), %esp /* discard stack contents */
add $12, %esp /* restore stack pointer */
pop %edi
pop %esi
pop %ebx
leave
ret
/**
* This code is a slightly modified pal_efi_call_in_32bit_mode_asm function.
*/
// cdecl args: func pointer at 8(%ebp) (32-bit here); efi_reg at
// 12(%ebp); stack_contents at 16(%ebp); stack_contents_size at
// 20(%ebp).  No mode switch: the EFI service is called directly in
// 32-bit mode; only the low halves of RCX/RDX/RAX are transferred.
.globl _performEfiCallAsm32
_performEfiCallAsm32:
pushl %ebp;
movl %esp, %ebp
/* save non-volatile registers */
push %ebx
push %esi
push %edi
sub $12, %esp /* align to 16-byte boundary */
mov 12(%ebp), %esi /* load efi_reg into %esi */
mov 16(%ebp), %edx /* load stack_contents into %edx */
mov 20(%ebp), %ecx /* load s_c_s into %ecx */
sub %ecx, %esp /* make room for stack contents */
COPY_STACK(%edx, %ecx, %edi)
/* load efi_reg into real registers */
mov 0(%esi), %ecx
mov 8(%esi), %edx
mov 32(%esi), %eax
mov 8(%ebp), %edi /* load func pointer */
call *%edi /* call EFI runtime */
mov 12(%ebp), %esi /* reload efi_reg into %esi */
mov %eax, 32(%esi) /* save RAX back */
movl $0, 36(%esi) /* zero out high bits of RAX */
add 20(%ebp), %esp /* discard stack contents */
add $12, %esp /* restore stack pointer */
pop %edi
pop %esi
pop %ebx
leave
ret
#endif
|
acidanthera/Lilu
| 1,736
|
Lilu/Sources/kern_efi_trampoline_x86_64.s
|
//
// kern_efi_trampoline_x86_64.s
// Lilu
//
// Copyright © 2018 vit9696. All rights reserved.
//
#if defined(__x86_64__)
#define KERNEL32_CS 0x50 /* kernel 32-bit code for 64-bit kernel */
#define KERNEL64_CS 0x08 /* kernel 64-bit code for 64-bit kernel */
/*
* Copy "count" bytes from "src" to %rsp, using
* "tmpindex" for a scratch counter and %rax
*/
// Copies `count` bytes from `src` to the stack top in 64-bit words.
// Clobbers %rax and `tmpindex`.  NOTE(review): assumes count > 0 and a
// multiple of 8; confirm callers guarantee this.
#define COPY_STACK(src, count, tmpindex) \
mov $0, tmpindex /* initial scratch counter */ ; \
1: \
mov 0(src,tmpindex,1), %rax /* copy one 64-bit word from source... */ ; \
mov %rax, 0(%rsp,tmpindex,1) /* ... to stack */ ; \
add $8, tmpindex /* increment counter */ ; \
cmp count, tmpindex /* exit if stack has been copied */ ; \
jne 1b
/**
* This code is a slightly modified pal_efi_call_in_64bit_mode_asm function.
*/
// SysV AMD64 in: rdi = func, rsi = efi_reg, rdx = stack_contents,
// rcx = stack_contents_size.  Frame layout after the pushes below:
// rsi saved at -48(%rbp), rcx at -56(%rbp) -- reloaded by those
// offsets after the call.  NOTE(review): alignment after "sub $8"
// holds only if stack_contents_size is a multiple of 16; confirm.
.globl _performEfiCallAsm64
_performEfiCallAsm64:
pushq %rbp
movq %rsp, %rbp
/* save non-volatile registers */
push %rbx
push %r12
push %r13
push %r14
push %r15
/* save parameters that we will need later */
push %rsi
push %rcx
sub $8, %rsp /* align to 16-byte boundary */
/* efi_reg in %rsi */
/* stack_contents into %rdx */
/* s_c_s into %rcx */
sub %rcx, %rsp /* make room for stack contents */
COPY_STACK(%rdx, %rcx, %r8)
/* load efi_reg into real registers (Microsoft x64 arg registers) */
mov 0(%rsi), %rcx
mov 8(%rsi), %rdx
mov 16(%rsi), %r8
mov 24(%rsi), %r9
mov 32(%rsi), %rax
/* func pointer in %rdi */
call *%rdi /* call EFI runtime */
mov -48(%rbp), %rsi /* reload efi_reg into %rsi */
mov %rax, 32(%rsi) /* save RAX back */
mov -56(%rbp), %rcx /* load s_c_s into %rcx */
add %rcx, %rsp /* discard stack contents */
add $8, %rsp /* restore stack pointer */
pop %rcx
pop %rsi
pop %r15
pop %r14
pop %r13
pop %r12
pop %rbx
leave
ret
#endif
|
acidanthera/audk
| 1,933
|
MdePkg/Library/DynamicStackCookieEntryPointLib/AArch64/DynamicCookieGcc.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
# Module Name:
#
# DynamicCookie.S
#
# Abstract:
#
# Generates random number through the RNDR instruction on a 64-bit AARCH64 platform
# to store a random value in the GCC __stack_check_guard stack cookie.
# The first byte is 0'd to prevent string copy functions from clobbering
# the stack cookie.
#
# Notes:
#
# If RNDR fails, the build time static stack cookie value will be used instead.
#
#------------------------------------------------------------------------------
#include <AArch64/AArch64.h>
.text
.p2align 2
GCC_ASM_IMPORT(__stack_chk_guard)
GCC_ASM_IMPORT(_CModuleEntryPoint)
GCC_ASM_EXPORT(_ModuleEntryPoint)
#------------------------------------------------------------------------------
# VOID
# EFIAPI
# _ModuleEntryPoint (
# Parameters are passed through.
# )
#------------------------------------------------------------------------------
ASM_PFX(_ModuleEntryPoint):
AARCH64_BTI(c)
mrs x9, ID_AA64ISAR0_EL1 // Read the AArch64 Instruction Set Attribute Register 0
ubfx x9, x9, #60, #4 // Extract the RNDR bit field (bits 60-63)
cbz x9, c_entry // If RNDR is not supported, jump to c_entry
mrs x9, RNDR // Generate a random number (also sets NZCV)
b.eq c_entry // RNDR sets NZCV to 0b0100 on failure
// So if the zero flag is set, use the static stack guard
and x9, x9, #0xFFFFFFFFFFFFFF00 // Zero the first byte of the random value
// (low byte = 0 stops string copies from running past the cookie)
adrp x8, ASM_PFX(__stack_chk_guard) // Load the page address of __stack_chk_guard
str x9, [x8, :lo12:ASM_PFX(__stack_chk_guard)] // Store the random value in __stack_chk_guard
c_entry:
b ASM_PFX(_CModuleEntryPoint) // Tail-jump; arg registers x0-x7 pass through untouched
|
acidanthera/audk
| 1,328
|
MdePkg/Library/BaseCpuLib/LoongArch/InitializeFpu.S
|
#------------------------------------------------------------------------------
#
# InitializeFloatingPointUnits() for LoongArch64
#
# Copyright (c) 2024, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
# Resets FCSR0 to 0 (round-to-nearest-even, exceptions cleared) and
# fills every FP register f0-f31 with an all-ones pattern.
# NOTE(review): the upstream "SNaN" label looks wrong -- an all-ones
# double has the quiet bit set, i.e. it encodes a QNaN; confirm intent.
ASM_GLOBAL ASM_PFX(InitializeFloatingPointUnits)
ASM_PFX(InitializeFloatingPointUnits):
li.d $t0, 0x0 // RNE mode
movgr2fcsr $r0, $t0
li.d $t1, -1 // all-ones NaN pattern (labelled SNaN upstream)
movgr2fr.d $f0, $t1
movgr2fr.d $f1, $t1
movgr2fr.d $f2, $t1
movgr2fr.d $f3, $t1
movgr2fr.d $f4, $t1
movgr2fr.d $f5, $t1
movgr2fr.d $f6, $t1
movgr2fr.d $f7, $t1
movgr2fr.d $f8, $t1
movgr2fr.d $f9, $t1
movgr2fr.d $f10, $t1
movgr2fr.d $f11, $t1
movgr2fr.d $f12, $t1
movgr2fr.d $f13, $t1
movgr2fr.d $f14, $t1
movgr2fr.d $f15, $t1
movgr2fr.d $f16, $t1
movgr2fr.d $f17, $t1
movgr2fr.d $f18, $t1
movgr2fr.d $f19, $t1
movgr2fr.d $f20, $t1
movgr2fr.d $f21, $t1
movgr2fr.d $f22, $t1
movgr2fr.d $f23, $t1
movgr2fr.d $f24, $t1
movgr2fr.d $f25, $t1
movgr2fr.d $f26, $t1
movgr2fr.d $f27, $t1
movgr2fr.d $f28, $t1
movgr2fr.d $f29, $t1
movgr2fr.d $f30, $t1
movgr2fr.d $f31, $t1
jirl $zero, $ra, 0
.end
|
acidanthera/audk
| 1,874
|
MdePkg/Library/BaseSynchronizationLib/RiscV64/Synchronization.S
|
//------------------------------------------------------------------------------
//
// RISC-V synchronization functions.
//
// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
#include <Base.h>
.data
.text
.align 3
.global ASM_PFX(InternalSyncCompareExchange32)
.global ASM_PFX(InternalSyncCompareExchange64)
.global ASM_PFX(InternalSyncIncrement)
.global ASM_PFX(InternalSyncDecrement)
//
// Compare and exchange a 32-bit value.
//
// @param a0 : Pointer to 32-bit value.
// @param a1 : Compare value.
// @param a2 : Exchange value.
//
// In:  a0 = Value pointer, a1 = CompareValue, a2 = ExchangeValue
// Out: a0 = original *Value (== a1 iff the exchange happened)
// FIX: the original fell through when sc.w failed (nonzero result,
// reservation lost) and still reported success; retry instead.
ASM_PFX (InternalSyncCompareExchange32):
1:
lr.w a3, (a0) // Load the value from a0 and make
// the reservation of address.
bne a3, a1, exit
sc.w a3, a2, (a0) // Try to write the value back to the address.
bnez a3, 1b // Nonzero = store-conditional failed: retry.
mv a3, a1
exit:
mv a0, a3
ret
//
// Compare and xchange a 64-bit value.
//
// @param a0 : Pointer to 64-bit value.
// @param a1 : Compare value.
// @param a2 : Exchange value.
//
// In:  a0 = Value pointer, a1 = CompareValue, a2 = ExchangeValue
// Out: a0 = original *Value (== a1 iff the exchange happened)
// FIX 1: the miss branch targeted "exit" -- the 32-bit routine's label
// -- instead of this routine's own "exit2".
// FIX 2: retry when sc.d fails instead of falsely reporting success.
ASM_PFX (InternalSyncCompareExchange64):
1:
lr.d a3, (a0) // Load the value from a0 and make
// the reservation of address.
bne a3, a1, exit2
sc.d a3, a2, (a0) // Try to write the value back to the address.
bnez a3, 1b // Nonzero = store-conditional failed: retry.
mv a3, a1
exit2:
mv a0, a3
ret
//
// Performs an atomic increment of an 32-bit unsigned integer.
//
// @param a0 : Pointer to 32-bit value.
//
// In:  a0 = Value pointer.  Out: a0 = incremented value.
// FIX: amoadd.w returns the value *before* the add, but this routine's
// contract (and the AArch64/ARM ports) return the value *after* the
// increment -- add 1 to the fetched value before returning.
ASM_PFX (InternalSyncIncrement):
li a1, 1
amoadd.w a2, a1, (a0) // a2 = old value (sign-extended)
addw a0, a2, a1 // return old + 1, kept sign-extended
ret
//
// Performs an atomic decrement of an 32-bit unsigned integer.
//
// @param a0 : Pointer to 32-bit value.
//
// In:  a0 = Value pointer.  Out: a0 = decremented value.
// FIX: amoadd.w returns the value *before* the add; return the
// post-decrement value for consistency with the other ports.
ASM_PFX (InternalSyncDecrement):
li a1, -1
amoadd.w a2, a1, (a0) // a2 = old value (sign-extended)
addw a0, a2, a1 // return old - 1, kept sign-extended
ret
|
acidanthera/audk
| 5,930
|
MdePkg/Library/BaseSynchronizationLib/AArch64/Synchronization.S
|
// Implementation of synchronization functions for ARM architecture (AArch64)
//
// Copyright (c) 2012-2015, ARM Limited. All rights reserved.
// Copyright (c) 2015, Linaro Limited. All rights reserved.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//
.text
.align 3
GCC_ASM_EXPORT(InternalSyncCompareExchange16)
GCC_ASM_EXPORT(InternalSyncCompareExchange32)
GCC_ASM_EXPORT(InternalSyncCompareExchange64)
GCC_ASM_EXPORT(InternalSyncIncrement)
GCC_ASM_EXPORT(InternalSyncDecrement)
/**
Performs an atomic compare exchange operation on a 16-bit unsigned integer.
Performs an atomic compare exchange operation on the 16-bit unsigned integer
specified by Value. If Value is equal to CompareValue, then Value is set to
ExchangeValue and CompareValue is returned. If Value is not equal to CompareValue,
then Value is returned. The compare exchange operation must be performed using
MP safe mechanisms.
@param Value A pointer to the 16-bit value for the compare exchange
operation.
@param CompareValue 16-bit value used in compare operation.
@param ExchangeValue 16-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT16
//EFIAPI
//InternalSyncCompareExchange16 (
// IN volatile UINT16 *Value,
// IN UINT16 CompareValue,
// IN UINT16 ExchangeValue
// )
// x0 = Value ptr, w1 = CompareValue, w2 = ExchangeValue; returns the
// original halfword in w0.  w3 = observed value, w4 = stxrh status.
ASM_PFX(InternalSyncCompareExchange16):
AARCH64_BTI(c)
uxth w1, w1 // normalize: keep only the low 16 bits of both inputs
uxth w2, w2
dmb sy // full barrier before...
InternalSyncCompareExchange16Again:
ldxrh w3, [x0]
cmp w3, w1
bne InternalSyncCompareExchange16Fail
InternalSyncCompareExchange16Exchange:
stxrh w4, w2, [x0] // w4 = 0 on success, 1 if the exclusive was lost
cbnz w4, InternalSyncCompareExchange16Again
InternalSyncCompareExchange16Fail:
dmb sy // ...and after, for full-fence CAS semantics
mov w0, w3
ret
/**
Performs an atomic compare exchange operation on a 32-bit unsigned integer.
Performs an atomic compare exchange operation on the 32-bit unsigned integer
specified by Value. If Value is equal to CompareValue, then Value is set to
ExchangeValue and CompareValue is returned. If Value is not equal to CompareValue,
then Value is returned. The compare exchange operation must be performed using
MP safe mechanisms.
@param Value A pointer to the 32-bit value for the compare exchange
operation.
@param CompareValue 32-bit value used in compare operation.
@param ExchangeValue 32-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT32
//EFIAPI
//InternalSyncCompareExchange32 (
// IN volatile UINT32 *Value,
// IN UINT32 CompareValue,
// IN UINT32 ExchangeValue
// )
// x0 = Value ptr, w1 = CompareValue, w2 = ExchangeValue; returns the
// original word in w0.  w3 = observed value, w4 = stxr status.
ASM_PFX(InternalSyncCompareExchange32):
AARCH64_BTI(c)
dmb sy
InternalSyncCompareExchange32Again:
ldxr w3, [x0]
cmp w3, w1
bne InternalSyncCompareExchange32Fail
InternalSyncCompareExchange32Exchange:
stxr w4, w2, [x0] // w4 = 0 on success, 1 if the exclusive was lost
cbnz w4, InternalSyncCompareExchange32Again
InternalSyncCompareExchange32Fail:
dmb sy
mov w0, w3
ret
/**
Performs an atomic compare exchange operation on a 64-bit unsigned integer.
Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
by Value. If Value is equal to CompareValue, then Value is set to ExchangeValue and
CompareValue is returned. If Value is not equal to CompareValue, then Value is returned.
The compare exchange operation must be performed using MP safe mechanisms.
@param Value A pointer to the 64-bit value for the compare exchange
operation.
@param CompareValue 64-bit value used in compare operation.
@param ExchangeValue 64-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT64
//EFIAPI
//InternalSyncCompareExchange64 (
// IN volatile UINT64 *Value,
// IN UINT64 CompareValue,
// IN UINT64 ExchangeValue
// )
// x0 = Value ptr, x1 = CompareValue, x2 = ExchangeValue; returns the
// original doubleword in x0.  x3 = observed value, w4 = stxr status.
ASM_PFX(InternalSyncCompareExchange64):
AARCH64_BTI(c)
dmb sy
InternalSyncCompareExchange64Again:
ldxr x3, [x0]
cmp x3, x1
bne InternalSyncCompareExchange64Fail
InternalSyncCompareExchange64Exchange:
stxr w4, x2, [x0] // w4 = 0 on success, 1 if the exclusive was lost
cbnz w4, InternalSyncCompareExchange64Again
InternalSyncCompareExchange64Fail:
dmb sy
mov x0, x3
ret
/**
Performs an atomic increment of an 32-bit unsigned integer.
Performs an atomic increment of the 32-bit unsigned integer specified by
Value and returns the incremented value. The increment operation must be
performed using MP safe mechanisms. The state of the return value is not
guaranteed to be MP safe.
@param Value A pointer to the 32-bit value to increment.
@return The incremented value.
**/
//UINT32
//EFIAPI
//InternalSyncIncrement (
// IN volatile UINT32 *Value
// )
// x0 = Value ptr; returns the incremented value (w1) in w0.
ASM_PFX(InternalSyncIncrement):
AARCH64_BTI(c)
dmb sy
TryInternalSyncIncrement:
ldxr w1, [x0]
add w1, w1, #1
stxr w2, w1, [x0] // w2 = 0 on success; retry if the exclusive was lost
cbnz w2, TryInternalSyncIncrement
mov w0, w1
dmb sy
ret
/**
Performs an atomic decrement of an 32-bit unsigned integer.
Performs an atomic decrement of the 32-bit unsigned integer specified by
Value and returns the decrement value. The decrement operation must be
performed using MP safe mechanisms. The state of the return value is not
guaranteed to be MP safe.
@param Value A pointer to the 32-bit value to decrement.
@return The decrement value.
**/
//UINT32
//EFIAPI
//InternalSyncDecrement (
// IN volatile UINT32 *Value
// )
// x0 = Value ptr; returns the decremented value (w1) in w0.
ASM_PFX(InternalSyncDecrement):
AARCH64_BTI(c)
dmb sy
TryInternalSyncDecrement:
ldxr w1, [x0]
sub w1, w1, #1
stxr w2, w1, [x0] // w2 = 0 on success; retry if the exclusive was lost
cbnz w2, TryInternalSyncDecrement
mov w0, w1
dmb sy
ret
|
acidanthera/audk
| 2,241
|
MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
|
#------------------------------------------------------------------------------
#
# LoongArch synchronization ASM functions.
#
# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange16)
ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange32)
ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange64)
ASM_GLOBAL ASM_PFX(AsmInternalSyncIncrement)
ASM_GLOBAL ASM_PFX(AsmInternalSyncDecrement)
/**
UINT32
EFIAPI
AsmInternalSyncCompareExchange16 (
IN volatile UINT32 *Ptr32,
IN UINT64 Mask,
IN UINT64 LocalCompareValue,
IN UINT64 LocalExchangeValue
)
**/
# 16-bit CAS emulated on the containing aligned 32-bit word: a1 = Mask
# selecting the halfword, a2 = compare bits, a3 = exchange bits (both
# pre-shifted by the caller).  Returns the original full 32-bit word.
ASM_PFX(AsmInternalSyncCompareExchange16):
1:
ll.w $t0, $a0, 0x0
and $t1, $t0, $a1   # isolate the target halfword bits
bne $t1, $a2, 2f    # mismatch -> bail out (with barrier)
andn $t1, $t0, $a1  # keep the untouched bits of the word...
or $t1, $t1, $a3    # ...and merge in the exchange bits
sc.w $t1, $a0, 0x0  # sc.w: 1 = success, 0 = failure
beqz $t1, 1b        # lost the link -> retry
b 3f
2:
dbar 0              # full barrier on the failure path too
3:
move $a0, $t0
jirl $zero, $ra, 0
/**
UINT32
EFIAPI
AsmInternalSyncCompareExchange32 (
IN volatile UINT32 *Value,
IN UINT64 CompareValue,
IN UINT64 ExchangeValue
)
**/
# 32-bit LL/SC compare-exchange; returns the original *Value in $a0.
ASM_PFX(AsmInternalSyncCompareExchange32):
1:
ll.w $t0, $a0, 0x0
bne $t0, $a1, 2f    # observed != CompareValue -> fail path
move $t1, $a2
sc.w $t1, $a0, 0x0  # sc.w: 1 = success, 0 = failure
beqz $t1, 1b        # lost the link -> retry
b 3f
2:
dbar 0
3:
move $a0, $t0
jirl $zero, $ra, 0
/**
UINT64
EFIAPI
AsmInternalSyncCompareExchange64 (
IN volatile UINT64 *Value,
IN UINT64 CompareValue,
IN UINT64 ExchangeValue
)
**/
# 64-bit LL/SC compare-exchange; returns the original *Value in $a0.
ASM_PFX(AsmInternalSyncCompareExchange64):
1:
ll.d $t0, $a0, 0x0
bne $t0, $a1, 2f    # observed != CompareValue -> fail path
move $t1, $a2
sc.d $t1, $a0, 0x0  # sc.d: 1 = success, 0 = failure
beqz $t1, 1b        # lost the link -> retry
b 3f
2:
dbar 0
3:
move $a0, $t0
jirl $zero, $ra, 0
/**
UINT32
EFIAPI
AsmInternalSyncIncrement (
IN volatile UINT32 *Value
)
**/
# Atomic add of +1; the old value is discarded ($zero destination).
# NOTE(review): the ld.w re-read is a separate, non-atomic access --
# it may observe further updates by other harts, not old+1; confirm
# callers tolerate this.
ASM_PFX(AsmInternalSyncIncrement):
li.w $t0, 1
amadd.w $zero, $t0, $a0
ld.w $a0, $a0, 0
jirl $zero, $ra, 0
/**
UINT32
EFIAPI
AsmInternalSyncDecrement (
IN volatile UINT32 *Value
)
**/
# Atomic add of -1; same non-atomic re-read caveat as the increment:
# the trailing ld.w may observe a later value than old-1.
ASM_PFX(AsmInternalSyncDecrement):
li.w $t0, -1
amadd.w $zero, $t0, $a0
ld.w $a0, $a0, 0
jirl $zero, $ra, 0
.end
|
acidanthera/audk
| 5,977
|
MdePkg/Library/BaseSynchronizationLib/Arm/Synchronization.S
|
// Implementation of synchronization functions for ARM architecture
//
// Copyright (c) 2012-2015, ARM Limited. All rights reserved.
// Copyright (c) 2015, Linaro Limited. All rights reserved.
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//
.text
.align 3
GCC_ASM_EXPORT(InternalSyncCompareExchange16)
GCC_ASM_EXPORT(InternalSyncCompareExchange32)
GCC_ASM_EXPORT(InternalSyncCompareExchange64)
GCC_ASM_EXPORT(InternalSyncIncrement)
GCC_ASM_EXPORT(InternalSyncDecrement)
/**
Performs an atomic compare exchange operation on a 16-bit unsigned integer.
Performs an atomic compare exchange operation on the 16-bit unsigned integer
specified by Value. If Value is equal to CompareValue, then Value is set to
ExchangeValue and CompareValue is returned. If Value is not equal to CompareValue,
then Value is returned. The compare exchange operation must be performed using
MP safe mechanisms.
@param Value A pointer to the 16-bit value for the compare exchange
operation.
@param CompareValue 16-bit value used in compare operation.
@param ExchangeValue 16-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT16
//EFIAPI
//InternalSyncCompareExchange16 (
// IN volatile UINT16 *Value,
// IN UINT16 CompareValue,
// IN UINT16 ExchangeValue
// )
// r0 = Value ptr, r1 = CompareValue, r2 = ExchangeValue; returns the
// original halfword in r0.  r3 = observed value, ip = strexh status.
ASM_PFX(InternalSyncCompareExchange16):
dmb
InternalSyncCompareExchange16Again:
ldrexh r3, [r0]
cmp r3, r1
bne InternalSyncCompareExchange16Fail
InternalSyncCompareExchange16Exchange:
strexh ip, r2, [r0] // ip = 0 on success, 1 if the exclusive was lost
cmp ip, #0
bne InternalSyncCompareExchange16Again
InternalSyncCompareExchange16Fail:
dmb
mov r0, r3
bx lr
/**
Performs an atomic compare exchange operation on a 32-bit unsigned integer.
Performs an atomic compare exchange operation on the 32-bit unsigned integer
specified by Value. If Value is equal to CompareValue, then Value is set to
ExchangeValue and CompareValue is returned. If Value is not equal to CompareValue,
then Value is returned. The compare exchange operation must be performed using
MP safe mechanisms.
@param Value A pointer to the 32-bit value for the compare exchange
operation.
@param CompareValue 32-bit value used in compare operation.
@param ExchangeValue 32-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT32
//EFIAPI
//InternalSyncCompareExchange32 (
// IN volatile UINT32 *Value,
// IN UINT32 CompareValue,
// IN UINT32 ExchangeValue
// )
// r0 = Value ptr, r1 = CompareValue, r2 = ExchangeValue; returns the
// original word in r0.  r3 = observed value, ip = strex status.
ASM_PFX(InternalSyncCompareExchange32):
dmb
InternalSyncCompareExchange32Again:
ldrex r3, [r0]
cmp r3, r1
bne InternalSyncCompareExchange32Fail
InternalSyncCompareExchange32Exchange:
strex ip, r2, [r0] // ip = 0 on success, 1 if the exclusive was lost
cmp ip, #0
bne InternalSyncCompareExchange32Again
InternalSyncCompareExchange32Fail:
dmb
mov r0, r3
bx lr
/**
Performs an atomic compare exchange operation on a 64-bit unsigned integer.
Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
by Value. If Value is equal to CompareValue, then Value is set to ExchangeValue and
CompareValue is returned. If Value is not equal to CompareValue, then Value is returned.
The compare exchange operation must be performed using MP safe mechanisms.
@param Value A pointer to the 64-bit value for the compare exchange
operation.
@param CompareValue 64-bit value used in compare operation.
@param ExchangeValue 64-bit value used in exchange operation.
@return The original *Value before exchange.
**/
//UINT64
//EFIAPI
//InternalSyncCompareExchange64 (
// IN volatile UINT64 *Value, // r0
// IN UINT64 CompareValue, // r2-r3
// IN UINT64 ExchangeValue // stack
// )
// AAPCS layout: r0 = Value ptr, r2:r3 = CompareValue (r1 skipped for
// 8-byte alignment), ExchangeValue on the stack -- at [sp,#16] after
// the 16-byte push below.  Returns the original value in r0:r1.
ASM_PFX(InternalSyncCompareExchange64):
push { r4-r7 }
ldrd r4, r5, [sp, #16] // r4:r5 = ExchangeValue (caller's stack arg)
dmb
InternalSyncCompareExchange64Again:
ldrexd r6, r7, [r0]
cmp r6, r2
cmpeq r7, r3 // both halves must match CompareValue
bne InternalSyncCompareExchange64Fail
InternalSyncCompareExchange64Exchange:
strexd ip, r4, r5, [r0] // ip = 0 on success, 1 if the exclusive was lost
cmp ip, #0
bne InternalSyncCompareExchange64Again
InternalSyncCompareExchange64Fail:
dmb
mov r0, r6
mov r1, r7
pop { r4-r7 }
bx lr
/**
Performs an atomic increment of an 32-bit unsigned integer.
Performs an atomic increment of the 32-bit unsigned integer specified by
Value and returns the incremented value. The increment operation must be
performed using MP safe mechanisms. The state of the return value is not
guaranteed to be MP safe.
@param Value A pointer to the 32-bit value to increment.
@return The incremented value.
**/
//UINT32
//EFIAPI
//InternalSyncIncrement (
// IN volatile UINT32 *Value
// )
// r0 = Value ptr; returns the incremented value (r1) in r0.
ASM_PFX(InternalSyncIncrement):
dmb
TryInternalSyncIncrement:
ldrex r1, [r0]
add r1, r1, #1
strex r2, r1, [r0] // r2 = 0 on success; retry if the exclusive was lost
cmp r2, #0
bne TryInternalSyncIncrement
dmb
mov r0, r1
bx lr
/**
Performs an atomic decrement of an 32-bit unsigned integer.
Performs an atomic decrement of the 32-bit unsigned integer specified by
Value and returns the decrement value. The decrement operation must be
performed using MP safe mechanisms. The state of the return value is not
guaranteed to be MP safe.
@param Value A pointer to the 32-bit value to decrement.
@return The decrement value.
**/
//UINT32
//EFIAPI
//InternalSyncDecrement (
// IN volatile UINT32 *Value
// )
// r0 = Value ptr; returns the decremented value (r1) in r0.
ASM_PFX(InternalSyncDecrement):
dmb
TryInternalSyncDecrement:
ldrex r1, [r0]
sub r1, r1, #1
strex r2, r1, [r0] // r2 = 0 on success; retry if the exclusive was lost
cmp r2, #0
bne TryInternalSyncDecrement
dmb
mov r0, r1
bx lr
|
acidanthera/audk
| 3,771
|
MdePkg/Library/BaseIoLibIntrinsic/AArch64/ArmVirtMmio.S
|
#
# Copyright (c) 2014-2018, Linaro Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
.text
.align 3
GCC_ASM_EXPORT(MmioRead8Internal)
GCC_ASM_EXPORT(MmioWrite8Internal)
GCC_ASM_EXPORT(MmioRead16Internal)
GCC_ASM_EXPORT(MmioWrite16Internal)
GCC_ASM_EXPORT(MmioRead32Internal)
GCC_ASM_EXPORT(MmioWrite32Internal)
GCC_ASM_EXPORT(MmioRead64Internal)
GCC_ASM_EXPORT(MmioWrite64Internal)
//
// Reads an 8-bit MMIO register.
//
// Reads the 8-bit MMIO register specified by Address. The 8-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
ASM_PFX(MmioRead8Internal):
AARCH64_BTI(c)
ldrb w0, [x0] // w0 = *(UINT8 *)Address
dmb ld // order this read before all subsequent accesses
ret
//
// Writes an 8-bit MMIO register.
//
// Writes the 8-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
ASM_PFX(MmioWrite8Internal):
AARCH64_BTI(c)
dmb st // complete earlier stores before the MMIO write
strb w1, [x0] // *(UINT8 *)Address = Value
ret
//
// Reads a 16-bit MMIO register.
//
// Reads the 16-bit MMIO register specified by Address. The 16-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
ASM_PFX(MmioRead16Internal):
AARCH64_BTI(c)
ldrh w0, [x0] // w0 = *(UINT16 *)Address
dmb ld // order this read before all subsequent accesses
ret
//
// Writes a 16-bit MMIO register.
//
// Writes the 16-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
ASM_PFX(MmioWrite16Internal):
AARCH64_BTI(c)
dmb st // complete earlier stores before the MMIO write
strh w1, [x0] // *(UINT16 *)Address = Value
ret
//
// Reads a 32-bit MMIO register.
//
// Reads the 32-bit MMIO register specified by Address. The 32-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
ASM_PFX(MmioRead32Internal):
AARCH64_BTI(c)
ldr w0, [x0] // w0 = *(UINT32 *)Address
dmb ld // order this read before all subsequent accesses
ret
//
// Writes a 32-bit MMIO register.
//
// Writes the 32-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
ASM_PFX(MmioWrite32Internal):
AARCH64_BTI(c)
dmb st // complete earlier stores before the MMIO write
str w1, [x0] // *(UINT32 *)Address = Value
ret
//
// Reads a 64-bit MMIO register.
//
// Reads the 64-bit MMIO register specified by Address. The 64-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
ASM_PFX(MmioRead64Internal):
AARCH64_BTI(c)
ldr x0, [x0] // x0 = *(UINT64 *)Address
dmb ld // order this read before all subsequent accesses
ret
//
// Writes a 64-bit MMIO register.
//
// Writes the 64-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
ASM_PFX(MmioWrite64Internal):
AARCH64_BTI(c)
dmb st // complete earlier stores before the MMIO write
str x1, [x0] // *(UINT64 *)Address = Value
ret
|
acidanthera/audk
| 3,693
|
MdePkg/Library/BaseIoLibIntrinsic/Arm/ArmVirtMmio.S
|
#
# Copyright (c) 2014-2018, Linaro Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
GCC_ASM_EXPORT(MmioRead8Internal)
GCC_ASM_EXPORT(MmioWrite8Internal)
GCC_ASM_EXPORT(MmioRead16Internal)
GCC_ASM_EXPORT(MmioWrite16Internal)
GCC_ASM_EXPORT(MmioRead32Internal)
GCC_ASM_EXPORT(MmioWrite32Internal)
GCC_ASM_EXPORT(MmioRead64Internal)
GCC_ASM_EXPORT(MmioWrite64Internal)
//
// Reads an 8-bit MMIO register.
//
// Reads the 8-bit MMIO register specified by Address. The 8-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
// UINT8 MmioRead8Internal (UINTN Address) — ARM: r0 = Address, result in r0.
ASM_PFX(MmioRead8Internal):
ldrb r0, [r0]         // 8-bit MMIO load (zero-extended into r0)
dmb                   // full barrier: order the read against later accesses
bx lr
//
// Writes an 8-bit MMIO register.
//
// Writes the 8-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
// VOID MmioWrite8Internal (UINTN Address, UINT8 Value) — r0 = Address, r1 = Value.
ASM_PFX(MmioWrite8Internal):
dmb st                // drain prior stores before the MMIO write
strb r1, [r0]         // 8-bit MMIO store
bx lr
//
// Reads a 16-bit MMIO register.
//
// Reads the 16-bit MMIO register specified by Address. The 16-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
// UINT16 MmioRead16Internal (UINTN Address) — r0 = Address, result in r0.
ASM_PFX(MmioRead16Internal):
ldrh r0, [r0]         // 16-bit MMIO load (zero-extended into r0)
dmb                   // full barrier: order the read against later accesses
bx lr
//
// Writes a 16-bit MMIO register.
//
// Writes the 16-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
// VOID MmioWrite16Internal (UINTN Address, UINT16 Value) — r0 = Address, r1 = Value.
ASM_PFX(MmioWrite16Internal):
dmb st                // drain prior stores before the MMIO write
strh r1, [r0]         // 16-bit MMIO store
bx lr
//
// Reads a 32-bit MMIO register.
//
// Reads the 32-bit MMIO register specified by Address. The 32-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
// UINT32 MmioRead32Internal (UINTN Address) — r0 = Address, result in r0.
ASM_PFX(MmioRead32Internal):
ldr r0, [r0]          // 32-bit MMIO load
dmb                   // full barrier: order the read against later accesses
bx lr
//
// Writes a 32-bit MMIO register.
//
// Writes the 32-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
// VOID MmioWrite32Internal (UINTN Address, UINT32 Value) — r0 = Address, r1 = Value.
ASM_PFX(MmioWrite32Internal):
dmb st                // drain prior stores before the MMIO write
str r1, [r0]          // 32-bit MMIO store
bx lr
//
// Reads a 64-bit MMIO register.
//
// Reads the 64-bit MMIO register specified by Address. The 64-bit read value is
// returned. This function must guarantee that all MMIO read and write
// operations are serialized.
//
// @param Address The MMIO register to read.
//
// @return The value read.
//
// UINT64 MmioRead64Internal (UINTN Address) — r0 = Address;
// returns low word in r0, high word in r1 (little-endian AAPCS pair).
ASM_PFX(MmioRead64Internal):
ldr r1, [r0, #4]      // load high word first, so the address in r0 is clobbered last
ldr r0, [r0]          // load low word over the address
dmb                   // full barrier: order the reads against later accesses
bx lr
//
// Writes a 64-bit MMIO register.
//
// Writes the 64-bit MMIO register specified by Address with the value specified
// by Value and returns Value. This function must guarantee that all MMIO read
// and write operations are serialized.
//
// @param Address The MMIO register to write.
// @param Value The value to write to the MMIO register.
//
// VOID MmioWrite64Internal (UINTN Address, UINT64 Value) — r0 = Address;
// Value arrives in r2:r3 (AAPCS aligns 64-bit arguments to an even register pair,
// so r1 is skipped).
ASM_PFX(MmioWrite64Internal):
dmb st                // drain prior stores before the MMIO writes
str r2, [r0]          // low word
str r3, [r0, #4]      // high word
bx lr
|
acidanthera/audk
| 6,844
|
MdePkg/Library/BaseMemoryLibOptDxe/AArch64/CopyMem.S
|
//
// Copyright (c) 2012 - 2016, Linaro Limited
// All rights reserved.
// Copyright (c) 2015 ARM Ltd
// All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
// Assumptions:
//
// ARMv8-a, AArch64, unaligned accesses.
//
//
#define dstin x0
#define src x1
#define count x2
#define dst x3
#define srcend x4
#define dstend x5
#define A_l x6
#define A_lw w6
#define A_h x7
#define A_hw w7
#define B_l x8
#define B_lw w8
#define B_h x9
#define C_l x10
#define C_h x11
#define D_l x12
#define D_h x13
#define E_l x14
#define E_h x15
#define F_l srcend
#define F_h dst
#define tmp1 x9
#define tmp2 x3
#define L(l) .L ## l
// Copies are split into 3 main cases: small copies of up to 16 bytes,
// medium copies of 17..96 bytes which are fully unrolled. Large copies
// of more than 96 bytes align the destination and use an unrolled loop
// processing 64 bytes per iteration.
// Small and medium copies read all data before writing, allowing any
// kind of overlap, and memmove tailcalls memcpy for these cases as
// well as non-overlapping copies.
// Local memcpy core: dstin = x0, src = x1, count = x2 (register aliases
// defined above). Reads all data before writing for the small/medium
// cases, so overlapping buffers are handled there; large copies are
// strictly forward.
__memcpy:
prfm PLDL1KEEP, [src]
add srcend, src, count
add dstend, dstin, count
cmp count, 16
b.ls L(copy16)
cmp count, 96
b.hi L(copy_long)
// Medium copies: 17..96 bytes.
sub tmp1, count, 1
ldp A_l, A_h, [src]
tbnz tmp1, 6, L(copy96)               // bit 6 of count-1 set => 64..96 bytes
ldp D_l, D_h, [srcend, -16]
tbz tmp1, 5, 1f                       // bit 5 clear => at most 32 bytes
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [srcend, -32]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstend, -32]
1:
// First/last 16 bytes; overlapping stores cover any remainder.
stp A_l, A_h, [dstin]
stp D_l, D_h, [dstend, -16]
ret
.p2align 4
// Small copies: 0..16 bytes.
L(copy16):
cmp count, 8
b.lo 1f
// 8..16 bytes: two possibly-overlapping 8-byte moves.
ldr A_l, [src]
ldr A_h, [srcend, -8]
str A_l, [dstin]
str A_h, [dstend, -8]
ret
.p2align 4
1:
// 4..7 bytes: two possibly-overlapping 4-byte moves.
tbz count, 2, 1f
ldr A_lw, [src]
ldr A_hw, [srcend, -4]
str A_lw, [dstin]
str A_hw, [dstend, -4]
ret
// Copy 0..3 bytes. Use a branchless sequence that copies the same
// byte 3 times if count==1, or the 2nd byte twice if count==2.
1:
cbz count, 2f
lsr tmp1, count, 1
ldrb A_lw, [src]
ldrb A_hw, [srcend, -1]
ldrb B_lw, [src, tmp1]
strb A_lw, [dstin]
strb B_lw, [dstin, tmp1]
strb A_hw, [dstend, -1]
2: ret
.p2align 4
// Copy 64..96 bytes. Copy 64 bytes from the start and
// 32 bytes from the end.
L(copy96):
ldp B_l, B_h, [src, 16]
ldp C_l, C_h, [src, 32]
ldp D_l, D_h, [src, 48]
ldp E_l, E_h, [srcend, -32]
ldp F_l, F_h, [srcend, -16]
stp A_l, A_h, [dstin]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin, 32]
stp D_l, D_h, [dstin, 48]
stp E_l, E_h, [dstend, -32]
stp F_l, F_h, [dstend, -16]
ret
// Align DST to 16 byte alignment so that we don't cross cache line
// boundaries on both loads and stores. There are at least 96 bytes
// to copy, so copy 16 bytes unaligned and then align. The loop
// copies 64 bytes per iteration and prefetches one iteration ahead.
.p2align 4
L(copy_long):
and tmp1, dstin, 15
bic dst, dstin, 15
ldp D_l, D_h, [src]
sub src, src, tmp1                    // src mirrors dst's misalignment adjustment
add count, count, tmp1 // Count is now 16 too large.
ldp A_l, A_h, [src, 16]
stp D_l, D_h, [dstin]                 // unaligned head store
ldp B_l, B_h, [src, 32]
ldp C_l, C_h, [src, 48]
ldp D_l, D_h, [src, 64]!
subs count, count, 128 + 16 // Test and readjust count.
b.ls 2f
1:
// Software-pipelined: store the previous iteration's data, load the next.
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [src, 16]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [src, 32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [src, 48]
stp D_l, D_h, [dst, 64]!
ldp D_l, D_h, [src, 64]!
subs count, count, 64
b.hi 1b
// Write the last full set of 64 bytes. The remainder is at most 64
// bytes, so it is safe to always copy 64 bytes from the end even if
// there is just 1 byte left.
2:
ldp E_l, E_h, [srcend, -64]
stp A_l, A_h, [dst, 16]
ldp A_l, A_h, [srcend, -48]
stp B_l, B_h, [dst, 32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dst, 48]
ldp C_l, C_h, [srcend, -16]
stp D_l, D_h, [dst, 64]
stp E_l, E_h, [dstend, -64]
stp A_l, A_h, [dstend, -48]
stp B_l, B_h, [dstend, -32]
stp C_l, C_h, [dstend, -16]
ret
//
// All memmoves up to 96 bytes are done by memcpy as it supports overlaps.
// Larger backwards copies are also handled by memcpy. The only remaining
// case is forward large copies. The destination is aligned, and an
// unrolled loop processes 64 bytes per iteration.
//
//
// All memmoves up to 96 bytes are done by memcpy as it supports overlaps.
// Larger backwards copies are also handled by memcpy. The only remaining
// case is forward large copies. The destination is aligned, and an
// unrolled loop processes 64 bytes per iteration.
//
// VOID *InternalMemCopyMem (VOID *Dst, CONST VOID *Src, UINTN Len)
// x0 = dstin, x1 = src, x2 = count (aliases defined above).
ASM_GLOBAL ASM_PFX(InternalMemCopyMem)
ASM_PFX(InternalMemCopyMem):
AARCH64_BTI(c)
sub tmp2, dstin, src                  // tmp2 = dst - src (unsigned distance)
cmp count, 96
ccmp tmp2, count, 2, hi               // if count > 96, also require dst-src >= count
b.hs __memcpy                         // small, or non-forward-overlapping: tail-call memcpy
cbz tmp2, 3f                          // dst == src: nothing to do
add dstend, dstin, count
add srcend, src, count
// Align dstend to 16 byte alignment so that we don't cross cache line
// boundaries on both loads and stores. There are at least 96 bytes
// to copy, so copy 16 bytes unaligned and then align. The loop
// copies 64 bytes per iteration and prefetches one iteration ahead.
and tmp2, dstend, 15
ldp D_l, D_h, [srcend, -16]
sub srcend, srcend, tmp2
sub count, count, tmp2
ldp A_l, A_h, [srcend, -16]
stp D_l, D_h, [dstend, -16]           // unaligned tail store (copy runs backwards)
ldp B_l, B_h, [srcend, -32]
ldp C_l, C_h, [srcend, -48]
ldp D_l, D_h, [srcend, -64]!
sub dstend, dstend, tmp2
subs count, count, 128
b.ls 2f
nop
1:
// Software-pipelined backwards loop: 64 bytes per iteration.
stp A_l, A_h, [dstend, -16]
ldp A_l, A_h, [srcend, -16]
stp B_l, B_h, [dstend, -32]
ldp B_l, B_h, [srcend, -32]
stp C_l, C_h, [dstend, -48]
ldp C_l, C_h, [srcend, -48]
stp D_l, D_h, [dstend, -64]!
ldp D_l, D_h, [srcend, -64]!
subs count, count, 64
b.hi 1b
// Write the last full set of 64 bytes. The remainder is at most 64
// bytes, so it is safe to always copy 64 bytes from the start even if
// there is just 1 byte left.
2:
ldp E_l, E_h, [src, 48]
stp A_l, A_h, [dstend, -16]
ldp A_l, A_h, [src, 32]
stp B_l, B_h, [dstend, -32]
ldp B_l, B_h, [src, 16]
stp C_l, C_h, [dstend, -48]
ldp C_l, C_h, [src]
stp D_l, D_h, [dstend, -64]
stp E_l, E_h, [dstin, 48]
stp A_l, A_h, [dstin, 32]
stp B_l, B_h, [dstin, 16]
stp C_l, C_h, [dstin]
3: ret
|
acidanthera/audk
| 4,176
|
MdePkg/Library/BaseMemoryLibOptDxe/AArch64/ScanMem.S
|
//
// Copyright (c) 2014, ARM Limited
// All rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
// Assumptions:
//
// ARMv8-a, AArch64
// Neon Available.
//
// Arguments and results.
#define srcin x0
#define cntin x1
#define chrin w2
#define result x0
#define src x3
#define tmp x4
#define wtmp2 w5
#define synd x6
#define soff x9
#define cntrem x10
#define vrepchr v0
#define vdata1 v1
#define vdata2 v2
#define vhas_chr1 v3
#define vhas_chr2 v4
#define vrepmask v5
#define vend v6
//
// Core algorithm:
//
// For each 32-byte chunk we calculate a 64-bit syndrome value, with two bits
// per byte. For each tuple, bit 0 is set if the relevant byte matched the
// requested character and bit 1 is not used (faster than using a 32bit
// syndrome). Since the bits in the syndrome reflect exactly the order in which
// things occur in the original string, counting trailing zeros allows to
// identify exactly which byte has matched.
//
// CONST VOID *InternalMemScanMem8 (CONST VOID *Buf, UINTN Len, UINT8 Val)
// x0 = srcin, x1 = cntin, w2 = chrin (aliases defined above).
// Returns a pointer to the first matching byte, or NULL if not found.
ASM_GLOBAL ASM_PFX(InternalMemScanMem8)
ASM_PFX(InternalMemScanMem8):
AARCH64_BTI(c)
// Do not dereference srcin if no bytes to compare.
cbz cntin, .Lzero_length
//
// Magic constant 0x40100401 allows us to identify which lane matches
// the requested byte.
//
mov wtmp2, #0x0401
movk wtmp2, #0x4010, lsl #16
dup vrepchr.16b, chrin
// Work with aligned 32-byte chunks
bic src, srcin, #31
dup vrepmask.4s, wtmp2
ands soff, srcin, #31                 // soff = misalignment within the first chunk
and cntrem, cntin, #31                // cntrem = leftover bytes modulo 32
b.eq .Lloop
//
// Input string is not 32-byte aligned. We calculate the syndrome
// value for the aligned 32 bytes block containing the first bytes
// and mask the irrelevant part.
//
ld1 {vdata1.16b, vdata2.16b}, [src], #32
sub tmp, soff, #32
adds cntin, cntin, tmp                // account for the bytes consumed before srcin
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend.16b, vend.16b, vend.16b // 128->64
mov synd, vend.d[0]
// Clear the soff*2 lower bits
lsl tmp, soff, #1
lsr synd, synd, tmp
lsl synd, synd, tmp
// The first block can also be the last
b.ls .Lmasklast
// Have we found something already?
cbnz synd, .Ltail
.Lloop:
ld1 {vdata1.16b, vdata2.16b}, [src], #32
subs cntin, cntin, #32
cmeq vhas_chr1.16b, vdata1.16b, vrepchr.16b
cmeq vhas_chr2.16b, vdata2.16b, vrepchr.16b
// If we're out of data we finish regardless of the result
b.ls .Lend
// Use a fast check for the termination condition
orr vend.16b, vhas_chr1.16b, vhas_chr2.16b
addp vend.2d, vend.2d, vend.2d
mov synd, vend.d[0]
// We're not out of data, loop if we haven't found the character
cbz synd, .Lloop
.Lend:
// Termination condition found, let's calculate the syndrome value
and vhas_chr1.16b, vhas_chr1.16b, vrepmask.16b
and vhas_chr2.16b, vhas_chr2.16b, vrepmask.16b
addp vend.16b, vhas_chr1.16b, vhas_chr2.16b // 256->128
addp vend.16b, vend.16b, vend.16b // 128->64
mov synd, vend.d[0]
// Only do the clear for the last possible block
b.hi .Ltail
.Lmasklast:
// Clear the (32 - ((cntrem + soff) % 32)) * 2 upper bits
add tmp, cntrem, soff
and tmp, tmp, #31
sub tmp, tmp, #32
neg tmp, tmp, lsl #1
lsl synd, synd, tmp
lsr synd, synd, tmp
.Ltail:
// Count the trailing zeros using bit reversing
rbit synd, synd
// Compensate the last post-increment
sub src, src, #32
// Check that we have found a character
cmp synd, #0
// And count the leading zeros
clz synd, synd
// Compute the potential result (two syndrome bits per byte, hence lsr #1)
add result, src, synd, lsr #1
// Select result or NULL
csel result, xzr, result, eq
ret
.Lzero_length:
mov result, #0
ret
|
acidanthera/audk
| 3,384
|
MdePkg/Library/BaseMemoryLibOptDxe/AArch64/CompareMem.S
|
//
// Copyright (c) 2013, Linaro Limited
// All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
// Assumptions:
//
// ARMv8-a, AArch64
//
// Parameters and result.
#define src1 x0
#define src2 x1
#define limit x2
#define result x0
// Internal variables.
#define data1 x3
#define data1w w3
#define data2 x4
#define data2w w4
#define diff x6
#define endloop x7
#define tmp1 x8
#define tmp2 x9
#define pos x11
#define limit_wd x12
#define mask x13
.p2align 6
// INTN InternalMemCompareMem (CONST VOID *S1, CONST VOID *S2, UINTN Len)
// x0 = src1, x1 = src2, x2 = limit (aliases defined above).
// Returns <0 / 0 / >0 like memcmp, comparing bytes as unsigned.
ASM_GLOBAL ASM_PFX(InternalMemCompareMem)
ASM_PFX(InternalMemCompareMem):
AARCH64_BTI(c)
eor tmp1, src1, src2
tst tmp1, #7                          // same alignment mod 8?
b.ne .Lmisaligned8
ands tmp1, src1, #7
b.ne .Lmutual_align
add limit_wd, limit, #7
lsr limit_wd, limit_wd, #3            // limit_wd = number of 8-byte words (rounded up)
// Start of performance-critical section -- one 64B cache line.
.Lloop_aligned:
ldr data1, [src1], #8
ldr data2, [src2], #8
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 // Non-zero if differences found.
csinv endloop, diff, xzr, ne // Last Dword or differences.
cbz endloop, .Lloop_aligned
// End of performance-critical section -- one 64B cache line.
// Not reached the limit, must have found a diff.
cbnz limit_wd, .Lnot_limit
// Limit % 8 == 0 => all bytes significant.
ands limit, limit, #7
b.eq .Lnot_limit
lsl limit, limit, #3 // Bits -> bytes.
mov mask, #~0
lsl mask, mask, limit                 // mask covers the insignificant high bytes
bic data1, data1, mask
bic data2, data2, mask
orr diff, diff, mask
.Lnot_limit:
rev diff, diff
rev data1, data1
rev data2, data2
// The MS-non-zero bit of DIFF marks either the first bit
// that is different, or the end of the significant data.
// Shifting left now will bring the critical information into the
// top bits.
clz pos, diff
lsl data1, data1, pos
lsl data2, data2, pos
// But we need to zero-extend (char is unsigned) the value and then
// perform a signed 32-bit subtraction.
lsr data1, data1, #56
sub result, data1, data2, lsr #56
ret
.Lmutual_align:
// Sources are mutually aligned, but are not currently at an
// alignment boundary. Round down the addresses and then mask off
// the bytes that precede the start point.
bic src1, src1, #7
bic src2, src2, #7
add limit, limit, tmp1 // Adjust the limit for the extra.
lsl tmp1, tmp1, #3 // Bytes beyond alignment -> bits.
ldr data1, [src1], #8
neg tmp1, tmp1 // Bits to alignment -64.
ldr data2, [src2], #8
mov tmp2, #~0
// Little-endian. Early bytes are at LSB.
lsr tmp2, tmp2, tmp1 // Shift (tmp1 & 63).
add limit_wd, limit, #7
orr data1, data1, tmp2                // force pre-start bytes equal in both words
orr data2, data2, tmp2
lsr limit_wd, limit_wd, #3
b .Lstart_realigned
.p2align 6
.Lmisaligned8:
// Differently-aligned sources: simple byte-at-a-time compare.
sub limit, limit, #1
1:
// Perhaps we can do better than this.
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
subs limit, limit, #1
ccmp data1w, data2w, #0, cs // NZCV = 0b0000.
b.eq 1b
sub result, data1, data2
ret
|
acidanthera/audk
| 5,062
|
MdePkg/Library/BaseMemoryLibOptDxe/AArch64/SetMem.S
|
//
// Copyright (c) 2012 - 2016, Linaro Limited
// All rights reserved.
// Copyright (c) 2015 ARM Ltd
// All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
// Assumptions:
//
// ARMv8-a, AArch64, unaligned accesses
//
//
#define dstin x0
#define count x1
#define val x2
#define valw w2
#define dst x3
#define dstend x4
#define tmp1 x5
#define tmp1w w5
#define tmp2 x6
#define tmp2w w6
#define zva_len x7
#define zva_lenw w7
#define L(l) .L ## l
// SetMem family: all five entry points broadcast the fill pattern into v0,
// convert the element count into a byte count, then fall into the shared
// memset body at label 0. dstin = x0, count = x1, val/valw = x2/w2.
ASM_GLOBAL ASM_PFX(InternalMemSetMem16)
ASM_PFX(InternalMemSetMem16):
AARCH64_BTI(c)
dup v0.8H, valw                       // replicate 16-bit value
lsl count, count, #1                  // elements -> bytes
b 0f
ASM_GLOBAL ASM_PFX(InternalMemSetMem32)
ASM_PFX(InternalMemSetMem32):
AARCH64_BTI(c)
dup v0.4S, valw                       // replicate 32-bit value
lsl count, count, #2                  // elements -> bytes
b 0f
ASM_GLOBAL ASM_PFX(InternalMemSetMem64)
ASM_PFX(InternalMemSetMem64):
AARCH64_BTI(c)
dup v0.2D, val                        // replicate 64-bit value
lsl count, count, #3                  // elements -> bytes
b 0f
ASM_GLOBAL ASM_PFX(InternalMemZeroMem)
ASM_PFX(InternalMemZeroMem):
AARCH64_BTI(c)
movi v0.16B, #0
b 0f
ASM_GLOBAL ASM_PFX(InternalMemSetMem)
ASM_PFX(InternalMemSetMem):
AARCH64_BTI(c)
dup v0.16B, valw                      // replicate 8-bit value
0: add dstend, dstin, count
mov val, v0.D[0]                      // scalar copy of the pattern for small stores
cmp count, 96
b.hi L(set_long)
cmp count, 16
b.hs L(set_medium)
// Set 0..15 bytes.
tbz count, 3, 1f
str val, [dstin]
str val, [dstend, -8]                 // overlapping stores cover 8..15 bytes
ret
nop
1: tbz count, 2, 2f
str valw, [dstin]
str valw, [dstend, -4]                // overlapping stores cover 4..7 bytes
ret
2: cbz count, 3f
strb valw, [dstin]
tbz count, 1, 3f
strh valw, [dstend, -2]
3: ret
// Set 16..96 bytes.
L(set_medium):
str q0, [dstin]
tbnz count, 6, L(set96)               // bit 6 set => 64..96 bytes
str q0, [dstend, -16]
tbz count, 5, 1f
str q0, [dstin, 16]
str q0, [dstend, -32]
1: ret
.p2align 4
// Set 64..96 bytes. Write 64 bytes from the start and
// 32 bytes from the end.
L(set96):
str q0, [dstin, 16]
stp q0, q0, [dstin, 32]
stp q0, q0, [dstend, -32]
ret
.p2align 3
nop
L(set_long):
bic dst, dstin, 15
str q0, [dstin]                       // unaligned head store
cmp count, 256
ccmp val, 0, 0, cs                    // DC ZVA only pays off for large zeroing
b.eq L(try_zva)
L(no_zva):
sub count, dstend, dst // Count is 16 too large.
add dst, dst, 16
sub count, count, 64 + 16 // Adjust count and bias for loop.
1: stp q0, q0, [dst], 64
stp q0, q0, [dst, -32]
L(tail64):
subs count, count, 64
b.hi 1b
2: stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(try_zva):
mrs tmp1, dczid_el0                   // query DC ZVA availability and block size
tbnz tmp1w, 4, L(no_zva)              // bit 4 set => DC ZVA prohibited
and tmp1w, tmp1w, 15
cmp tmp1w, 4 // ZVA size is 64 bytes.
b.ne L(zva_128)
// Write the first and last 64 byte aligned block using stp rather
// than using DC ZVA. This is faster on some cores.
L(zva_64):
str q0, [dst, 16]
stp q0, q0, [dst, 32]
bic dst, dst, 63
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
sub count, dstend, dst // Count is now 128 too large.
sub count, count, 128+64+64 // Adjust count and bias for loop.
add dst, dst, 128
nop
1: dc zva, dst                        // zero one 64-byte block without loading it
add dst, dst, 64
subs count, count, 64
b.hi 1b
stp q0, q0, [dst, 0]
stp q0, q0, [dst, 32]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
.p2align 3
L(zva_128):
cmp tmp1w, 5 // ZVA size is 128 bytes.
b.ne L(zva_other)
str q0, [dst, 16]
stp q0, q0, [dst, 32]
stp q0, q0, [dst, 64]
stp q0, q0, [dst, 96]
bic dst, dst, 127
sub count, dstend, dst // Count is now 128 too large.
sub count, count, 128+128 // Adjust count and bias for loop.
add dst, dst, 128
1: dc zva, dst
add dst, dst, 128
subs count, count, 128
b.hi 1b
stp q0, q0, [dstend, -128]
stp q0, q0, [dstend, -96]
stp q0, q0, [dstend, -64]
stp q0, q0, [dstend, -32]
ret
L(zva_other):
// Generic path: ZVA block size = 4 << (dczid_el0 & 15) bytes.
mov tmp2w, 4
lsl zva_lenw, tmp2w, tmp1w
add tmp1, zva_len, 64 // Max alignment bytes written.
cmp count, tmp1
blo L(no_zva)                         // too small to benefit from DC ZVA
sub tmp2, zva_len, 1
add tmp1, dst, zva_len
add dst, dst, 16
subs count, tmp1, dst // Actual alignment bytes to write.
bic tmp1, tmp1, tmp2 // Aligned dc zva start address.
beq 2f
1: stp q0, q0, [dst], 64
stp q0, q0, [dst, -32]
subs count, count, 64
b.hi 1b
2: mov dst, tmp1
sub count, dstend, tmp1 // Remaining bytes to write.
subs count, count, zva_len
b.lo 4f
3: dc zva, dst
add dst, dst, zva_len
subs count, count, zva_len
b.hs 3b
4: add count, count, zva_len
b L(tail64)
|
acidanthera/audk
| 4,271
|
MdePkg/Library/BaseMemoryLibOptDxe/Arm/CopyMem.S
|
#------------------------------------------------------------------------------
#
# CopyMem() worker for ARM
#
# This file started out as C code that did 64 bit moves if the buffer was
# 32-bit aligned, else it does a byte copy. It also does a byte copy for
# any trailing bytes. It was updated to do 32-byte copies using stm/ldm.
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.thumb
.syntax unified
/**
Copy Length bytes from Source to Destination. Overlap is OK.
This implementation handles overlapping source and destination buffers.
@param Destination Target of copy
@param Source Place to copy from
@param Length Number of bytes to copy
@return Destination
VOID *
EFIAPI
InternalMemCopyMem (
OUT VOID *DestinationBuffer,
IN CONST VOID *SourceBuffer,
IN UINTN Length
)
**/
// VOID *InternalMemCopyMem (VOID *Dst, CONST VOID *Src, UINTN Len) — Thumb-2.
// r11 = saved dest (return value), r10 = running dest, r14 = running source,
// r12 = remaining length, r0 = "optimized path" flag (1 => 32-byte ldm/stm).
.type ASM_PFX(InternalMemCopyMem), %function
ASM_GLOBAL ASM_PFX(InternalMemCopyMem)
ASM_PFX(InternalMemCopyMem):
push {r4-r11, lr}
// Save the input parameters in extra registers (r11 = destination, r14 = source, r12 = length)
mov r11, r0
mov r10, r0
mov r12, r2
mov r14, r1
cmp r11, r1
// If (dest < source)
bcc memcopy_check_optim_default
// If (source + length < dest)
rsb r3, r1, r11
cmp r12, r3
bcc memcopy_check_optim_default
// Buffers overlap with dest above source: must copy backwards.
b memcopy_check_optim_overlap
memcopy_check_optim_default:
// Check if we can use an optimized path ((length >= 32) && destination 16-byte aligned && source 16-byte aligned) for the memcopy (optimized path if r0 == 1)
tst r0, #0xF
it ne
movne.n r0, #0
bne memcopy_default
tst r1, #0xF
it ne
movne.n r3, #0
it eq
moveq.n r3, #1
cmp r2, #31
it ls
movls.n r0, #0
bls memcopy_default
and r0, r3, #1
b memcopy_default
memcopy_check_optim_overlap:
// r10 = dest_end, r14 = source_end
add r10, r11, r12
add r14, r12, r1
// Are we in the optimized case ((length >= 32) && dest_end 16-byte aligned && source_end 16-byte aligned)
cmp r2, #31
it ls
movls.n r0, #0
it hi
movhi.n r0, #1
tst r10, #0xF
it ne
movne.n r0, #0
tst r14, #0xF
it ne
movne.n r0, #0
b memcopy_overlapped
memcopy_overlapped_non_optim:
// We read 1 byte from the end of the source buffer
sub r3, r14, #1
sub r12, r12, #1
ldrb r3, [r3, #0]
sub r2, r10, #1
cmp r12, #0
// We write 1 byte at the end of the dest buffer
sub r10, r10, #1
sub r14, r14, #1
strb r3, [r2, #0]
bne memcopy_overlapped_non_optim
b memcopy_end
// r10 = dest_end, r14 = source_end
memcopy_overlapped:
// Are we in the optimized case ?
cmp r0, #0
beq memcopy_overlapped_non_optim
// Optimized Overlapped - Read 32 bytes (backwards, so overlap is safe)
sub r14, r14, #32
sub r12, r12, #32
cmp r12, #31
ldmia r14, {r2-r9}
// If length is less than 32 then disable optim
it ls
movls.n r0, #0
cmp r12, #0
// Optimized Overlapped - Write 32 bytes
sub r10, r10, #32
stmia r10, {r2-r9}
// while (length != 0)
bne memcopy_overlapped
b memcopy_end
memcopy_default_non_optim:
// Byte copy
ldrb r3, [r14], #1
sub r12, r12, #1
strb r3, [r10], #1
memcopy_default:
cmp r12, #0
beq memcopy_end
// r10 = dest, r14 = source
memcopy_default_loop:
cmp r0, #0
beq memcopy_default_non_optim
// Optimized memcopy - Read 32 Bytes
sub r12, r12, #32
cmp r12, #31
ldmia r14!, {r2-r9}
// If length is less than 32 then disable optim
it ls
movls.n r0, #0
cmp r12, #0
// Optimized memcopy - Write 32 Bytes
stmia r10!, {r2-r9}
// while (length != 0)
bne memcopy_default_loop
memcopy_end:
mov r0, r11                           // return the original destination pointer
pop {r4-r11, pc}
|
acidanthera/audk
| 4,196
|
MdePkg/Library/BaseMemoryLibOptDxe/Arm/ScanMem.S
|
// Copyright (c) 2010-2011, Linaro Limited
// All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//
// Written by Dave Gilbert <david.gilbert@linaro.org>
//
// This memchr routine is optimised on a Cortex-A9 and should work on
// all ARMv7 processors. It has a fast path for short sizes, and has
// an optimised path for large data sets; the worst case is finding the
// match early in a large data set.
//
// 2011-02-07 david.gilbert@linaro.org
// Extracted from local git a5b438d861
// 2011-07-14 david.gilbert@linaro.org
// Import endianness fix from local git ea786f1b
// 2011-12-07 david.gilbert@linaro.org
// Removed unneeded cbz from align loop
// this lets us check a flag in a 00/ff byte easily in either endianness
#define CHARTSTMASK(c) 1<<(c*8)
.text
.thumb
.syntax unified
// CONST VOID *InternalMemScanMem8 (CONST VOID *Buf, UINTN Len, UINT8 Val)
// Thumb-2 memchr using SIMD-within-a-register (uadd8/sel) on 8-byte chunks.
.type ASM_PFX(InternalMemScanMem8), %function
ASM_GLOBAL ASM_PFX(InternalMemScanMem8)
ASM_PFX(InternalMemScanMem8):
// r0 = start of memory to scan
// r1 = length
// r2 = character to look for
// returns r0 = pointer to character or NULL if not found
uxtb r2, r2 // Don't think we can trust the caller to actually pass a char
cmp r1, #16 // If it's short don't bother with anything clever
blt 20f
tst r0, #7 // If it's already aligned skip the next bit
beq 10f
// Work up to an aligned point
5:
ldrb r3, [r0],#1
subs r1, r1, #1
cmp r3, r2
beq 50f // If it matches exit found
tst r0, #7
bne 5b // If not aligned yet then do next byte
10:
// At this point, we are aligned, we know we have at least 8 bytes to work with
push {r4-r7}
orr r2, r2, r2, lsl #8 // expand the match word across to all bytes
orr r2, r2, r2, lsl #16
bic r4, r1, #7 // Number of double words to work with
mvns r7, #0 // all F's
movs r3, #0
15:
ldmia r0!, {r5,r6}
subs r4, r4, #8
eor r5, r5, r2 // Get it so that r5,r6 have 00's where the bytes match the target
eor r6, r6, r2
uadd8 r5, r5, r7 // Parallel add 0xff - sets the GE bits for anything that wasn't 0
sel r5, r3, r7 // bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION
uadd8 r6, r6, r7 // Parallel add 0xff - sets the GE bits for anything that wasn't 0
sel r6, r5, r7 // chained....bytes are 00 for non-00 bytes, or ff for 00 bytes - NOTE INVERSION
cbnz r6, 60f
bne 15b // (Flags from the subs above) If not run out of bytes then go around again
pop {r4-r7}
and r2, r2, #0xff // Get r2 back to a single character from the expansion above
and r1, r1, #7 // Leave the count remaining as the number after the double words have been done
20:
cbz r1, 40f // 0 length or hit the end already then not found
21: // Post aligned section, or just a short call
ldrb r3, [r0], #1
subs r1, r1, #1
eor r3, r3, r2 // r3 = 0 if match - doesn't break flags from sub
cbz r3, 50f
bne 21b // on r1 flags
40:
movs r0, #0 // not found
bx lr
50:
subs r0, r0, #1 // found; r0 was post-incremented past the match
bx lr
60: // We're here because the fast path found a hit - now we have to track down exactly which word it was
// r0 points to the start of the double word after the one that was tested
// r5 has the 00/ff pattern for the first word, r6 has the chained value
subs r0, r0, #3
cmp r5, #0
it eq
moveq.n r5, r6 // the end is in the 2nd word
it ne
subne.n r0, r0, #4 // or 2nd byte of 1st word
// r0 currently points to the 3rd byte of the word containing the hit
tst r5, #CHARTSTMASK(0) // 1st character
bne 61f
adds r0, r0, #1
tst r5, #CHARTSTMASK(1) // 2nd character
bne 61f
adds r0, r0 ,#1
tst r5, #(3 << 15) // 2nd & 3rd character
// If not the 3rd must be the last one
it eq
addeq.n r0, r0, #1
61:
pop {r4-r7}
subs r0, r0, #1
bx lr
|
acidanthera/audk
| 3,333
|
MdePkg/Library/BaseMemoryLibOptDxe/Arm/CompareMem.S
|
//
// Copyright (c) 2013 - 2016, Linaro Limited
// All rights reserved.
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
// Parameters and result.
#define src1 r0
#define src2 r1
#define limit r2
#define result r0
// Internal variables.
#define data1 r3
#define data2 r4
#define limit_wd r5
#define diff r6
#define tmp1 r7
#define tmp2 r12
#define pos r8
#define mask r14
.text
.thumb
.syntax unified
.align 5
// INTN InternalMemCompareMem (CONST VOID *S1, CONST VOID *S2, UINTN Len)
// Thumb-2 memcmp: r0 = src1, r1 = src2, r2 = limit (aliases defined above).
// Returns <0 / 0 / >0, comparing bytes as unsigned.
.type ASM_PFX(InternalMemCompareMem), %function
ASM_GLOBAL ASM_PFX(InternalMemCompareMem)
ASM_PFX(InternalMemCompareMem):
push {r4-r8, lr}
eor tmp1, src1, src2
tst tmp1, #3                          // same alignment mod 4?
bne .Lmisaligned4
ands tmp1, src1, #3
bne .Lmutual_align
add limit_wd, limit, #3
nop.w
lsr limit_wd, limit_wd, #2            // limit_wd = number of 4-byte words (rounded up)
// Start of performance-critical section -- one 32B cache line.
.Lloop_aligned:
ldr data1, [src1], #4
ldr data2, [src2], #4
.Lstart_realigned:
subs limit_wd, limit_wd, #1
eor diff, data1, data2 // Non-zero if differences found.
cbnz diff, 0f
bne .Lloop_aligned
// End of performance-critical section -- one 32B cache line.
// Not reached the limit, must have found a diff.
0: cbnz limit_wd, .Lnot_limit
// Limit % 4 == 0 => all bytes significant.
ands limit, limit, #3
beq .Lnot_limit
lsl limit, limit, #3 // Bits -> bytes.
mov mask, #~0
lsl mask, mask, limit                 // mask covers the insignificant high bytes
bic data1, data1, mask
bic data2, data2, mask
orr diff, diff, mask
.Lnot_limit:
rev diff, diff
rev data1, data1
rev data2, data2
// The MS-non-zero bit of DIFF marks either the first bit
// that is different, or the end of the significant data.
// Shifting left now will bring the critical information into the
// top bits.
clz pos, diff
lsl data1, data1, pos
lsl data2, data2, pos
// But we need to zero-extend (char is unsigned) the value and then
// perform a signed 32-bit subtraction.
lsr data1, data1, #28
sub result, data1, data2, lsr #28
pop {r4-r8, pc}
.Lmutual_align:
// Sources are mutually aligned, but are not currently at an
// alignment boundary. Round down the addresses and then mask off
// the bytes that precede the start point.
bic src1, src1, #3
bic src2, src2, #3
add limit, limit, tmp1 // Adjust the limit for the extra.
lsl tmp1, tmp1, #3 // Bytes beyond alignment -> bits.
ldr data1, [src1], #4
rsb tmp1, tmp1, #32 // Bits to alignment -32.
ldr data2, [src2], #4
mov tmp2, #~0
// Little-endian. Early bytes are at LSB.
lsr tmp2, tmp2, tmp1 // Shift (tmp1 & 31).
add limit_wd, limit, #3
orr data1, data1, tmp2                // force pre-start bytes equal in both words
orr data2, data2, tmp2
lsr limit_wd, limit_wd, #2
b .Lstart_realigned
.Lmisaligned4:
// Differently-aligned sources: simple byte-at-a-time compare.
sub limit, limit, #1
1:
// Perhaps we can do better than this.
ldrb data1, [src1], #1
ldrb data2, [src2], #1
subs limit, limit, #1
it cs
cmpcs.n data1, data2
beq 1b
sub result, data1, data2
pop {r4-r8, pc}
|
acidanthera/audk
| 2,832
|
MdePkg/Library/BaseMemoryLibOptDxe/Arm/SetMem.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.thumb
.syntax unified
.align 5
// SetMem family (Thumb-2): r0 = dest, r1 = element count, r2 = value.
// Each entry replicates the value across r2 and converts the count to bytes,
// then all fall into the shared body: r2/r3 hold the 8-byte fill pattern.
.type ASM_PFX(InternalMemSetMem16), %function
ASM_GLOBAL ASM_PFX(InternalMemSetMem16)
ASM_PFX(InternalMemSetMem16):
uxth r2, r2
lsl r1, r1, #1                        // elements -> bytes
orr r2, r2, r2, lsl #16               // replicate 16-bit value across the word
b 0f
.type ASM_PFX(InternalMemSetMem32), %function
ASM_GLOBAL ASM_PFX(InternalMemSetMem32)
ASM_PFX(InternalMemSetMem32):
lsl r1, r1, #2                        // elements -> bytes
b 0f
.type ASM_PFX(InternalMemSetMem64), %function
ASM_GLOBAL ASM_PFX(InternalMemSetMem64)
ASM_PFX(InternalMemSetMem64):
lsl r1, r1, #3                        // elements -> bytes
b 1f                                  // skip "mov r3, r2": value already in r2:r3 (AAPCS pair)
.align 5
.type ASM_PFX(InternalMemSetMem), %function
ASM_GLOBAL ASM_PFX(InternalMemSetMem)
ASM_PFX(InternalMemSetMem):
uxtb r2, r2
orr r2, r2, r2, lsl #8                // replicate 8-bit value across the word
orr r2, r2, r2, lsl #16
b 0f
.type ASM_PFX(InternalMemZeroMem), %function
ASM_GLOBAL ASM_PFX(InternalMemZeroMem)
ASM_PFX(InternalMemZeroMem):
movs r2, #0
0: mov r3, r2                         // duplicate pattern word into r3
1: push {r4, lr}
cmp r1, #16 // fewer than 16 bytes of input?
add r1, r1, r0 // r1 := dst + length
add lr, r0, #16
blt 2f
bic lr, lr, #15 // align output pointer
str r2, [r0] // potentially unaligned store of 4 bytes
str r3, [r0, #4] // potentially unaligned store of 4 bytes
str r2, [r0, #8] // potentially unaligned store of 4 bytes
str r3, [r0, #12] // potentially unaligned store of 4 bytes
beq 1f
0: add lr, lr, #16 // advance the output pointer by 16 bytes
subs r4, r1, lr // past the output?
blt 3f // break out of the loop
strd r2, r3, [lr, #-16] // aligned store of 16 bytes
strd r2, r3, [lr, #-8]
bne 0b // goto beginning of loop
1: pop {r4, pc}
2: subs r4, r1, lr
3: adds r4, r4, #16                   // r4 = remaining tail bytes (0..15)
subs r1, r1, #8
cmp r4, #4 // between 4 and 15 bytes?
blt 4f
cmp r4, #8 // between 8 and 15 bytes?
sub r4, lr, #16
str r2, [r4] // overlapping store of 4 + (4 + 4) + 4 bytes
it gt
strgt.n r3, [r4, #4]
it gt
strgt.n r2, [r1]
str r3, [r1, #4]
pop {r4, pc}
4: cmp r4, #2 // 2 or 3 bytes?
strb r2, [lr, #-16] // store 1 byte
it ge
strhge.n r2, [r1, #6] // store 2 bytes
pop {r4, pc}
|
acidanthera/audk
| 5,102
|
MdePkg/Library/CompilerIntrinsicsLib/AArch64/Atomics.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2020, Arm, Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
/*
* Provide the GCC intrinsics that are required when using GCC 9 or
* later with the -moutline-atomics options (which became the default
* in GCC 10)
*/
// GCC outline-atomics helpers (__aarch64_<op><size>_<model>), implemented
// with generic LL/SC (ldxr/stxr) sequences so they run on any ARMv8-A core.
// w15 holds the store-exclusive status; x16/x17 (IP0/IP1) are scratch,
// which is safe because these helpers use a reduced clobber convention.
.arch armv8-a
.macro reg_alias, pfx, sz
r0_\sz .req \pfx\()0
r1_\sz .req \pfx\()1
tmp0_\sz .req \pfx\()16
tmp1_\sz .req \pfx\()17
.endm
/*
 * Define register aliases of the right type for each size
 * (xN for 8 bytes, wN for everything smaller)
 */
reg_alias w, 1
reg_alias w, 2
reg_alias w, 4
reg_alias x, 8
.macro fn_start, name:req
.section .text.\name
.globl \name
.type \name, %function
\name\():
.endm
.macro fn_end, name:req
.size \name, . - \name
.endm
/*
 * Emit an atomic helper for \model with operands of size \sz, using
 * the operation specified by \insn (which is the LSE name), and which
 * can be implemented using the generic load-locked/store-conditional
 * (LL/SC) sequence below, using the arithmetic operation given by
 * \opc.
 */
.macro emit_ld_sz, sz:req, insn:req, opc:req, model:req, s, a, l
fn_start __aarch64_\insn\()\sz\()\model
mov tmp0_\sz, r0_\sz
0: ld\a\()xr\s r0_\sz, [x1]            // load-exclusive old value (returned in r0)
.ifnc \insn, swp
\opc tmp1_\sz, r0_\sz, tmp0_\sz        // compute new value from old and operand
st\l\()xr\s w15, tmp1_\sz, [x1]
.else
st\l\()xr\s w15, tmp0_\sz, [x1]        // swp: store the operand unchanged
.endif
cbnz w15, 0b                           // store-exclusive failed: retry
ret
fn_end __aarch64_\insn\()\sz\()\model
.endm
/*
 * Emit atomic helpers for \model for operand sizes in the
 * set {1, 2, 4, 8}, for the instruction pattern given by
 * \insn. (This is the LSE name, but this implementation uses
 * the generic LL/SC sequence using \opc as the arithmetic
 * operation on the target.)
 */
.macro emit_ld, insn:req, opc:req, model:req, a, l
emit_ld_sz 1, \insn, \opc, \model, b, \a, \l
emit_ld_sz 2, \insn, \opc, \model, h, \a, \l
emit_ld_sz 4, \insn, \opc, \model, , \a, \l
emit_ld_sz 8, \insn, \opc, \model, , \a, \l
.endm
/*
 * Emit the compare and swap helper for \model and size \sz
 * using LL/SC instructions.
 */
.macro emit_cas_sz, sz:req, model:req, uxt:req, s, a, l
fn_start __aarch64_cas\sz\()\model
\uxt tmp0_\sz, r0_\sz                  // zero-extend expected value for the compare
0: ld\a\()xr\s r0_\sz, [x2]
cmp r0_\sz, tmp0_\sz
bne 1f                                 // mismatch: return observed value, no store
st\l\()xr\s w15, r1_\sz, [x2]
cbnz w15, 0b                           // store-exclusive failed: retry
1: ret
fn_end __aarch64_cas\sz\()\model
.endm
/*
 * Emit compare-and-swap helpers for \model for operand sizes in the
 * set {1, 2, 4, 8, 16}.
 */
.macro emit_cas, model:req, a, l
emit_cas_sz 1, \model, uxtb, b, \a, \l
emit_cas_sz 2, \model, uxth, h, \a, \l
emit_cas_sz 4, \model, mov , , \a, \l
emit_cas_sz 8, \model, mov , , \a, \l
/*
 * We cannot use the parameterized sequence for 16 byte CAS, so we
 * need to define it explicitly.
 */
fn_start __aarch64_cas16\model
mov x16, x0
mov x17, x1
0: ld\a\()xp x0, x1, [x4]              // load-exclusive 128-bit pair
cmp x0, x16
ccmp x1, x17, #0, eq                   // both halves must match the expected pair
bne 1f
st\l\()xp w15, x16, x17, [x4]
cbnz w15, 0b
1: ret
fn_end __aarch64_cas16\model
.endm
/*
 * Emit the set of GCC outline atomic helper functions for
 * the memory ordering model given by \model:
 * - relax unordered loads and stores
 * - acq load-acquire, unordered store
 * - rel unordered load, store-release
 * - acq_rel load-acquire, store-release
 */
.macro emit_model, model:req, a, l
emit_ld ldadd, add, \model, \a, \l
emit_ld ldclr, bic, \model, \a, \l
emit_ld ldeor, eor, \model, \a, \l
emit_ld ldset, orr, \model, \a, \l
emit_ld swp, mov, \model, \a, \l
emit_cas \model, \a, \l
.endm
emit_model _relax
emit_model _acq, a
emit_model _rel,, l
emit_model _acq_rel, a, l
|
acidanthera/audk
| 1,102
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/uwrite.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
#
#UINT32
#EFIAPI
#__aeabi_uwrite4 (
# IN UINT32 Data,
# IN VOID *Pointer
# );
#
ASM_FUNC(__aeabi_uwrite4)
  @ Store the 32-bit value in r0 to the possibly unaligned address in r1,
  @ one byte at a time, little-endian, lowest address first.
  @ r0 (the data) is left untouched and is therefore the return value.
  strb r0, [r1]            @ byte 0 = bits 7:0
  mov r3, r0, lsr #8
  strb r3, [r1, #1]        @ byte 1 = bits 15:8
  mov r3, r0, lsr #16
  strb r3, [r1, #2]        @ byte 2 = bits 23:16
  mov r3, r0, lsr #24
  strb r3, [r1, #3]        @ byte 3 = bits 31:24
  bx lr
#
#UINT64
#EFIAPI
#__aeabi_uwrite8 (
# IN UINT64 Data,
# IN VOID *Pointer
# );
#
ASM_FUNC(__aeabi_uwrite8)
@ Store the 64-bit value in r0:r1 (r0 = low word per AAPCS) to the
@ possibly unaligned address in r2, byte by byte, little-endian.
@ The data registers are left untouched, so r0:r1 is the return value.
mov r3, r0, lsr #8
strb r0, [r2]        @ bytes 0..3 = low word r0
strb r3, [r2, #1]
mov r3, r0, lsr #16
strb r3, [r2, #2]
mov r3, r0, lsr #24
strb r3, [r2, #3]
mov r3, r1, lsr #8
strb r1, [r2, #4]    @ bytes 4..7 = high word r1
strb r3, [r2, #5]
mov r3, r1, lsr #16
strb r3, [r2, #6]
mov r3, r1, lsr #24
strb r3, [r2, #7]
bx lr
|
acidanthera/audk
| 996
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/udivsi3.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
.syntax unified
ASM_FUNC(__udivsi3)
@ Unsigned 32-bit division: returns r0 / r1 in r0.
@ Returns 0 when the divisor is 0 or when the dividend is smaller
@ than the divisor.
cmp r1, #0
cmpne r0, #0               @ Z set if divisor or dividend is zero
stmfd sp!, {r4, r5, r7, lr}
add r7, sp, #8             @ frame pointer (Apple convention)
beq L2                     @ either operand zero -> result 0
clz r2, r1
clz r3, r0
rsb r3, r3, r2             @ r3 = clz(divisor) - clz(dividend)
cmp r3, #31
bhi L2                     @ wrapped negative: divisor > dividend -> 0
ldmfdeq sp!, {r4, r5, r7, pc} @ r3 == 31 => divisor is 1: return dividend
add r5, r3, #1             @ r5 = number of quotient bits to produce
rsb r3, r3, #31
mov lr, #0
mov r2, r0, asl r3         @ r2 = dividend shifted up (bits still to process)
mov ip, r0, lsr r5         @ ip = running remainder
mov r4, lr                 @ r4 = loop counter
b L8
L9:
@ One long-division step: shift the next dividend bit into the
@ remainder (ip), subtract the divisor if it fits, and shift the
@ produced quotient bit (lr) into the result accumulator (r2).
mov r0, r2, lsr #31
orr ip, r0, ip, asl #1
orr r2, r3, lr
rsb r3, ip, r1
sub r3, r3, #1
and r0, r1, r3, asr #31
mov lr, r3, lsr #31
rsb ip, r0, ip
add r4, r4, #1
L8:
cmp r4, r5
mov r3, r2, asl #1
bne L9
orr r0, r3, lr             @ fold in the final quotient bit
ldmfd sp!, {r4, r5, r7, pc}
L2:
mov r0, #0                 @ zero operand or dividend < divisor -> 0
ldmfd sp!, {r4, r5, r7, pc}
|
acidanthera/audk
| 1,036
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/clzsi2.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
ASM_FUNC(__clzsi2)
@ Count leading zeros of r0 without using the clz instruction:
@ a 5-step binary search.  At each step the value is shifted right by
@ 16/8/4/2 if the corresponding upper part is non-zero, and the
@ complementary step size is accumulated (in r9, lr, ip, r1) when it
@ is zero.  The final two bits are resolved arithmetically.
@ NOTE(review): clobbers r9, which some AAPCS variants reserve as a
@ platform register (SB/TR) -- confirm this is acceptable for all
@ supported toolchains/targets.
@ frame_needed = 1, uses_anonymous_args = 0
stmfd sp!, {r7, lr}
add r7, sp, #0
movs r3, r0, lsr #16       @ any bits in the top halfword?
movne r3, #16
moveq r3, #0
movne r9, #0
moveq r9, #16              @ r9 = 16 if top halfword is zero
mov r3, r0, lsr r3
tst r3, #65280             @ 0xFF00: any bits in the next byte?
movne r0, #8
moveq r0, #0
movne lr, #0
moveq lr, #8               @ lr = 8 if that byte is zero
mov r3, r3, lsr r0
tst r3, #240               @ 0xF0
movne r0, #4
moveq r0, #0
movne ip, #0
moveq ip, #4               @ ip = 4 if that nibble is zero
mov r3, r3, lsr r0
tst r3, #12                @ 0xC
movne r0, #2
moveq r0, #0
movne r1, #0
moveq r1, #2               @ r1 = 2 if those two bits are zero
mov r2, r3, lsr r0
add r3, lr, r9             @ sum the accumulated counts
add r0, r3, ip
add r1, r0, r1
@ Resolve the last two bits: r2 is now 0..3; add 2 - r2 clamped so
@ that r2 in {0,1} contributes the remaining leading-zero count.
mov r0, r2, lsr #1
eor r0, r0, #1
ands r0, r0, #1
mvnne r0, #0
rsb r3, r2, #2
and r0, r0, r3
add r0, r1, r0
ldmfd sp!, {r7, pc}
|
acidanthera/audk
| 6,639
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/uldiv.S
|
//------------------------------------------------------------------------------
//
// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
.text
.align 2
GCC_ASM_EXPORT(__aeabi_uldivmod)
//
//UINT64
//EFIAPI
//__aeabi_uldivmod (
// IN UINT64 Dividend
// IN UINT64 Divisor
// )
//
// __aeabi_uldivmod: unsigned 64 / 64 division with remainder.
// In:  {r1:r0} = dividend, {r3:r2} = divisor (r0/r2 are the low words)
// Out: {r1:r0} = quotient, {r3:r2} = remainder (RTABI convention)
// r4:r5 hold the (high:low) dividend, r6 is the divisor's
// normalization shift.  Three divisor magnitude classes are handled:
// "small" (< 2^31), "big" (2^31 .. 2^63-1) and "ginormous" (>= 2^63).
ASM_PFX(__aeabi_uldivmod):
stmdb sp!, {r4, r5, r6, lr}
mov r4, r1
mov r5, r0
mov r6, #0 // 0x0
orrs ip, r3, r2, lsr #31 // divisor >= 2^31 ?
bne ASM_PFX(__aeabi_uldivmod_label1)
tst r2, r2
beq ASM_PFX(_ll_div0) // divisor == 0
// Small divisor: count its leading zeros into r6 by binary search
// (16/8/4/2/1), normalizing the divisor into ip as we go.
movs ip, r2, lsr #15
addeq r6, r6, #16 // 0x10
mov ip, r2, lsl r6
movs lr, ip, lsr #23
moveq ip, ip, lsl #8
addeq r6, r6, #8 // 0x8
movs lr, ip, lsr #27
moveq ip, ip, lsl #4
addeq r6, r6, #4 // 0x4
movs lr, ip, lsr #29
moveq ip, ip, lsl #2
addeq r6, r6, #2 // 0x2
movs lr, ip, lsr #30
moveq ip, ip, lsl #1
addeq r6, r6, #1 // 0x1
b ASM_PFX(_ll_udiv_small)
ASM_PFX(__aeabi_uldivmod_label1):
// 2^31 <= divisor < 2^63: normalize the full 64-bit divisor into ip:lr
// (same binary-search shift count in r6, applied across both words).
tst r3, #-2147483648 // 0x80000000
bne ASM_PFX(__aeabi_uldivmod_label2)
movs ip, r3, lsr #15
addeq r6, r6, #16 // 0x10
mov ip, r3, lsl r6
movs lr, ip, lsr #23
moveq ip, ip, lsl #8
addeq r6, r6, #8 // 0x8
movs lr, ip, lsr #27
moveq ip, ip, lsl #4
addeq r6, r6, #4 // 0x4
movs lr, ip, lsr #29
moveq ip, ip, lsl #2
addeq r6, r6, #2 // 0x2
movs lr, ip, lsr #30
addeq r6, r6, #1 // 0x1
rsb r3, r6, #32 // 0x20
moveq ip, ip, lsl #1
orr ip, ip, r2, lsr r3
mov lr, r2, lsl r6
b ASM_PFX(_ll_udiv_big)
ASM_PFX(__aeabi_uldivmod_label2):
// divisor >= 2^63: the quotient can only be 0 or 1
mov ip, r3
mov lr, r2
b ASM_PFX(_ll_udiv_ginormous)
ASM_PFX(_ll_udiv_small):
// Normalized divisor in ip, shift count in r6, dividend in r4:r5.
// Peel off the top two quotient bits into r3 by direct subtraction.
cmp r4, ip, lsl #1
mov r3, #0 // 0x0
subcs r4, r4, ip, lsl #1
addcs r3, r3, #2 // 0x2
cmp r4, ip
subcs r4, r4, ip
adcs r3, r3, #0 // 0x0
add r2, r6, #32 // 0x20
cmp r2, #32 // 0x20
rsb ip, ip, #0 // 0x0 -- negate divisor so the loop can use adds
bcc ASM_PFX(_ll_udiv_small_label1)
orrs r0, r4, r5, lsr #30
moveq r4, r5
moveq r5, #0 // 0x0
subeq r2, r2, #32 // 0x20
ASM_PFX(_ll_udiv_small_label1):
// Pre-shift the dividend so the main loop runs only as many
// iterations as there are significant quotient bits (r2 counts them).
mov r1, #0 // 0x0
cmp r2, #16 // 0x10
bcc ASM_PFX(_ll_udiv_small_label2)
movs r0, r4, lsr #14
moveq r4, r4, lsl #16
addeq r1, r1, #16 // 0x10
ASM_PFX(_ll_udiv_small_label2):
sub lr, r2, r1
cmp lr, #8 // 0x8
bcc ASM_PFX(_ll_udiv_small_label3)
movs r0, r4, lsr #22
moveq r4, r4, lsl #8
addeq r1, r1, #8 // 0x8
ASM_PFX(_ll_udiv_small_label3):
rsb r0, r1, #32 // 0x20
sub r2, r2, r1
orr r4, r4, r5, lsr r0
mov r5, r5, lsl r1
cmp r2, #1 // 0x1
bcc ASM_PFX(_ll_udiv_small_label5)
// Computed jump into the unrolled loop so that (r2 mod 8) leftover
// steps are executed on the first pass (3 instructions per step).
sub r2, r2, #1 // 0x1
and r0, r2, #7 // 0x7
eor r0, r0, #7 // 0x7
adds r0, r0, r0, lsl #1
add pc, pc, r0, lsl #2
nop // (mov r0,r0)
ASM_PFX(_ll_udiv_small_label4):
// Unrolled 8x: shift a dividend bit in, conditionally subtract the
// (negated) divisor, and shift the quotient bit out through carry.
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
rsbcc r4, ip, r4
adcs r5, r5, r5
adcs r4, ip, r4, lsl #1
sub r2, r2, #8 // 0x8
tst r2, r2
rsbcc r4, ip, r4
bpl ASM_PFX(_ll_udiv_small_label4)
ASM_PFX(_ll_udiv_small_label5):
// Denormalize: split remainder (low r6 bits of r4) from the quotient
// bits, and fold in the top bits saved in r3.
mov r2, r4, lsr r6
bic r4, r4, r2, lsl r6
adcs r0, r5, r5
adc r1, r4, r4
add r1, r1, r3, lsl r6
mov r3, #0 // 0x0
ldmia sp!, {r4, r5, r6, pc}
ASM_PFX(_ll_udiv_big):
// Normalized 64-bit divisor in ip:lr.  Up to three initial
// subtractions establish the top quotient bits in r3.
subs r0, r5, lr
mov r3, #0 // 0x0
sbcs r1, r4, ip
movcs r5, r0
movcs r4, r1
adcs r3, r3, #0 // 0x0
subs r0, r5, lr
sbcs r1, r4, ip
movcs r5, r0
movcs r4, r1
adcs r3, r3, #0 // 0x0
subs r0, r5, lr
sbcs r1, r4, ip
movcs r5, r0
movcs r4, r1
adcs r3, r3, #0 // 0x0
mov r1, #0 // 0x0
rsbs lr, lr, #0 // 0x0 -- negate the 64-bit divisor for additive steps
rsc ip, ip, #0 // 0x0
cmp r6, #16 // 0x10
bcc ASM_PFX(_ll_udiv_big_label1)
movs r0, r4, lsr #14
moveq r4, r4, lsl #16
addeq r1, r1, #16 // 0x10
ASM_PFX(_ll_udiv_big_label1):
sub r2, r6, r1
cmp r2, #8 // 0x8
bcc ASM_PFX(_ll_udiv_big_label2)
movs r0, r4, lsr #22
moveq r4, r4, lsl #8
addeq r1, r1, #8 // 0x8
ASM_PFX(_ll_udiv_big_label2):
rsb r0, r1, #32 // 0x20
sub r2, r6, r1
orr r4, r4, r5, lsr r0
mov r5, r5, lsl r1
cmp r2, #1 // 0x1
bcc ASM_PFX(_ll_udiv_big_label4)
// Computed jump into the unrolled loop for (r2 mod 4) leftover steps
// (6 instructions per step).
sub r2, r2, #1 // 0x1
and r0, r2, #3 // 0x3
rsb r0, r0, #3 // 0x3
adds r0, r0, r0, lsl #1
add pc, pc, r0, lsl #3
nop // (mov r0,r0)
ASM_PFX(_ll_udiv_big_label3):
// Unrolled 4x 64-bit division step: shift dividend left, trial-add
// the negated divisor, keep the result when it did not underflow.
adcs r5, r5, r5
adcs r4, r4, r4
adcs r0, lr, r5
adcs r1, ip, r4
movcs r5, r0
movcs r4, r1
adcs r5, r5, r5
adcs r4, r4, r4
adcs r0, lr, r5
adcs r1, ip, r4
movcs r5, r0
movcs r4, r1
adcs r5, r5, r5
adcs r4, r4, r4
adcs r0, lr, r5
adcs r1, ip, r4
movcs r5, r0
movcs r4, r1
sub r2, r2, #4 // 0x4
adcs r5, r5, r5
adcs r4, r4, r4
adcs r0, lr, r5
adcs r1, ip, r4
tst r2, r2
movcs r5, r0
movcs r4, r1
bpl ASM_PFX(_ll_udiv_big_label3)
ASM_PFX(_ll_udiv_big_label4):
// Denormalize quotient and remainder and fold in the r3 top bits.
mov r1, #0 // 0x0
mov r2, r5, lsr r6
bic r5, r5, r2, lsl r6
adcs r0, r5, r5
adc r1, r1, #0 // 0x0
movs lr, r3, lsl r6
mov r3, r4, lsr r6
bic r4, r4, r3, lsl r6
adc r1, r1, #0 // 0x0
adds r0, r0, lr
orr r2, r2, r4, ror r6
adc r1, r1, #0 // 0x0
ldmia sp!, {r4, r5, r6, pc}
ASM_PFX(_ll_udiv_ginormous):
// Divisor >= 2^63: one compare decides quotient 0 or 1; the
// remainder is the dividend or dividend - divisor.
subs r2, r5, lr
mov r1, #0 // 0x0
sbcs r3, r4, ip
adc r0, r1, r1
movcc r2, r5
movcc r3, r4
ldmia sp!, {r4, r5, r6, pc}
ASM_PFX(_ll_div0):
// Division by zero: RTABI routes through __aeabi_ldiv0; the default
// implementation below simply returns (with a zero result here).
ldmia sp!, {r4, r5, r6, lr}
mov r0, #0 // 0x0
mov r1, #0 // 0x0
b ASM_PFX(__aeabi_ldiv0)
ASM_PFX(__aeabi_ldiv0):
bx r14
|
acidanthera/audk
| 1,117
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/uread.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
#
#UINT32
#EFIAPI
#__aeabi_uread4 (
# IN VOID *Pointer
# );
#
ASM_FUNC(__aeabi_uread4)
  @ Load a 32-bit little-endian value from the possibly unaligned
  @ address in r0, byte by byte (lowest address first, matching the
  @ original access order); the assembled word is returned in r0.
  ldrb r3, [r0]            @ b0 = bits 7:0
  ldrb r2, [r0, #1]        @ b1 = bits 15:8
  ldrb r1, [r0, #2]        @ b2 = bits 23:16
  ldrb r0, [r0, #3]        @ b3 = bits 31:24 (frees r0 for the result)
  orr r3, r3, r2, lsl #8
  orr r3, r3, r1, lsl #16
  orr r0, r3, r0, lsl #24
  bx lr
#
#UINT64
#EFIAPI
#__aeabi_uread8 (
# IN VOID *Pointer
# );
#
ASM_FUNC(__aeabi_uread8)
@ Load a 64-bit little-endian value from the possibly unaligned
@ address in r0.  Returns low word in r0, high word in r1 (AAPCS);
@ the pointer is moved to r3 first because r0 and r1 are both needed
@ for the result.
mov r3, r0
ldrb r1, [r3]
ldrb r2, [r3, #1]
orr r1, r1, r2, lsl #8
ldrb r2, [r3, #2]
orr r1, r1, r2, lsl #16
ldrb r0, [r3, #3]
orr r0, r1, r0, lsl #24    @ r0 = low word (bytes 0..3)
ldrb r1, [r3, #4]
ldrb r2, [r3, #5]
orr r1, r1, r2, lsl #8
ldrb r2, [r3, #6]
orr r1, r1, r2, lsl #16
ldrb r2, [r3, #7]
orr r1, r1, r2, lsl #24    @ r1 = high word (bytes 4..7)
bx lr
|
acidanthera/audk
| 1,113
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/muldi3.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
ASM_FUNC(__muldi3)
@ 64 x 64 -> 64 multiply (low 64 bits only): {r1:r0} * {r3:r2}.
@ Compiler-generated code: the low 32x32 product is built from 16-bit
@ limbs (mask 0xFFFF kept in r11, loaded from literal L4), and the
@ high word adds the cross products a_lo*b_hi and a_hi*b_lo.
@ Result: low word in r0, high word in r1.
stmfd sp!, {r4, r5, r6, r7, lr}
add r7, sp, #12
stmfd sp!, {r8, r10, r11}
ldr r11, L4                @ r11 = 0xFFFF limb mask
mov r4, r0, lsr #16        @ a_lo split into 16-bit halves
and r8, r0, r11
and ip, r2, r11            @ b_lo split likewise
mul lr, ip, r8             @ partial product: a0*b0
mul ip, r4, ip             @ partial product: a1*b0
sub sp, sp, #8
add r10, ip, lr, lsr #16   @ accumulate with carry into r10
and ip, r10, r11
and lr, lr, r11
mov r6, r2, lsr #16
str r4, [sp, #4]
add r4, lr, ip, asl #16
mul ip, r8, r6             @ partial product: a0*b1
mov r5, r10, lsr #16
add r10, ip, r4, lsr #16
and ip, r10, r11
and lr, r4, r11
add r4, lr, ip, asl #16    @ r4 = low 32 bits of a_lo*b_lo
mul r0, r3, r0             @ high-word term: a_lo * b_hi
add ip, r5, r10, lsr #16
ldr r5, [sp, #4]
mla r0, r2, r1, r0         @ high-word term: + b_lo * a_hi
mla r5, r6, r5, ip         @ + a1*b1 and carries from the low product
mov r10, r4
add r11, r0, r5            @ r11 = high 32 bits of the result
mov r1, r11
mov r0, r4
sub sp, r7, #24
ldmfd sp!, {r8, r10, r11}
ldmfd sp!, {r4, r5, r6, r7, pc}
.p2align 2
L5:
@ (empty label retained from compiler output)
.align 2
L4:
.long 65535                @ 16-bit limb mask
|
acidanthera/audk
| 1,094
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/ldivmod.S
|
//------------------------------------------------------------------------------
//
// Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
#include <AsmMacroLib.h>
//
// A pair of (unsigned) long longs is returned in {{r0, r1}, {r2, r3}},
// the quotient in {r0, r1}, and the remainder in {r2, r3}.
//
//__value_in_regs lldiv_t
//EFIAPI
//__aeabi_ldivmod (
// IN UINT64  Dividend
// IN UINT64 Divisor
// )//
//
ASM_FUNC(__aeabi_ldivmod)
@ Signed 64-bit divide/modulo, implemented by sign-adjusting around
@ the unsigned __aeabi_uldivmod (see header comment above for the
@ RTABI register convention: quotient {r1:r0}, remainder {r3:r2}).
@ r4 records the signs: bit31 = sign(numerator) (= remainder sign),
@ bit30 = sign(numerator) ^ sign(denominator) (= quotient sign).
push {r4,lr}
asrs r4,r1,#1            @ r4[31:30] = sign(num); N flag = sign(num)
eor r4,r4,r3,LSR #1      @ r4[30] ^= sign(den) -> quotient sign
bpl L_Test1              @ numerator >= 0: no negation
rsbs r0,r0,#0            @ negate 64-bit numerator
rsc r1,r1,#0
L_Test1:
tst r3,r3
bpl L_Test2              @ denominator >= 0: no negation
rsbs r2,r2,#0            @ negate 64-bit denominator
rsc r3,r3,#0
L_Test2:
bl ASM_PFX(__aeabi_uldivmod)
tst r4,#0x40000000       @ signs differed: negate quotient
beq L_Test3
rsbs r0,r0,#0
rsc r1,r1,#0
L_Test3:
tst r4,#0x80000000       @ numerator was negative: negate remainder
beq L_Exit
rsbs r2,r2,#0
rsc r3,r3,#0
L_Exit:
pop {r4,pc}
|
acidanthera/audk
| 3,398
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/div.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2011, ARM. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.align 2
GCC_ASM_EXPORT(__aeabi_uidiv)
GCC_ASM_EXPORT(__aeabi_uidivmod)
GCC_ASM_EXPORT(__aeabi_idiv)
GCC_ASM_EXPORT(__aeabi_idivmod)
# AREA Math, CODE, READONLY
#
#UINT32
#EFIAPI
#__aeabi_uidivmod (
#  IN UINT32  Dividend
# IN UINT32 Divisor
# );
#
@ Unsigned 32-bit divide (and divmod): quotient in r0, remainder in r1.
@ Dispatches on dividend/divisor magnitude: up-to-4-bit, up-to-8-bit,
@ or the general scaled path.  r2 accumulates quotient bits.
ASM_PFX(__aeabi_uidiv):
ASM_PFX(__aeabi_uidivmod):
rsbs r12, r1, r0, LSR #4   @ quotient fits in 4 bits?
mov r2, #0
bcc ASM_PFX(__arm_div4)
rsbs r12, r1, r0, LSR #8   @ quotient fits in 8 bits?
bcc ASM_PFX(__arm_div8)
mov r3, #0                 @ r3 = 0: no sign fixup on the unsigned path
b ASM_PFX(__arm_div_large)
#
#INT32
#EFIAPI
#__aeabi_idivmod (
#  IN INT32  Dividend
#  IN INT32  Divisor
#  );
#
@ Signed 32-bit divide (and divmod): quotient in r0, remainder in r1.
ASM_PFX(__aeabi_idiv):
ASM_PFX(__aeabi_idivmod):
orrs r12, r0, r1
bmi ASM_PFX(__arm_div_negative) @ either operand negative
rsbs r12, r1, r0, LSR #1
mov r2, #0
bcc ASM_PFX(__arm_div1)
rsbs r12, r1, r0, LSR #4
bcc ASM_PFX(__arm_div4)
rsbs r12, r1, r0, LSR #8
bcc ASM_PFX(__arm_div8)
mov r3, #0
b ASM_PFX(__arm_div_large)
@ Radix-2 restoring division steps for quotient bits 7..4; each step
@ trial-subtracts a shifted divisor and shifts the carry into r2.
ASM_PFX(__arm_div8):
rsbs r12, r1, r0, LSR #7
subcs r0, r0, r1, LSL #7
adc r2, r2, r2
rsbs r12, r1, r0,LSR #6
subcs r0, r0, r1, LSL #6
adc r2, r2, r2
rsbs r12, r1, r0, LSR #5
subcs r0, r0, r1, LSL #5
adc r2, r2, r2
rsbs r12, r1, r0, LSR #4
subcs r0, r0, r1, LSL #4
adc r2, r2, r2
@ Quotient bits 3..1; falls through to __arm_div1 for the final bit.
ASM_PFX(__arm_div4):
rsbs r12, r1, r0, LSR #3
subcs r0, r0, r1, LSL #3
adc r2, r2, r2
rsbs r12, r1, r0, LSR #2
subcs r0, r0, r1, LSL #2
adcs r2, r2, r2
rsbs r12, r1, r0, LSR #1
subcs r0, r0, r1, LSL #1
adc r2, r2, r2
ASM_PFX(__arm_div1):
subs r1, r0, r1
movcc r1, r0               @ r1 = remainder
adc r0, r2, r2             @ r0 = quotient (final bit from carry)
bx r14
ASM_PFX(__arm_div_negative):
@ Record signs and take absolute values, then rejoin the unsigned
@ paths.  r3 finally holds: bit31 = quotient sign, bit30 = remainder
@ sign (= numerator sign); see the asrs fixup after label1.
ands r2, r1, #0x80000000
rsbmi r1, r1, #0           @ |divisor|
eors r3, r2, r0, ASR #32   @ C = sign(dividend); r3 combines the signs
rsbcs r0, r0, #0           @ |dividend|
rsbs r12, r1, r0, LSR #4
bcc label1
rsbs r12, r1, r0, LSR #8
bcc label2
@ General path: scale the divisor up by 64 at a time, pre-biasing the
@ quotient accumulator r2; overflow of the divisor -> divide-by-zero
@ style exit via __aeabi_idiv0.
ASM_PFX(__arm_div_large):
lsl r1, r1, #6
rsbs r12, r1, r0, LSR #8
orr r2, r2, #0xfc000000
bcc label2
lsl r1, r1, #6
rsbs r12, r1, r0, LSR #8
orr r2, r2, #0x3f00000
bcc label2
lsl r1, r1, #6
rsbs r12, r1, r0, LSR #8
orr r2, r2, #0xfc000
orrcs r2, r2, #0x3f00
lslcs r1, r1, #6
rsbs r12, r1, #0
bcs ASM_PFX(__aeabi_idiv0)
label3:
lsrcs r1, r1, #6           @ undo one scaling step and loop again
label2:
@ Eight restoring-division steps (bits 7..4 here, 3..0 after label1).
rsbs r12, r1, r0, LSR #7
subcs r0, r0, r1, LSL #7
adc r2, r2, r2
rsbs r12, r1, r0, LSR #6
subcs r0, r0, r1, LSL #6
adc r2, r2, r2
rsbs r12, r1, r0, LSR #5
subcs r0, r0, r1, LSL #5
adc r2, r2, r2
rsbs r12, r1, r0, LSR #4
subcs r0, r0, r1, LSL #4
adc r2, r2, r2
label1:
rsbs r12, r1, r0, LSR #3
subcs r0, r0, r1, LSL #3
adc r2, r2, r2
rsbs r12, r1, r0, LSR #2
subcs r0, r0, r1, LSL #2
adcs r2, r2, r2
bcs label3                 @ accumulator overflowed: rescale and continue
rsbs r12, r1, r0, LSR #1
subcs r0, r0, r1, LSL #1
adc r2, r2, r2
subs r1, r0, r1
movcc r1, r0
adc r0, r2, r2
@ Sign fixup: after asrs, N = quotient sign (r3 bit31) and C = the
@ remainder-sign bit (r3 bit30); both are 0 on the unsigned entry.
asrs r3, r3, #31
rsbmi r0, r0, #0
rsbcs r1, r1, #0
bx r14
@ What to do about division by zero?  For now, just return.
ASM_PFX(__aeabi_idiv0):
bx r14
|
acidanthera/audk
| 4,106
|
MdePkg/Library/CompilerIntrinsicsLib/Arm/udivmoddi4.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <AsmMacroLib.h>
.syntax unified
@ __udivmoddi4(UINT64 n, UINT64 d, UINT64 *rp)
@ In:  {r1:r0} = dividend, {r3:r2} = divisor, [sp] = rp (may be NULL)
@ Out: {r1:r0} = quotient; *rp = remainder when rp != NULL.
@ GCC-generated code.  After the prologue: r6 = rp (from [sp,#48]),
@ r10:r11 = result accumulator, [sp,#0..7] = saved divisor,
@ [sp,#8..15] = shifted-dividend scratch, ip = dividend high word.
@ Label roles below are inferred from the data flow (NOTE(review):
@ verify against the generating compiler if modifying).
ASM_FUNC(__udivmoddi4)
stmfd sp!, {r4, r5, r6, r7, lr}
add r7, sp, #12
stmfd sp!, {r10, r11}
sub sp, sp, #20
stmia sp, {r2-r3}          @ keep the divisor in memory
ldr r6, [sp, #48]          @ r6 = rp (5th argument, on the stack)
orrs r2, r2, r3
mov r10, r0
mov r11, r1
beq L2                     @ divisor == 0 -> quotient 0
subs ip, r1, #0
bne L4                     @ dividend has a non-zero high word
cmp r3, #0
bne L6                     @ 32-bit dividend, 64-bit divisor
cmp r6, #0
beq L8
@ 32/32 fast path: remainder via __umodsi3, quotient via __udivsi3.
mov r1, r2
bl ASM_PFX(__umodsi3)
mov r1, #0
stmia r6, {r0-r1}
L8:
ldr r1, [sp, #0]
mov r0, r10
b L45
L6:
@ dividend < divisor: quotient 0, remainder = dividend.
cmp r6, #0
movne r1, #0
stmiane r6, {r0-r1}
b L2
L4:
ldr r1, [sp, #0]
cmp r1, #0
bne L12                    @ divisor low word non-zero
ldr r2, [sp, #4]
cmp r2, #0
bne L14
@ 64-bit dividend, 32-bit divisor that divides the high word path:
cmp r6, #0
beq L16
mov r1, r2
mov r0, r11
bl ASM_PFX(__umodsi3)
mov r1, #0
stmia r6, {r0-r1}
L16:
ldr r1, [sp, #4]
mov r0, r11
L45:
bl ASM_PFX(__udivsi3)
L46:
mov r10, r0
mov r11, #0
b L10
L14:
subs r1, r0, #0
bne L18
cmp r6, #0
beq L16
ldr r1, [sp, #4]
mov r0, r11
bl ASM_PFX(__umodsi3)
mov r4, r10
mov r5, r0
stmia r6, {r4-r5}
b L16
L18:
@ Power-of-two divisor high word: quotient is a shift, remainder a mask.
sub r3, r2, #1
tst r2, r3
bne L22
cmp r6, #0
movne r4, r0
andne r5, ip, r3
stmiane r6, {r4-r5}
L24:
rsb r3, r2, #0
and r3, r2, r3
clz r3, r3
rsb r3, r3, #31
mov r0, ip, lsr r3
b L46
L22:
@ Normalize dividend/divisor by the clz difference, then run the
@ main loop at L29 (lr = iteration count).
clz r2, r2
clz r3, ip
rsb r3, r3, r2
cmp r3, #30
bhi L48
rsb r2, r3, #31
add lr, r3, #1
mov r3, r1, asl r2
str r3, [sp, #12]
mov r3, r1, lsr lr
ldr r0, [sp, #0]
mov r5, ip, lsr lr
orr r4, r3, ip, asl r2
str r0, [sp, #8]
b L29
L12:
ldr r3, [sp, #4]
cmp r3, #0
bne L30
@ 64-bit dividend, 32-bit divisor: power-of-two shortcut first.
sub r3, r1, #1
tst r1, r3
bne L32
cmp r6, #0
andne r3, r3, r0
movne r2, r3
movne r3, #0
stmiane r6, {r2-r3}
L34:
cmp r1, #1
beq L10
rsb r3, r1, #0
and r3, r1, r3
clz r3, r3
rsb r0, r3, #31
mov r1, ip, lsr r0
rsb r3, r0, #32
mov r0, r10, lsr r0
orr ip, r0, ip, asl r3
str r1, [sp, #12]
str ip, [sp, #8]
ldrd r10, [sp, #8]         @ loads r10:r11 (implicit pair)
b L10
L32:
@ General 64/32 case: build the shifted dividend/divisor images with
@ branch-free masking (the asr #31 masks select between shift cases).
clz r2, r1
clz r3, ip
rsb r3, r3, r2
rsb r4, r3, #31
mov r2, r0, asl r4
mvn r1, r3
and r2, r2, r1, asr #31
add lr, r3, #33
str r2, [sp, #8]
add r2, r3, #1
mov r3, r3, asr #31
and r0, r3, r0, asl r1
mov r3, r10, lsr r2
orr r3, r3, ip, asl r4
and r3, r3, r1, asr #31
orr r0, r0, r3
mov r3, ip, lsr lr
str r0, [sp, #12]
mov r0, r10, lsr lr
and r5, r3, r2, asr #31
rsb r3, lr, #31
mov r3, r3, asr #31
orr r0, r0, ip, asl r1
and r3, r3, ip, lsr r2
and r0, r0, r2, asr #31
orr r4, r3, r0
b L29
L30:
@ 64/64 general case: normalize by clz difference.
clz r2, r3
clz r3, ip
rsb r3, r3, r2
cmp r3, #31
bls L37
L48:
@ dividend < divisor: quotient 0, remainder = dividend.
cmp r6, #0
stmiane r6, {r10-r11}
b L2
L37:
rsb r1, r3, #31
mov r0, r0, asl r1
add lr, r3, #1
mov r2, #0
str r0, [sp, #12]
mov r0, r10, lsr lr
str r2, [sp, #8]
sub r2, r3, #31
and r0, r0, r2, asr #31
mov r3, ip, lsr lr
orr r4, r0, ip, asl r1
and r5, r3, r2, asr #31
L29:
@ Main shift-subtract loop: lr iterations; r4:r5 = running remainder,
@ [sp,#8..15] = shifted dividend, ip = incoming quotient bit.
mov ip, #0
mov r10, ip
b L40
L41:
ldr r1, [sp, #12]
ldr r2, [sp, #8]
mov r3, r4, lsr #31
orr r5, r3, r5, asl #1
mov r3, r1, lsr #31
orr r4, r3, r4, asl #1
mov r3, r2, lsr #31
orr r0, r3, r1, asl #1
orr r1, ip, r2, asl #1
ldmia sp, {r2-r3}
str r0, [sp, #12]
subs r2, r2, r4
sbc r3, r3, r5
str r1, [sp, #8]
subs r0, r2, #1
sbc r1, r3, #0
mov r2, r1, asr #31        @ mask: all-ones when divisor fits
ldmia sp, {r0-r1}
mov r3, r2
and ip, r2, #1             @ ip = this quotient bit
and r3, r3, r1
and r2, r2, r0
subs r4, r4, r2            @ conditional (masked) subtraction
sbc r5, r5, r3
add r10, r10, #1
L40:
cmp r10, lr
bne L41
@ Assemble quotient from the shifted image plus the final bit; store
@ the remainder (r4:r5) through rp if requested.
ldrd r0, [sp, #8]
adds r0, r0, r0
adc r1, r1, r1
cmp r6, #0
orr r10, r0, ip
mov r11, r1
stmiane r6, {r4-r5}
b L10
L2:
mov r10, #0
mov r11, #0
L10:
mov r0, r10
mov r1, r11
sub sp, r7, #20
ldmfd sp!, {r10, r11}
ldmfd sp!, {r4, r5, r6, r7, pc}
|
acidanthera/audk
| 997
|
MdePkg/Library/BaseLib/RiscV64/RiscVCacheMgmt.S
|
//------------------------------------------------------------------------------
//
// RISC-V cache operation.
//
// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
// Copyright (c) 2023, Rivos Inc. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
.include "RiscVasm.inc"
.align 3
ASM_GLOBAL ASM_PFX(RiscVInvalidateInstCacheFenceAsm)
ASM_GLOBAL ASM_PFX(RiscVInvalidateDataCacheFenceAsm)
// Synchronize the instruction stream with prior stores (Zifencei).
ASM_PFX(RiscVInvalidateInstCacheFenceAsm):
fence.i
ret
// Full memory fence ordering all prior loads/stores before later ones.
ASM_PFX(RiscVInvalidateDataCacheFenceAsm):
fence
ret
// CMO cache-block flush; RISCVCMOFLUSH expands per RiscVasm.inc.
ASM_GLOBAL ASM_PFX (RiscVCpuCacheFlushCmoAsm)
ASM_PFX (RiscVCpuCacheFlushCmoAsm):
RISCVCMOFLUSH
ret
// CMO cache-block clean (write back without invalidate).
ASM_GLOBAL ASM_PFX (RiscVCpuCacheCleanCmoAsm)
ASM_PFX (RiscVCpuCacheCleanCmoAsm):
RISCVCMOCLEAN
ret
// CMO cache-block invalidate (discard without write back).
ASM_GLOBAL ASM_PFX (RiscVCpuCacheInvalCmoAsm)
ASM_PFX (RiscVCpuCacheInvalCmoAsm):
RISCVCMOINVALIDATE
ret
|
acidanthera/audk
| 2,716
|
MdePkg/Library/BaseLib/RiscV64/RiscVInterrupt.S
|
//------------------------------------------------------------------------------
//
// RISC-V Supervisor Mode interrupt enable/disable
//
// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
#include <Register/RiscV64/RiscVImpl.h>
ASM_GLOBAL ASM_PFX(RiscVDisableSupervisorModeInterrupts)
ASM_GLOBAL ASM_PFX(RiscVEnableSupervisorModeInterrupt)
ASM_GLOBAL ASM_PFX(RiscVGetSupervisorModeInterrupts)
#define SSTATUS_SPP_BIT_POSITION 8
//
// This routine disables supervisor mode interrupts by clearing
// SSTATUS.SIE.  a1 is preserved via a small stack spill.
//
ASM_PFX(RiscVDisableSupervisorModeInterrupts):
add sp, sp, -(__SIZEOF_POINTER__)
sd a1, (sp)
li a1, SSTATUS_SIE
csrc CSR_SSTATUS, a1       // clear SIE bit only
ld a1, (sp)
add sp, sp, (__SIZEOF_POINTER__)
ret
//
// This routine enables supervisor mode interrupts (SSTATUS.SIE),
// unless we are currently inside an S-mode trap handler.
//
ASM_PFX(RiscVEnableSupervisorModeInterrupt):
add sp, sp, -2*(__SIZEOF_POINTER__)
sd a0, (0*__SIZEOF_POINTER__)(sp)
sd a1, (1*__SIZEOF_POINTER__)(sp)
csrr a0, CSR_SSTATUS
and a0, a0, (1 << SSTATUS_SPP_BIT_POSITION)
bnez a0, InTrap            // We are in supervisor mode (SMode)
                           // trap handler.
                           // Skip enabling SIE because SIE
                           // is set to disabled by RISC-V hart
                           // when the trap takes hart to SMode.
li a1, SSTATUS_SIE
csrs CSR_SSTATUS, a1
InTrap:
ld a0, (0*__SIZEOF_POINTER__)(sp)
ld a1, (1*__SIZEOF_POINTER__)(sp)
add sp, sp, 2*(__SIZEOF_POINTER__)
ret
//
// Set Supervisor mode trap vector.
// @param a0 : Value set to Supervisor mode trap vector
//
ASM_FUNC (RiscVSetSupervisorStvec)
csrrw a1, CSR_STVEC, a0    // old stvec lands in a1 (discarded)
ret
//
// Get Supervisor mode trap vector.
// @retval a0 : Value in Supervisor mode trap vector
//
ASM_FUNC (RiscVGetSupervisorStvec)
csrr a0, CSR_STVEC
ret
//
// Get Supervisor trap cause CSR.
//
ASM_FUNC (RiscVGetSupervisorTrapCause)
csrrs a0, CSR_SCAUSE, 0    // read scause without modifying it
ret
//
// This routine returns supervisor mode interrupt
// status (non-zero when SIE is set).
//
ASM_FUNC (RiscVGetSupervisorModeInterrupts)
csrr a0, CSR_SSTATUS
andi a0, a0, SSTATUS_SIE
ret
//
// This routine disables supervisor mode timer interrupt
// by clearing the STIE bit in the sie CSR.
//
ASM_FUNC (RiscVDisableTimerInterrupt)
li a0, SIP_STIP
csrc CSR_SIE, a0
ret
//
// This routine enables supervisor mode timer interrupt
// by setting the STIE bit in the sie CSR.
//
ASM_FUNC (RiscVEnableTimerInterrupt)
li a0, SIP_STIP
csrs CSR_SIE, a0
ret
//
// This routine clears pending supervisor mode timer interrupt
// (STIP bit in the sip CSR).
//
ASM_FUNC (RiscVClearPendingTimerInterrupt)
li a0, SIP_STIP
csrc CSR_SIP, a0
ret
|
acidanthera/audk
| 1,365
|
MdePkg/Library/BaseLib/RiscV64/RiscVSetJumpLongJump.S
|
//------------------------------------------------------------------------------
//
// Set/Long jump for RISC-V
//
// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
// Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
//
// SPDX-License-Identifier: BSD-2-Clause-Patent
//
//------------------------------------------------------------------------------
# define REG_S sd
# define REG_L ld
# define SZREG 8
.align 3
.globl SetJump
// SetJump(BASE_LIBRARY_JUMP_BUFFER *a0): save the callee-saved
// context (ra, s0-s11, sp -- 14 slots, matching the buffer layout)
// and return 0 to mark the direct call.
SetJump:
REG_S ra, 0*SZREG(a0)
REG_S s0, 1*SZREG(a0)
REG_S s1, 2*SZREG(a0)
REG_S s2, 3*SZREG(a0)
REG_S s3, 4*SZREG(a0)
REG_S s4, 5*SZREG(a0)
REG_S s5, 6*SZREG(a0)
REG_S s6, 7*SZREG(a0)
REG_S s7, 8*SZREG(a0)
REG_S s8, 9*SZREG(a0)
REG_S s9, 10*SZREG(a0)
REG_S s10, 11*SZREG(a0)
REG_S s11, 12*SZREG(a0)
REG_S sp, 13*SZREG(a0)
li a0, 0                   // direct SetJump call returns 0
ret
.globl InternalLongJump
// InternalLongJump(buffer a0, value a1): restore the saved context
// and "return" a1 from the matching SetJump.  The BaseLib wrapper is
// expected to pass a non-zero value so callers can tell the paths
// apart (not enforced here).
InternalLongJump:
REG_L ra, 0*SZREG(a0)
REG_L s0, 1*SZREG(a0)
REG_L s1, 2*SZREG(a0)
REG_L s2, 3*SZREG(a0)
REG_L s3, 4*SZREG(a0)
REG_L s4, 5*SZREG(a0)
REG_L s5, 6*SZREG(a0)
REG_L s6, 7*SZREG(a0)
REG_L s7, 8*SZREG(a0)
REG_L s8, 9*SZREG(a0)
REG_L s9, 10*SZREG(a0)
REG_L s10, 11*SZREG(a0)
REG_L s11, 12*SZREG(a0)
REG_L sp, 13*SZREG(a0)
mv a0, a1                  // ret goes to the saved ra (after SetJump)
ret
|
acidanthera/audk
| 1,538
|
MdePkg/Library/BaseLib/AArch64/SwitchStack.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011 - 2013, ARM Limited. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.align 5
GCC_ASM_EXPORT(InternalSwitchStackAsm)
GCC_ASM_EXPORT(CpuPause)
#/**
#
# This allows the caller to switch the stack and goes to the new entry point
#
# @param EntryPoint The pointer to the location to enter
# @param Context Parameter to pass in
# @param Context2 Parameter2 to pass in
# @param NewStack New Location of the stack
#
# @return Nothing. Goes to the Entry Point passing in the new parameters
#
#**/
#VOID
#EFIAPI
#InternalSwitchStackAsm (
# SWITCH_STACK_ENTRY_POINT EntryPoint,
# VOID *Context,
# VOID *Context2,
# VOID *NewStack
# );
#
ASM_PFX(InternalSwitchStackAsm):
AARCH64_BTI(c)
// Transfer control to EntryPoint (x0) on the new stack (x3) with
// Context/Context2 as its two arguments.  x29 is zeroed to terminate
// the frame chain; "ret" branches to x30 = EntryPoint and never
// returns here.
mov x29, #0
mov x30, x0                // x30 = EntryPoint; ret jumps there
mov sp, x3                 // switch to NewStack
mov x0, x1                 // arg0 = Context
mov x1, x2                 // arg1 = Context2
ret
#/**
#
#  Requests CPU to pause for a short period of time.
#
#  Requests CPU to pause for a short period of time. Typically used in MP
#  systems to prevent memory starvation while waiting for a spin lock.
#
#**/
#VOID
#EFIAPI
#CpuPause (
#    VOID
#  )
#
ASM_PFX(CpuPause):
AARCH64_BTI(c)
// Short busy-wait pause: five nops (no architectural side effects).
nop
nop
nop
nop
nop
ret
|
acidanthera/audk
| 1,071
|
MdePkg/Library/BaseLib/AArch64/GetInterruptsState.S
|
#------------------------------------------------------------------------------
#
# GetInterruptState() function for AArch64
#
# Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.p2align 2
GCC_ASM_EXPORT(GetInterruptState)
.set DAIF_RD_IRQ_BIT, (1 << 7)
#/**
# Retrieves the current CPU interrupt state.
#
# Returns TRUE is interrupts are currently enabled. Otherwise
# returns FALSE.
#
# @retval TRUE CPU interrupts are enabled.
# @retval FALSE CPU interrupts are disabled.
#
#**/
#
#BOOLEAN
#EFIAPI
#GetInterruptState (
# VOID
# );
#
ASM_PFX(GetInterruptState):
AARCH64_BTI(c)
// BOOLEAN GetInterruptState(VOID): TRUE when IRQs are unmasked.
mrs x0, daif
tst x0, #DAIF_RD_IRQ_BIT   // Check IRQ mask; set Z=1 if clear/unmasked
cset w0, eq                // if Z=1 (eq) return 1, else 0
ret
|
acidanthera/audk
| 2,980
|
MdePkg/Library/BaseLib/AArch64/SetJumpLongJump.S
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2009-2013, ARM Ltd. All rights reserved.
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
.text
.p2align 3
GCC_ASM_EXPORT(SetJump)
GCC_ASM_EXPORT(InternalLongJump)
GCC_ASM_IMPORT(InternalAssertJumpBuffer)
#define GPR_LAYOUT \
REG_PAIR (x19, x20, 0); \
REG_PAIR (x21, x22, 16); \
REG_PAIR (x23, x24, 32); \
REG_PAIR (x25, x26, 48); \
REG_PAIR (x27, x28, 64); \
REG_PAIR (x29, x30, 80);/*FP, LR*/ \
REG_ONE (x16, 96) /*IP0*/
#define FPR_LAYOUT \
REG_PAIR ( d8, d9, 104); \
REG_PAIR (d10, d11, 120); \
REG_PAIR (d12, d13, 136); \
REG_PAIR (d14, d15, 152);
#/**
# Saves the current CPU context that can be restored with a call to LongJump() and returns 0.#
#
# Saves the current CPU context in the buffer specified by JumpBuffer and returns 0. The initial
# call to SetJump() must always return 0. Subsequent calls to LongJump() cause a non-zero
# value to be returned by SetJump().
#
# If JumpBuffer is NULL, then ASSERT().
#
# @param JumpBuffer A pointer to CPU context buffer.
#
#**/
#
#UINTN
#EFIAPI
#SetJump (
# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer // X0
# );
#
ASM_PFX(SetJump):
AARCH64_BTI(c)
// Save callee-saved GPRs (x19-x28, fp, lr), the stack pointer (via
// x16/IP0) and the callee-saved low halves of d8-d15 into the jump
// buffer at x0, per GPR_LAYOUT/FPR_LAYOUT above, then return 0.
#ifndef MDEPKG_NDEBUG
stp x29, x30, [sp, #-32]!
mov x29, sp
str x0, [sp, #16]          // preserve JumpBuffer across the assert call
bl InternalAssertJumpBuffer
ldr x0, [sp, #16]
ldp x29, x30, [sp], #32
#endif
mov x16, sp // use IP0 so save SP
#define REG_PAIR(REG1, REG2, OFFS) stp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS) str REG1, [x0, OFFS]
GPR_LAYOUT
FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
mov x0, #0                 // direct SetJump call returns 0
ret
#/**
# Restores the CPU context that was saved with SetJump().#
#
# Restores the CPU context from the buffer specified by JumpBuffer.
# This function never returns to the caller.
# Instead it resumes execution based on the state of JumpBuffer.
#
# @param JumpBuffer A pointer to CPU context buffer.
# @param Value The value to return when the SetJump() context is restored.
#
#**/
#VOID
#EFIAPI
#InternalLongJump (
# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer, // X0
# IN UINTN Value // X1
# );
#
ASM_PFX(InternalLongJump):
AARCH64_BTI(c)
// Restore the context saved by SetJump from the buffer at x0, then
// "return" Value (x1) from that SetJump -- substituting 1 when the
// caller passed 0 so SetJump's zero return stays unique.
#define REG_PAIR(REG1, REG2, OFFS) ldp REG1, REG2, [x0, OFFS]
#define REG_ONE(REG1, OFFS) ldr REG1, [x0, OFFS]
GPR_LAYOUT
FPR_LAYOUT
#undef REG_PAIR
#undef REG_ONE
mov sp, x16                // x16 held the saved stack pointer
cmp x1, #0
mov x0, #1
csel x0, x1, x0, ne        // x0 = (Value != 0) ? Value : 1
ret
ASM_FUNCTION_REMOVE_IF_UNREFERENCED
|
acidanthera/audk
| 2,735
|
MdePkg/Library/BaseLib/LoongArch64/IoCsr.S
|
#------------------------------------------------------------------------------
#
# LoongArch ASM IO CSR operation functions
#
# Copyright (c) 2024, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX (IoCsrRead8)
ASM_GLOBAL ASM_PFX (IoCsrRead16)
ASM_GLOBAL ASM_PFX (IoCsrRead32)
ASM_GLOBAL ASM_PFX (IoCsrRead64)
ASM_GLOBAL ASM_PFX (IoCsrWrite8)
ASM_GLOBAL ASM_PFX (IoCsrWrite16)
ASM_GLOBAL ASM_PFX (IoCsrWrite32)
ASM_GLOBAL ASM_PFX (IoCsrWrite64)
#/**
# IO CSR read byte operation.
#
# @param[in] Select IO CSR read instruction select values.
#
# @return The return value of iocsrrd.b instruction.
#
#**/
ASM_PFX (IoCsrRead8):
iocsrrd.b $a0, $a0         # a0 = select in, byte value out
jirl $zero, $ra, 0         # return
#/**
#  IO CSR read half word operation.
#
#  @param[in]  Select   IO CSR read instruction select values.
#
#  @return     The return value of iocsrrd.h instruction.
#
#**/
ASM_PFX (IoCsrRead16):
iocsrrd.h $a0, $a0
jirl $zero, $ra, 0
#/**
#  IO CSR read word operation.
#
#  @param[in]  Select   IO CSR read instruction select values.
#
#  @return     The return value of iocsrrd.w instruction.
#
#**/
ASM_PFX (IoCsrRead32):
iocsrrd.w $a0, $a0
jirl $zero, $ra, 0
#/**
#  IO CSR read double word operation. Only for LoongArch64.
#
#  @param[in]  Select   IO CSR read instruction select values.
#
#  @return     The return value of iocsrrd.d instruction.
#
#**/
ASM_PFX (IoCsrRead64):
iocsrrd.d $a0, $a0
jirl $zero, $ra, 0
#/**
#  IO CSR write byte operation.
#
#  @param[in]  Select   IO CSR write instruction select values.
#  @param[in]  Value    The iocsrwr.b will write the value.
#
#  @return     VOID.
#
#**/
ASM_PFX (IoCsrWrite8):
iocsrwr.b $a1, $a0         # write a1 (Value) to IO CSR selected by a0
jirl $zero, $ra, 0
#/**
#  IO CSR write half word operation.
#
#  @param[in]  Select   IO CSR write instruction select values.
#  @param[in]  Value    The iocsrwr.h will write the value.
#
#  @return     VOID.
#
#**/
ASM_PFX (IoCsrWrite16):
iocsrwr.h $a1, $a0
jirl $zero, $ra, 0
#/**
#  IO CSR write word operation.
#
#  @param[in]  Select   IO CSR write instruction select values.
#  @param[in]  Value    The iocsrwr.w will write the value.
#
#  @return     VOID.
#
#**/
ASM_PFX (IoCsrWrite32):
iocsrwr.w $a1, $a0
jirl $zero, $ra, 0
#/**
#  IO CSR write double word operation. Only for LoongArch64.
#
#  @param[in]  Select   IO CSR write instruction select values.
#  @param[in]  Value    The iocsrwr.d will write the value.
#
#  @return     VOID.
#
#**/
ASM_PFX (IoCsrWrite64):
iocsrwr.d $a1, $a0
jirl $zero, $ra, 0
.end
|
acidanthera/audk
| 10,298
|
MdePkg/Library/BaseLib/LoongArch64/AsmCsr.S
|
#------------------------------------------------------------------------------
#
# LoongArch ASM CSR operation functions
#
# Copyright (c) 2024, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <Register/LoongArch64/Csr.h>
ASM_GLOBAL ASM_PFX (AsmCsrRead)
ASM_GLOBAL ASM_PFX (AsmCsrWrite)
ASM_GLOBAL ASM_PFX (AsmCsrXChg)
# Each macro below expands to exactly two 4-byte instructions (8 bytes total).
# The dispatchers rely on this fixed size: they index into tables of these
# stubs with "alsl.d $t0, idx, base, 3" (base + idx * 8).

# Read stub: $a0 = value of CSR \Sel, then return straight to the original caller.
.macro AsmCsrRd Sel
csrrd $a0, \Sel
jirl $zero, $ra, 0
.endm
# Write stub: CSR \Sel = $a0 (csrrd/csrwr use an immediate CSR number, hence one stub per CSR).
.macro AsmCsrWr Sel
csrwr $a0, \Sel
jirl $zero, $ra, 0
.endm
# Exchange stub: write the bits of $a0 selected by mask $a1 into CSR \Sel; old value returned in $a0.
.macro AsmCsrXChange Sel
csrxchg $a0, $a1, \Sel
jirl $zero, $ra, 0
.endm
#/**
# AsmCsrRead: read the CSR selected by $a0 (Select).
#
# The CSR number space is sparse, so the selector is range-checked against the
# CSR groups defined in <Register/LoongArch64/Csr.h> (basic, TLB, config,
# kernel-scratch, stable-timer, TLB-refill, direct-map). A valid selector is
# rebased to its group and used to index a table of 8-byte read stubs
# (csrrd + jirl, generated below); the stub returns to the original caller
# via the untouched $ra. An out-of-range selector traps with "break 0".
#
# In:  $a0 = CSR select number.  Out: $a0 = CSR value.  Clobbers: $t0, $t1.
#**/
ASM_PFX(AsmCsrRead):
blt $a0, $zero, ReadSelNumErr           # negative selector -> invalid, trap
li.w $t0, LOONGARCH_CSR_EBASE           # upper bound of the basic group
bltu $t0, $a0, TlbCsrRd                 # above basic group -> try TLB group
BasicCsrRd:
la.pcrel $t0, BasicCsrRead              # base of basic-group stub table
alsl.d $t0, $a0, $t0, 3                 # t0 += sel * 8 (one 8-byte stub per CSR)
jirl $zero, $t0, 0                      # tail-jump into stub; it returns via $ra
TlbCsrRd:
li.w $t0, LOONGARCH_CSR_TLBIDX
bltu $a0, $t0, ReadSelNumErr            # in the gap below the TLB group -> trap
li.w $t0, LOONGARCH_CSR_RVACFG
bltu $t0, $a0, CfgCsrRd                 # above TLB group -> try config group
la.pcrel $t0, TlbCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_TLBIDX  # rebase selector to start of group
alsl.d $t0, $t1, $t0, 3                 # index into 8-byte stubs
jirl $zero, $t0, 0
CfgCsrRd:
li.w $t0, LOONGARCH_CSR_CPUID
bltu $a0, $t0, ReadSelNumErr
li.w $t0, LOONGARCH_CSR_PRCFG3
bltu $t0, $a0, KcsCsrRd                 # above config group -> try kernel-scratch
la.pcrel $t0, CfgCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_CPUID
alsl.d $t0, $t1, $t0, 3
jirl $zero, $t0, 0
KcsCsrRd:
li.w $t0, LOONGARCH_CSR_KS0
bltu $a0, $t0, ReadSelNumErr
li.w $t0, LOONGARCH_CSR_KS8
bltu $t0, $a0, StableTimerCsrRd         # above KS group -> try stable timer
la.pcrel $t0, KcsCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_KS0
alsl.d $t0, $t1, $t0, 3
jirl $zero, $t0, 0
StableTimerCsrRd:
li.w $t0, LOONGARCH_CSR_TMID
bltu $a0, $t0, ReadSelNumErr
li.w $t0, LOONGARCH_CSR_TINTCLR
bltu $t0, $a0, TlbRefillCsrRd           # above timer group -> try TLB refill
la.pcrel $t0, StableTimerCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_TMID
alsl.d $t0, $t1, $t0, 3
jirl $zero, $t0, 0
TlbRefillCsrRd:
li.w $t0, LOONGARCH_CSR_TLBREBASE
bltu $a0, $t0, ReadSelNumErr
li.w $t0, LOONGARCH_CSR_TLBREHI
bltu $t0, $a0, DirMapCsrRd              # above refill group -> try direct map
la.pcrel $t0, TlbRefillCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_TLBREBASE
alsl.d $t0, $t1, $t0, 3
jirl $zero, $t0, 0
DirMapCsrRd:
li.w $t0, LOONGARCH_CSR_DMWIN0
bltu $a0, $t0, ReadSelNumErr
li.w $t0, LOONGARCH_CSR_DMWIN3
bltu $t0, $a0, ReadSelNumErr            # beyond the last group -> trap
la.pcrel $t0, DirMapCsrRead
addi.w $t1, $a0, -LOONGARCH_CSR_DMWIN0
alsl.d $t0, $t1, $t0, 3
jirl $zero, $t0, 0
ReadSelNumErr:
break 0                                 # invalid CSR select number: raise breakpoint exception
# Stub tables: one AsmCsrRd expansion (8 bytes) per CSR of each contiguous group.
BasicCsrRead:
CsrSel = LOONGARCH_CSR_CRMD
.rept LOONGARCH_CSR_EBASE - LOONGARCH_CSR_CRMD + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
TlbCsrRead:
CsrSel = LOONGARCH_CSR_TLBIDX
.rept LOONGARCH_CSR_RVACFG - LOONGARCH_CSR_TLBIDX + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
CfgCsrRead:
CsrSel = LOONGARCH_CSR_CPUID
.rept LOONGARCH_CSR_PRCFG3 - LOONGARCH_CSR_CPUID + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
KcsCsrRead:
CsrSel = LOONGARCH_CSR_KS0
.rept LOONGARCH_CSR_KS8 - LOONGARCH_CSR_KS0 + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
StableTimerCsrRead:
CsrSel = LOONGARCH_CSR_TMID
.rept LOONGARCH_CSR_TINTCLR - LOONGARCH_CSR_TMID + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
TlbRefillCsrRead:
CsrSel = LOONGARCH_CSR_TLBREBASE
.rept LOONGARCH_CSR_TLBREHI - LOONGARCH_CSR_TLBREBASE + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
DirMapCsrRead:
CsrSel = LOONGARCH_CSR_DMWIN0
.rept LOONGARCH_CSR_DMWIN3 - LOONGARCH_CSR_DMWIN0 + 1
AsmCsrRd CsrSel
CsrSel = CsrSel + 1
.endr
#/**
# AsmCsrWrite: write $a1 (Value) to the CSR selected by $a0 (Select).
#
# Same range-checked jump-table dispatch as AsmCsrRead, but the tables hold
# 8-byte write stubs (csrwr + jirl). The value is moved into $a0 just before
# the tail-jump because the AsmCsrWr stub writes from $a0. An out-of-range
# selector traps with "break 0".
#
# In:  $a0 = CSR select number, $a1 = value.
# Out: $a0 = old CSR value (csrwr swaps old value into its source register).
# Clobbers: $t0, $t1.
#**/
ASM_PFX(AsmCsrWrite):
blt $a0, $zero, WriteSelNumErr          # negative selector -> invalid, trap
li.w $t0, LOONGARCH_CSR_EBASE           # upper bound of the basic group
bltu $t0, $a0, TlbCsrWr                 # above basic group -> try TLB group
BasicCsrWr:
la.pcrel $t0, BasicCsrWrite             # base of basic-group stub table
alsl.d $t0, $a0, $t0, 3                 # t0 += sel * 8 (one 8-byte stub per CSR)
move $a0, $a1                           # stub writes from $a0: a0 = value
jirl $zero, $t0, 0                      # tail-jump into stub; it returns via $ra
TlbCsrWr:
li.w $t0, LOONGARCH_CSR_TLBIDX
bltu $a0, $t0, WriteSelNumErr           # in the gap below the TLB group -> trap
li.w $t0, LOONGARCH_CSR_RVACFG
bltu $t0, $a0, CfgCsrWr                 # above TLB group -> try config group
la.pcrel $t0, TlbCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_TLBIDX  # rebase selector to start of group
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
CfgCsrWr:
li.w $t0, LOONGARCH_CSR_CPUID
bltu $a0, $t0, WriteSelNumErr
li.w $t0, LOONGARCH_CSR_PRCFG3
bltu $t0, $a0, KcsCsrWr                 # above config group -> try kernel-scratch
la.pcrel $t0, CfgCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_CPUID
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
KcsCsrWr:
li.w $t0, LOONGARCH_CSR_KS0
bltu $a0, $t0, WriteSelNumErr
li.w $t0, LOONGARCH_CSR_KS8
bltu $t0, $a0, StableTimerCsrWr         # above KS group -> try stable timer
la.pcrel $t0, KcsCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_KS0
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
StableTimerCsrWr:
li.w $t0, LOONGARCH_CSR_TMID
bltu $a0, $t0, WriteSelNumErr
li.w $t0, LOONGARCH_CSR_TINTCLR
bltu $t0, $a0, TlbRefillCsrWr           # above timer group -> try TLB refill
la.pcrel $t0, StableTimerCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_TMID
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
TlbRefillCsrWr:
li.w $t0, LOONGARCH_CSR_TLBREBASE
bltu $a0, $t0, WriteSelNumErr
li.w $t0, LOONGARCH_CSR_TLBREHI
bltu $t0, $a0, DirMapCsrWr              # above refill group -> try direct map
la.pcrel $t0, TlbRefillCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_TLBREBASE
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
DirMapCsrWr:
li.w $t0, LOONGARCH_CSR_DMWIN0
bltu $a0, $t0, WriteSelNumErr
li.w $t0, LOONGARCH_CSR_DMWIN3
bltu $t0, $a0, WriteSelNumErr           # beyond the last group -> trap
la.pcrel $t0, DirMapCsrWrite
addi.w $t1, $a0, -LOONGARCH_CSR_DMWIN0
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
jirl $zero, $t0, 0
WriteSelNumErr:
break 0                                 # invalid CSR select number: raise breakpoint exception
# Stub tables: one AsmCsrWr expansion (8 bytes) per CSR of each contiguous group.
BasicCsrWrite:
CsrSel = LOONGARCH_CSR_CRMD
.rept LOONGARCH_CSR_EBASE - LOONGARCH_CSR_CRMD + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
TlbCsrWrite:
CsrSel = LOONGARCH_CSR_TLBIDX
.rept LOONGARCH_CSR_RVACFG - LOONGARCH_CSR_TLBIDX + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
CfgCsrWrite:
CsrSel = LOONGARCH_CSR_CPUID
.rept LOONGARCH_CSR_PRCFG3 - LOONGARCH_CSR_CPUID + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
KcsCsrWrite:
CsrSel = LOONGARCH_CSR_KS0
.rept LOONGARCH_CSR_KS8 - LOONGARCH_CSR_KS0 + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
StableTimerCsrWrite:
CsrSel = LOONGARCH_CSR_TMID
.rept LOONGARCH_CSR_TINTCLR - LOONGARCH_CSR_TMID + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
TlbRefillCsrWrite:
CsrSel = LOONGARCH_CSR_TLBREBASE
.rept LOONGARCH_CSR_TLBREHI - LOONGARCH_CSR_TLBREBASE + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
DirMapCsrWrite:
CsrSel = LOONGARCH_CSR_DMWIN0
.rept LOONGARCH_CSR_DMWIN3 - LOONGARCH_CSR_DMWIN0 + 1
AsmCsrWr CsrSel
CsrSel = CsrSel + 1
.endr
#/**
# AsmCsrXChg: masked exchange on the CSR selected by $a0 (Select).
#
# Same range-checked jump-table dispatch as AsmCsrRead/AsmCsrWrite, with
# 8-byte exchange stubs (csrxchg + jirl). Arguments are shuffled before the
# tail-jump so the stub sees value in $a0 and mask in $a1: only CSR bits set
# in the mask are replaced by the corresponding bits of the value.
# An out-of-range selector traps with "break 0".
#
# In:  $a0 = CSR select number, $a1 = new value, $a2 = bit mask.
# Out: $a0 = old CSR value (csrxchg returns it in its destination register).
# Clobbers: $t0, $t1, $a1.
#**/
ASM_PFX(AsmCsrXChg):
blt $a0, $zero, XchgSelNumErr           # negative selector -> invalid, trap
li.w $t0, LOONGARCH_CSR_EBASE           # upper bound of the basic group
bltu $t0, $a0, TlbCsrXchg               # above basic group -> try TLB group
BasicCsrXchg:
la.pcrel $t0, BasicCsrXchange           # base of basic-group stub table
alsl.d $t0, $a0, $t0, 3                 # t0 += sel * 8 (one 8-byte stub per CSR)
move $a0, $a1                           # a0 = value (stub operand order)
move $a1, $a2                           # a1 = mask
jirl $zero, $t0, 0                      # tail-jump into stub; it returns via $ra
TlbCsrXchg:
li.w $t0, LOONGARCH_CSR_TLBIDX
bltu $a0, $t0, XchgSelNumErr            # in the gap below the TLB group -> trap
li.w $t0, LOONGARCH_CSR_RVACFG
bltu $t0, $a0, CfgCsrXchg               # above TLB group -> try config group
la.pcrel $t0, TlbCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_TLBIDX  # rebase selector to start of group
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
CfgCsrXchg:
li.w $t0, LOONGARCH_CSR_CPUID
bltu $a0, $t0, XchgSelNumErr
li.w $t0, LOONGARCH_CSR_PRCFG3
bltu $t0, $a0, KcsCsrXchg               # above config group -> try kernel-scratch
la.pcrel $t0, CfgCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_CPUID
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
KcsCsrXchg:
li.w $t0, LOONGARCH_CSR_KS0
bltu $a0, $t0, XchgSelNumErr
li.w $t0, LOONGARCH_CSR_KS8
bltu $t0, $a0, StableTimerCsrXchg       # above KS group -> try stable timer
la.pcrel $t0, KcsCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_KS0
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
StableTimerCsrXchg:
li.w $t0, LOONGARCH_CSR_TMID
bltu $a0, $t0, XchgSelNumErr
li.w $t0, LOONGARCH_CSR_TINTCLR
bltu $t0, $a0, TlbRefillCsrXchg         # above timer group -> try TLB refill
la.pcrel $t0, StableTimerCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_TMID
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
TlbRefillCsrXchg:
li.w $t0, LOONGARCH_CSR_TLBREBASE
bltu $a0, $t0, XchgSelNumErr
li.w $t0, LOONGARCH_CSR_TLBREHI
bltu $t0, $a0, DirMapCsrXchg            # above refill group -> try direct map
la.pcrel $t0, TlbRefillCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_TLBREBASE
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
DirMapCsrXchg:
li.w $t0, LOONGARCH_CSR_DMWIN0
bltu $a0, $t0, XchgSelNumErr
li.w $t0, LOONGARCH_CSR_DMWIN3
bltu $t0, $a0, XchgSelNumErr            # beyond the last group -> trap
la.pcrel $t0, DirMapCsrXchange
addi.w $t1, $a0, -LOONGARCH_CSR_DMWIN0
alsl.d $t0, $t1, $t0, 3
move $a0, $a1
move $a1, $a2
jirl $zero, $t0, 0
XchgSelNumErr:
break 0                                 # invalid CSR select number: raise breakpoint exception
# Stub tables: one AsmCsrXChange expansion (8 bytes) per CSR of each contiguous group.
BasicCsrXchange:
CsrSel = LOONGARCH_CSR_CRMD
.rept LOONGARCH_CSR_EBASE - LOONGARCH_CSR_CRMD + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
TlbCsrXchange:
CsrSel = LOONGARCH_CSR_TLBIDX
.rept LOONGARCH_CSR_RVACFG - LOONGARCH_CSR_TLBIDX + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
CfgCsrXchange:
CsrSel = LOONGARCH_CSR_CPUID
.rept LOONGARCH_CSR_PRCFG3 - LOONGARCH_CSR_CPUID + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
KcsCsrXchange:
CsrSel = LOONGARCH_CSR_KS0
.rept LOONGARCH_CSR_KS8 - LOONGARCH_CSR_KS0 + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
StableTimerCsrXchange:
CsrSel = LOONGARCH_CSR_TMID
.rept LOONGARCH_CSR_TINTCLR - LOONGARCH_CSR_TMID + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
TlbRefillCsrXchange:
CsrSel = LOONGARCH_CSR_TLBREBASE
.rept LOONGARCH_CSR_TLBREHI - LOONGARCH_CSR_TLBREBASE + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
DirMapCsrXchange:
CsrSel = LOONGARCH_CSR_DMWIN0
.rept LOONGARCH_CSR_DMWIN3 - LOONGARCH_CSR_DMWIN0 + 1
AsmCsrXChange CsrSel
CsrSel = CsrSel + 1
.endr
.end
|
acidanthera/audk
| 1,183
|
MdePkg/Library/BaseLib/LoongArch64/ExceptionBase.S
|
#------------------------------------------------------------------------------
#
# LoongArch set exception base address operations
#
# Copyright (c) 2024, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#include <Base.h>
#include <Register/LoongArch64/Csr.h>
ASM_GLOBAL ASM_PFX(SetExceptionBaseAddress)
ASM_GLOBAL ASM_PFX(SetTlbRebaseAddress)
#/**
# Set the exception base address for LoongArch.
#
# @param ExceptionBaseAddress The exception base address, must be aligned to 4K or greater.
#**/
ASM_PFX(SetExceptionBaseAddress):
csrrd $t0, LOONGARCH_CSR_ECFG           # $t0 = current exception configuration
li.d $t1, ~(BIT16 | BIT17 | BIT18)      # mask to clear ECFG bits 18:16 (VS field per LoongArch manual)
and $t0, $t0, $t1                       # VS = 0: all exceptions share a single entry
csrwr $t0, LOONGARCH_CSR_ECFG           # commit the updated configuration
move $t0, $a0                           # $a0 = ExceptionBaseAddress
csrwr $t0, LOONGARCH_CSR_EBASE          # set the exception entry base address
jirl $zero, $ra, 0                      # return to caller
#/**
# Set the TlbRebase address for LoongArch.
#
# @param TlbRebaseAddress The TlbRebase address, must be aligned to 4K or greater.
#**/
ASM_PFX(SetTlbRebaseAddress):
move $t0, $a0                           # $a0 = TlbRebaseAddress
csrwr $t0, LOONGARCH_CSR_TLBREBASE      # set the TLB-refill exception entry base address
jirl $zero, $ra, 0                      # return to caller
.end
|
acidanthera/audk
| 1,182
|
MdePkg/Library/BaseLib/LoongArch64/SwitchStack.S
|
#------------------------------------------------------------------------------
#
# InternalSwitchStackAsm for LoongArch
#
# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#define STORE st.d /* 64 bit mode regsave instruction */
#define LOAD ld.d /* 64 bit mode regload instruction */
#define RSIZE 8 /* 64 bit mode register size */
ASM_GLOBAL ASM_PFX(InternalSwitchStackAsm)
/**
  This allows the caller to switch the stack and goes to the new entry point

  @param JumpBuffer A pointer to CPU context buffer.

  Restores s0-s8, sp and fp from the buffer; slot 11 (the saved-RA slot)
  holds the target entry point. The new entry point's first two arguments
  are loaded from the top of the new stack (presumably Context1/Context2
  stored there by the C-side caller -- confirm against SwitchStack.c).
  This function never returns to its caller.
**/
ASM_PFX(InternalSwitchStackAsm):
  LOAD    $ra, $a0, RSIZE * 11          # $ra = new entry point (buffer slot 11)
  LOAD    $s0, $a0, RSIZE * 0           # restore callee-saved registers s0-s8
  LOAD    $s1, $a0, RSIZE * 1
  LOAD    $s2, $a0, RSIZE * 2
  LOAD    $s3, $a0, RSIZE * 3
  LOAD    $s4, $a0, RSIZE * 4
  LOAD    $s5, $a0, RSIZE * 5
  LOAD    $s6, $a0, RSIZE * 6
  LOAD    $s7, $a0, RSIZE * 7
  LOAD    $s8, $a0, RSIZE * 8
  LOAD    $sp, $a0, RSIZE * 9           # switch to the new stack
  LOAD    $fp, $a0, RSIZE * 10          # and its frame pointer
  LOAD    $a0, $sp, 0                   # first argument from new stack top
  LOAD    $a1, $sp, 8                   # second argument from new stack top
  jirl    $zero, $ra, 0                 # jump to the new entry point (no link)
.end
|
acidanthera/audk
| 1,477
|
MdePkg/Library/BaseLib/LoongArch64/SetJumpLongJump.S
|
#------------------------------------------------------------------------------
#
# Set/Long jump for LoongArch
#
# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------
#define STORE st.d /* 64 bit mode regsave instruction */
#define LOAD ld.d /* 64 bit mode regload instruction */
#define RSIZE 8 /* 64 bit mode register size */
ASM_GLOBAL ASM_PFX(SetJump)
ASM_GLOBAL ASM_PFX(InternalLongJump)
# SetJump: save the non-volatile CPU context into the jump buffer at $a0
# (slots 0-11: s0-s8, sp, fp, ra) and return 0. A later InternalLongJump on
# the same buffer resumes after this call site with a non-zero return value.
ASM_PFX(SetJump):
  STORE   $s0, $a0, RSIZE * 0           # save callee-saved registers s0-s8
  STORE   $s1, $a0, RSIZE * 1
  STORE   $s2, $a0, RSIZE * 2
  STORE   $s3, $a0, RSIZE * 3
  STORE   $s4, $a0, RSIZE * 4
  STORE   $s5, $a0, RSIZE * 5
  STORE   $s6, $a0, RSIZE * 6
  STORE   $s7, $a0, RSIZE * 7
  STORE   $s8, $a0, RSIZE * 8
  STORE   $sp, $a0, RSIZE * 9           # save stack pointer
  STORE   $fp, $a0, RSIZE * 10          # save frame pointer
  STORE   $ra, $a0, RSIZE * 11          # save return address (LongJump resumes here)
  li.w    $a0, 0                        # direct SetJump invocation returns 0
  jirl    $zero, $ra, 0                 # return to caller
# InternalLongJump: restore the context saved by SetJump from the buffer at
# $a0, then "return" to the saved $ra as though SetJump were returning, with
# return value $a1 (callers pass a non-zero Value so it is distinguishable
# from the direct SetJump return of 0).
ASM_PFX(InternalLongJump):
  LOAD    $ra, $a0, RSIZE * 11          # $ra = address saved by SetJump
  LOAD    $s0, $a0, RSIZE * 0           # restore callee-saved registers s0-s8
  LOAD    $s1, $a0, RSIZE * 1
  LOAD    $s2, $a0, RSIZE * 2
  LOAD    $s3, $a0, RSIZE * 3
  LOAD    $s4, $a0, RSIZE * 4
  LOAD    $s5, $a0, RSIZE * 5
  LOAD    $s6, $a0, RSIZE * 6
  LOAD    $s7, $a0, RSIZE * 7
  LOAD    $s8, $a0, RSIZE * 8
  LOAD    $sp, $a0, RSIZE * 9           # restore stack pointer
  LOAD    $fp, $a0, RSIZE * 10          # restore frame pointer
  move    $a0, $a1                      # SetJump's apparent return value = Value
  jirl    $zero, $ra, 0                 # resume at the SetJump call site
.end
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.