Dataset schema (per-row columns):
  repo_id    string, length 5-115
  size       int64, 590-5.01M
  file_path  string, length 4-212
  content    string, length 590-5.01M
File: plugin-libc/libgcc/config/epiphany/udivsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 2,167 bytes
/* Unsigned 32 bit division optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__udivsi3,T_UINT)
    .global SYM(__udivsi3)
    .balign 4
    HIDDEN_FUNC(__udivsi3)
SYM(__udivsi3):
    sub r3,r0,r1
    bltu .Lret0
    mov r3,0x95
    lsl r12,r3,23 ; 0x4a800000
    lsl r3,r3,30 ; 0x40000000
    orr r16,r0,r3
    orr r2,r1,r3
    fsub r16,r16,r3
    fsub r2,r2,r3
    lsr r3,r1,21
    lsr r17,r0,21
    movt r17,0x4a80
    fsub r17,r17,r12
    movt r3,0x4a80
    fsub r3,r3,r12
    mov r12,%low(.L0step)
    movt r12,%high(.L0step)
    mov r21,1
    movne r16,r17
    lsr r17,r1,21
    movne r2,r3
    lsr r3,r16,23 ; must mask lower bits of r2 in case op0 was ..
    lsr r2,r2,23 ; .. shifted and op1 was not.
    sub r3,r3,r2 ; calculate bit number difference.
    lsl r1,r1,r3
    lsr r16,r1,1
    lsl r2,r21,r3
    lsl r3,r3,3
    sub r12,r12,r3
    sub r3,r0,r1
    movltu r3,r0
    mov r0,0
    movgteu r0,r2
    lsr r2,r2,1
    add r17,r2,r0
    sub r1,r3,r16
    movgteu r3,r1
    movgteu r0,r17
    sub r16,r16,1
    jr r12
    .rep 30
    lsl r3,r3,1
    sub r1,r3,r16
    movgteu r3,r1
    .endr
    sub r2,r2,1 ; mask result bits from steps ...
    and r3,r3,r2
    orr r0,r0,r3 ; ... and combine with first bits.
    nop
.L0step:rts
.Lret0: mov r0,0
    rts
    ENDFUNC(__udivsi3)
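The routine above aligns the divisor's most significant bit with the dividend's (the float-format or/fsub tricks extract each operand's top-bit position as a float exponent) and then runs one restoring shift-subtract step per remaining bit. A minimal C sketch of that control flow, with __builtin_clz standing in for the float-exponent trick and the function name invented for illustration (d must be nonzero):

#include <stdint.h>

uint32_t udivsi3_model(uint32_t n, uint32_t d)
{
    if (n < d)
        return 0;                      /* mirrors the early .Lret0 exit */
    /* Bit-position difference; the assembly derives this from exponents. */
    int shift = __builtin_clz(d) - __builtin_clz(n);
    uint32_t bit = 1u << shift;
    uint32_t q = 0;
    d <<= shift;                       /* align divisor under dividend MSB */
    while (bit) {                      /* one restoring step per position */
        if (n >= d) {
            n -= d;
            q |= bit;
        }
        d >>= 1;
        bit >>= 1;
    }
    return q;
}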
File: plugin-libc/libgcc/config/epiphany/divsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 2,217 bytes
/* Signed 32 bit division optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__divsi3,T_INT)
    .global SYM(__divsi3)
    .balign 4
    HIDDEN_FUNC(__divsi3)
SYM(__divsi3):
    mov r12,0
    sub r2,r12,r0
    movlt r2,r0
    sub r3,r12,r1
    movlt r3,r1
    sub r19,r2,r3
    bltu .Lret0
    movt r12,0x4000
    orr r16,r2,r12
    orr r18,r3,r12
    fsub r16,r16,r12
    fsub r18,r18,r12
    movt r12,0x4b80
    lsr r19,r3,23
    lsr r17,r2,23
    movt r17,0x4b80
    fsub r17,r17,r12
    movt r19,0x4b80
    fsub r19,r19,r12
    mov r12,%low(.L0step)
    movt r12,%high(.L0step)
    mov r20,0
    mov r21,1
    movne r16,r17
    lsr r17,r3,23
    movne r18,r19
    eor r1,r1,r0 ; save sign
    asr r19,r1,31
    lsr r1,r16,23
    lsr r0,r18,23
    sub r1,r1,r0 ; calculate bit number difference.
    lsl r3,r3,r1
    lsr r16,r3,1
    lsl r0,r21,r1
    lsl r1,r1,3
    sub r12,r12,r1
    sub r3,r2,r3
    movgteu r2,r3
    movgteu r20,r0
    lsr r0,r0,1
    add r17,r0,r20
    sub r3,r2,r16
    movgteu r2,r3
    movgteu r20,r17
    sub r16,r16,1
    jr r12
    .rep 30
    lsl r2,r2,1
    sub r3,r2,r16
    movgteu r2,r3
    .endr
    sub r0,r0,1 ; mask result bits from steps ...
    and r0,r0,r2
    orr r20,r0,r20 ; ... and combine with first bit.
.L0step:eor r0,r20,r19 ; restore sign
    sub r0,r0,r19
    rts
.Lret0: mov r0,0
    rts
    ENDFUNC(__divsi3)
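__divsi3 reduces signed division to the unsigned core: both operands are replaced by their magnitudes, the quotient's sign is the XOR of the operand signs (eor r1,r1,r0 then asr r19,r1,31), and the sign is reapplied at .L0step with the branch-free eor/sub idiom. A hedged C sketch of just that sign handling; C's / operator stands in for the shift-subtract core, and the name is illustrative:

#include <stdint.h>

int32_t divsi3_sign_model(int32_t a, int32_t b)
{
    uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
    uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;
    uint32_t uq = ua / ub;            /* stands in for the unsigned core */
    int32_t sign = (a ^ b) >> 31;     /* 0 or -1 (arithmetic shift assumed) */
    /* eor/sub: x ^ 0 - 0 = x, x ^ -1 - -1 = -x */
    return (int32_t)((uq ^ (uint32_t)sign) - (uint32_t)sign);
}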
File: plugin-libc/libgcc/config/epiphany/umodsi3-float.S | repo: 4ms/metamodule-plugin-sdk | size: 2,137 bytes
/* Unsigned 32 bit division optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

/* Because we handle a dividend with bit 31 set with truncating integer arithmetic, there is no rounding-related overflow.  */

    FSTAB (__umodsi3,T_UINT)
    .global SYM(__umodsi3)
    .balign 4
    HIDDEN_FUNC(__umodsi3)
SYM(__umodsi3):
    float r2,r0
    mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
    float TMP2,r1
    movt TMP1,%high(0xb0800000)
    asr TMP0,r0,8
    sub TMP0,TMP0,TMP1
    mov TMP1,%low(.L0step)
    movgteu r2,TMP0
    sub r2,r2,TMP2
    blteu .L0step
    asr r2,r2,23
    movt TMP1,%high(.L0step)
    lsl TMP2,r2,3
    lsl r2,r1,r2` sub r2,r0,r2` movgteu r0,r2 ; STEP(r2)
    sub r2,TMP1,TMP2
    jr r2
#define STEP(n) lsl.l r2,r1,n` sub r2,r0,r2` movgteu r0,r2
    .balign 8,,2
    STEP(31)` STEP(30)` STEP(29)` STEP(28)` STEP(27)` STEP(26)` STEP(25)` STEP(24)`
    STEP(23)` STEP(22)` STEP(21)` STEP(20)` STEP(19)` STEP(18)` STEP(17)` STEP(16)`
    STEP(15)` STEP(14)` STEP(13)` STEP(12)` STEP(11)` STEP(10)` STEP(9)` STEP(8)`
    STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_r0:
    rts
    ENDFUNC(__umodsi3)
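Each STEP(n) tries to subtract the divisor shifted left by n from the running remainder and keeps the result only when the subtraction does not underflow (movgteu); the float compare at the top selects the entry point into the unrolled chain. A hedged C model, assuming d != 0 and using __builtin_clz where the assembly uses the float exponents (the name is illustrative):

#include <stdint.h>

uint32_t umodsi3_model(uint32_t n, uint32_t d)
{
    /* Enter where the assembly's jump table would: at the largest k
       for which d << k cannot lose bits. */
    for (int k = __builtin_clz(d); k >= 0; k--)
        if ((d << k) <= n)
            n -= d << k;       /* STEP(k) keeps the subtracted value */
    return n;                  /* the remainder stays in r0 */
}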
File: plugin-libc/libgcc/config/epiphany/divsi3-float.S | repo: 4ms/metamodule-plugin-sdk | size: 2,048 bytes
/* Signed 32 bit division optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__divsi3,T_UINT)
    .global SYM(__divsi3)
    .balign 4
    HIDDEN_FUNC(__divsi3)
SYM(__divsi3):
    float TMP2,r0
    mov TMP4,0
    float TMP1,r1
    sub TMP0,TMP4,r0
    beq .Lret_r0
    movgt r0,TMP0
    sub TMP0,TMP4,r1
    movgt r1,TMP0
    mov TMP0,1
    sub TMP2,TMP2,TMP1
    asr TMP3,TMP2,31 ; save sign
    lsl TMP2,TMP2,1
    blt .Lret0
    sub TMP1,TMP2,1 ; rounding compensation, avoid overflow
    movgte TMP2,TMP1
    lsr TMP2,TMP2,24
    lsl r1,r1,TMP2
    lsl TMP0,TMP0,TMP2
    sub TMP1,r0,r1
    movgteu r0,TMP1
    movgteu TMP4,TMP0
    lsl TMP5,TMP0,1
    sub TMP1,r0,r1
    movgteu r0,TMP1
    movgteu TMP4,TMP5
    sub TMP1,r1,1
    mov r1,%low(.L0step)
    movt r1,%high(.L0step)
    lsl TMP2,TMP2,3
    sub r1,r1,TMP2
    jr r1
    .rep 30
    lsl r0,r0,1
    sub.l r1,r0,TMP1
    movgteu r0,r1
    .endr
.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
    and r0,r0,r1
    orr r0,r0,TMP4 ; ... and combine with first bit.
    eor r0,r0,TMP3 ; restore sign
    sub r0,r0,TMP3
.Lret_r0:rts
.Lret0: mov r0,0
    rts
    ENDFUNC(__divsi3)
File: plugin-libc/libgcc/config/epiphany/modsi3-float.S | repo: 4ms/metamodule-plugin-sdk | size: 1,975 bytes
/* Signed 32 bit modulo optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__modsi3,T_UINT)
    .global SYM(__modsi3)
    .balign 4
    HIDDEN_FUNC(__modsi3)
SYM(__modsi3):
    asr TMP3,r0,31 ; save sign
    float TMP0,r0
    float TMP1,r1
    mov r2,0
    sub TMP4,r2,r0
    beq .Lret_r0
    movgt r0,TMP4
    sub TMP2,r2,r1
    movlte TMP2,r1
    sub r2,TMP0,TMP1
    lsl r2,r2,1
    blte .L0step
    asr TMP4,r2,24
    lsl r2,TMP4,3
    mov TMP4,%low(.L0step)
    movt TMP4,%high(.L0step)
    sub r2,TMP4,r2
    jr r2
#define STEP(n) lsl.l r2,TMP2,n` sub r2,r0,r2` movgteu r0,r2
    .balign 8,,2
    STEP(31)` STEP(30)` STEP(29)` STEP(28)` STEP(27)` STEP(26)` STEP(25)` STEP(24)`
    STEP(23)` STEP(22)` STEP(21)` STEP(20)` STEP(19)` STEP(18)` STEP(17)` STEP(16)`
    STEP(15)` STEP(14)` STEP(13)` STEP(12)` STEP(11)` STEP(10)` STEP(9)` STEP(8)`
    STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
    eor r0,r0,TMP3 ; restore sign
    sub r0,r0,TMP3
.Lret_r0:
    rts
    ENDFUNC(__modsi3)
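Note that only the dividend's sign is saved (asr TMP3,r0,31): as with C's % operator, the remainder takes the sign of the dividend regardless of the divisor's sign. A small hedged illustration of that convention; the STEP chain is stood in by % on magnitudes, and the name is invented:

#include <stdint.h>

int32_t modsi3_sign_model(int32_t a, int32_t b)
{
    uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
    uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;
    uint32_t ur = ua % ub;          /* stands in for the STEP chain */
    int32_t s = a >> 31;            /* saved dividend sign: 0 or -1 */
    return (int32_t)((ur ^ (uint32_t)s) - (uint32_t)s);
}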
File: plugin-libc/libgcc/config/epiphany/crtm1reg-r43.S | repo: 4ms/metamodule-plugin-sdk | size: 1,094 bytes
# initialize config for -m1reg-r43
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

    .section .init
    mov r0, 0
    sub r43,r0,1
File: plugin-libc/libgcc/config/epiphany/crti.S | repo: 4ms/metamodule-plugin-sdk | size: 1,179 bytes
# Start .init and .fini sections.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

    .section .init
    .global init
    .balign 2
init:
    str lr,[sp],-4

    .section .fini
    .global fini
    .balign 2
fini:
    str lr,[sp],-4
File: plugin-libc/libgcc/config/epiphany/crtrunc.S | repo: 4ms/metamodule-plugin-sdk | size: 1,105 bytes
# initialize config for -mfp-mode=truncate
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

    .section .init
    mov r0, 1
    movts config,r0
File: plugin-libc/libgcc/config/epiphany/udivsi3-float.S | repo: 4ms/metamodule-plugin-sdk | size: 2,250 bytes
/* Unsigned 32 bit division optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__udivsi3,T_UINT)
    .global SYM(__udivsi3)
    .balign 4
    HIDDEN_FUNC(__udivsi3)
SYM(__udivsi3):
    sub TMP0,r0,r1
    bltu .Lret0
    float TMP2,r0
    mov TMP1,%low(0xb0800000) ; ??? this would be faster with small data
    float TMP3,r1
    movt TMP1,%high(0xb0800000)
    asr TMP0,r0,8
    sub TMP0,TMP0,TMP1
    movt TMP1,%high(0x00810000)
    movgteu TMP2,TMP0
    bblt .Lret1
    sub TMP2,TMP2,TMP1
    sub TMP2,TMP2,TMP3
    mov TMP3,0
    movltu TMP2,TMP3
    lsr TMP2,TMP2,23
    lsl r1,r1,TMP2
    mov TMP0,1
    lsl TMP0,TMP0,TMP2
    sub r0,r0,r1
    bltu .Ladd_back
    add TMP3,TMP3,TMP0
    sub r0,r0,r1
    bltu .Ladd_back
.Lsub_loop: ; More than two iterations are rare, so it makes sense to leave
    ; this label here to reduce average branch penalties.
    add TMP3,TMP3,TMP0
    sub r0,r0,r1
    bgteu .Lsub_loop
.Ladd_back:
    add r0,r0,r1
    sub TMP1,r1,1
    mov r1,%low(.L0step)
    movt r1,%high(.L0step)
    lsl TMP2,TMP2,3
    sub r1,r1,TMP2
    jr r1
    .rep 30
    lsl r0,r0,1
    sub.l r1,r0,TMP1
    movgteu r0,r1
    .endr
.L0step:sub r1,TMP0,1 ; mask result bits from steps ...
    and r0,r0,r1
    orr r0,r0,TMP3 ; ... and combine with first bits.
    rts
.Lret0: mov r0,0
    rts
.Lret1: mov r0,1
    rts
    ENDFUNC(__udivsi3)
File: plugin-libc/libgcc/config/epiphany/crtint.S | repo: 4ms/metamodule-plugin-sdk | size: 1,137 bytes
# initialize config for -mfp-mode=int
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

    .section .init
    mov r0, %low(#524288)
    movt r0, %high(#524288)
    movts config,r0
File: plugin-libc/libgcc/config/epiphany/crtm1reg-r63.S | repo: 4ms/metamodule-plugin-sdk | size: 1,094 bytes
# initialize config for -m1reg-r63
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# Contributed by Embecosm on behalf of Adapteva, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

    .section .init
    mov r0, 0
    sub r63,r0,1
File: plugin-libc/libgcc/config/epiphany/modsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 2,275 bytes
/* Signed 32 bit modulo optimized for Epiphany.
   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Embecosm on behalf of Adapteva, Inc.

   This file is part of GCC.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

#include "epiphany-asm.h"

    FSTAB (__modsi3,T_INT)
    .global SYM(__modsi3)
    .balign 4
    HIDDEN_FUNC(__modsi3)
SYM(__modsi3):
    asr r17,r0,31 ; save sign
    mov r2,0
    sub r3,r2,r0
    movgt r0,r3
    sub r3,r2,r1
    movgt r1,r3
    movt r2,0xa000 ; 0xa0000000
    orr r3,r2,r0
    lsr r15,r0,16
    movt r15,0xa800
    movne r3,r15
    lsr r16,r2,2 ; 0x28000000
    and r15,r3,r16
    fadd r12,r3,r15
    orr r3,r2,r1
    lsr r2,r1,16
    movt r2,0xa800
    movne r3,r2
    and r2,r16,r3
    fadd r3,r3,r2
    sub r2,r0,r1
    bltu .Lret_a
    lsr r12,r12,23
    mov r2,%low(.L0step)
    movt r2,%high(.L0step)
    lsr r3,r3,23
    sub r3,r12,r3 ; calculate bit number difference.
    lsl r3,r3,3
    sub r2,r2,r3
    jr r2
/* lsl_l r2,r1,n` sub r2,r0,r2` movgteu r0,r2 */
#define STEP(n) .long 0x0006441f | (n) << 5` sub r2,r0,r2` movgteu r0,r2
    .balign 8,,2
    STEP(31)` STEP(30)` STEP(29)` STEP(28)` STEP(27)` STEP(26)` STEP(25)` STEP(24)`
    STEP(23)` STEP(22)` STEP(21)` STEP(20)` STEP(19)` STEP(18)` STEP(17)` STEP(16)`
    STEP(15)` STEP(14)` STEP(13)` STEP(12)` STEP(11)` STEP(10)` STEP(9)` STEP(8)`
    STEP(7)` STEP(6)` STEP(5)` STEP(4)` STEP(3)` STEP(2)` STEP(1)
.L0step:STEP(0)
.Lret_a:eor r0,r0,r17 ; restore sign
    sub r0,r0,r17
    rts
    ENDFUNC(__modsi3)
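Here the STEP macro emits a raw .long instead of a mnemonic: the lsl_l form shown in the comment is hand-encoded, with the shift amount n spliced into the opcode word 0x0006441f at bit 5, per the macro itself. A tiny C illustration of that encoding, purely for inspection (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* Word the assembler would emit for the shift part of STEP(n). */
static uint32_t encode_lsl_l_r2_r1(unsigned n)
{
    return 0x0006441fu | (n << 5);
}

int main(void)
{
    for (unsigned n = 0; n < 32; n += 8)
        printf("STEP(%u) shift word: 0x%08x\n", n, encode_lsl_l_r2_r1(n));
    return 0;
}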
File: plugin-libc/libgcc/config/lm32/_ashrsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 2,934 bytes
# _ashrsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#

/* Arithmetic right shift.  */

    .global __ashrsi3
    .type __ashrsi3,@function
__ashrsi3:
    /* Only use 5 LSBs, as that's all the h/w shifter uses.  */
    andi r2, r2, 0x1f
    /* Get address of offset into unrolled shift loop to jump to.  */
#ifdef __PIC__
    lw r3, (gp+got(__ashrsi3_0))
#else
    mvhi r3, hi(__ashrsi3_0)
    ori r3, r3, lo(__ashrsi3_0)
#endif
    add r2, r2, r2
    add r2, r2, r2
    sub r3, r3, r2
    b r3

__ashrsi3_31: sri r1, r1, 1
__ashrsi3_30: sri r1, r1, 1
__ashrsi3_29: sri r1, r1, 1
__ashrsi3_28: sri r1, r1, 1
__ashrsi3_27: sri r1, r1, 1
__ashrsi3_26: sri r1, r1, 1
__ashrsi3_25: sri r1, r1, 1
__ashrsi3_24: sri r1, r1, 1
__ashrsi3_23: sri r1, r1, 1
__ashrsi3_22: sri r1, r1, 1
__ashrsi3_21: sri r1, r1, 1
__ashrsi3_20: sri r1, r1, 1
__ashrsi3_19: sri r1, r1, 1
__ashrsi3_18: sri r1, r1, 1
__ashrsi3_17: sri r1, r1, 1
__ashrsi3_16: sri r1, r1, 1
__ashrsi3_15: sri r1, r1, 1
__ashrsi3_14: sri r1, r1, 1
__ashrsi3_13: sri r1, r1, 1
__ashrsi3_12: sri r1, r1, 1
__ashrsi3_11: sri r1, r1, 1
__ashrsi3_10: sri r1, r1, 1
__ashrsi3_9: sri r1, r1, 1
__ashrsi3_8: sri r1, r1, 1
__ashrsi3_7: sri r1, r1, 1
__ashrsi3_6: sri r1, r1, 1
__ashrsi3_5: sri r1, r1, 1
__ashrsi3_4: sri r1, r1, 1
__ashrsi3_3: sri r1, r1, 1
__ashrsi3_2: sri r1, r1, 1
__ashrsi3_1: sri r1, r1, 1
__ashrsi3_0: ret
File: plugin-libc/libgcc/config/lm32/crtn.S | repo: 4ms/metamodule-plugin-sdk | size: 1,274 bytes
# crtn.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com>
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#

    .section .init
    lw ra, (sp+4)
    addi sp, sp, 4
    ret

    .section .fini
    lw ra, (sp+4)
    addi sp, sp, 4
    ret
File: plugin-libc/libgcc/config/lm32/_ashlsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 2,979 bytes
# _ashlsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#

/* Arithmetic left shift.  */

    .text
    .global __ashlsi3
    .type __ashlsi3,@function
    .align 4
__ashlsi3:
    /* Only use 5 LSBs, as that's all the h/w shifter uses.  */
    andi r2, r2, 0x1f
    /* Get address of offset into unrolled shift loop to jump to.  */
#ifdef __PIC__
    lw r3, (gp+got(__ashlsi3_0))
#else
    mvhi r3, hi(__ashlsi3_0)
    ori r3, r3, lo(__ashlsi3_0)
#endif
    add r2, r2, r2
    add r2, r2, r2
    sub r3, r3, r2
    b r3

__ashlsi3_31: add r1, r1, r1
__ashlsi3_30: add r1, r1, r1
__ashlsi3_29: add r1, r1, r1
__ashlsi3_28: add r1, r1, r1
__ashlsi3_27: add r1, r1, r1
__ashlsi3_26: add r1, r1, r1
__ashlsi3_25: add r1, r1, r1
__ashlsi3_24: add r1, r1, r1
__ashlsi3_23: add r1, r1, r1
__ashlsi3_22: add r1, r1, r1
__ashlsi3_21: add r1, r1, r1
__ashlsi3_20: add r1, r1, r1
__ashlsi3_19: add r1, r1, r1
__ashlsi3_18: add r1, r1, r1
__ashlsi3_17: add r1, r1, r1
__ashlsi3_16: add r1, r1, r1
__ashlsi3_15: add r1, r1, r1
__ashlsi3_14: add r1, r1, r1
__ashlsi3_13: add r1, r1, r1
__ashlsi3_12: add r1, r1, r1
__ashlsi3_11: add r1, r1, r1
__ashlsi3_10: add r1, r1, r1
__ashlsi3_9: add r1, r1, r1
__ashlsi3_8: add r1, r1, r1
__ashlsi3_7: add r1, r1, r1
__ashlsi3_6: add r1, r1, r1
__ashlsi3_5: add r1, r1, r1
__ashlsi3_4: add r1, r1, r1
__ashlsi3_3: add r1, r1, r1
__ashlsi3_2: add r1, r1, r1
__ashlsi3_1: add r1, r1, r1
__ashlsi3_0: ret
File: plugin-libc/libgcc/config/lm32/_lshrsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 3,137 bytes
# _lshrsi3.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com> and Richard Henderson.
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#

/* Logical right shift.  */

    .global __lshrsi3
    .type __lshrsi3,@function
__lshrsi3:
    /* Only use 5 LSBs, as that's all the h/w shifter uses.  */
    andi r2, r2, 0x1f
    /* Get address of offset into unrolled shift loop to jump to.  */
#ifdef __PIC__
    lw r3, (gp+got(__lshrsi3_0))
#else
    mvhi r3, hi(__lshrsi3_0)
    ori r3, r3, lo(__lshrsi3_0)
#endif
    add r2, r2, r2
    add r2, r2, r2
    sub r3, r3, r2
    b r3

__lshrsi3_31: srui r1, r1, 1
__lshrsi3_30: srui r1, r1, 1
__lshrsi3_29: srui r1, r1, 1
__lshrsi3_28: srui r1, r1, 1
__lshrsi3_27: srui r1, r1, 1
__lshrsi3_26: srui r1, r1, 1
__lshrsi3_25: srui r1, r1, 1
__lshrsi3_24: srui r1, r1, 1
__lshrsi3_23: srui r1, r1, 1
__lshrsi3_22: srui r1, r1, 1
__lshrsi3_21: srui r1, r1, 1
__lshrsi3_20: srui r1, r1, 1
__lshrsi3_19: srui r1, r1, 1
__lshrsi3_18: srui r1, r1, 1
__lshrsi3_17: srui r1, r1, 1
__lshrsi3_16: srui r1, r1, 1
__lshrsi3_15: srui r1, r1, 1
__lshrsi3_14: srui r1, r1, 1
__lshrsi3_13: srui r1, r1, 1
__lshrsi3_12: srui r1, r1, 1
__lshrsi3_11: srui r1, r1, 1
__lshrsi3_10: srui r1, r1, 1
__lshrsi3_9: srui r1, r1, 1
__lshrsi3_8: srui r1, r1, 1
__lshrsi3_7: srui r1, r1, 1
__lshrsi3_6: srui r1, r1, 1
__lshrsi3_5: srui r1, r1, 1
__lshrsi3_4: srui r1, r1, 1
__lshrsi3_3: srui r1, r1, 1
__lshrsi3_2: srui r1, r1, 1
__lshrsi3_1: srui r1, r1, 1
__lshrsi3_0: ret
File: plugin-libc/libgcc/config/lm32/crti.S | repo: 4ms/metamodule-plugin-sdk | size: 1,385 bytes
# crti.S for Lattice Mico32
# Contributed by Jon Beniston <jon@beniston.com>
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#

    .section .init
    .global _init
    .type _init,@function
    .align 4
_init:
    addi sp, sp, -4
    sw (sp+4), ra

    .section .fini
    .global _fini
    .type _fini,@function
    .align 4
_fini:
    addi sp, sp, -4
    sw (sp+4), ra
File: plugin-libc/libgcc/config/c6x/crtn.S | repo: 4ms/metamodule-plugin-sdk | size: 1,309 bytes
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
   Contributed by Bernd Schmidt <bernds@codesourcery.com>.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

/*
 * This file supplies function epilogues for the .init and .fini sections.
 * It is linked in after all other files.
 */

    .section .init
    ldw .d2t2 *+B15(4), B3
    add .d2 B15, 8, B15
    nop 3
    ret .s2 B3
    nop 5

    .section .fini
    ldw .d2t2 *+B15(4), B3
    add .d2 B15, 8, B15
    nop 3
    ret .s2 B3
    nop 5
File: plugin-libc/libgcc/config/c6x/crti.S | repo: 4ms/metamodule-plugin-sdk | size: 1,343 bytes
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
   Contributed by Bernd Schmidt <bernds@codesourcery.com>.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

/*
 * This file just supplies function prologues for the .init and .fini
 * sections.  It is linked in before crtbegin.o.
 */

    .section .init
    .globl _init
    .type _init,@function
_init:
    add .l2 -8, B15, B15
    stw .d2t2 B3,*+B15(4)

    .section .fini
    .globl _fini
    .type _fini,@function
_fini:
    add .l2 -8, B15, B15
    stw .d2t2 B3,*+B15(4)
File: plugin-libc/libgcc/config/c6x/libunwind.S | repo: 4ms/metamodule-plugin-sdk | size: 3,341 bytes
    .text

    .macro do_call fn
#ifdef _TMS320C6400_PLUS
    callp .s2 (\fn), B3
#elif defined(_TMS320C6400)
    call .s2 (\fn)
    addkpc .s2 9f, B3, 0
    nop 4
9:
#else
    call .s2 (\fn)
    mvkl .s2 9f, B3
    mvkh .s2 9f, B3
    nop 3
9:
#endif
    .endm

    .align 2
    .global restore_core_regs
    .type restore_core_regs, STT_FUNC
restore_core_regs:
    mv .s2x A4, B4
    ldw .d1t1 *+A4[0], A0
||  ldw .d2t2 *++B4[16], B0
    ldw .d1t1 *+A4[1], A1
||  ldw .d2t2 *+B4[1], B1
    ldw .d1t1 *+A4[2], A2
||  ldw .d2t2 *+B4[2], B2
    ldw .d1t1 *+A4[3], A3
||  ldw .d2t2 *+B4[3], B3
    ;; Base registers are loaded later
    ldw .d1t1 *+A4[5], A5
||  ldw .d2t2 *+B4[5], B5
    ldw .d1t1 *+A4[6], A6
||  ldw .d2t2 *+B4[6], B6
    ldw .d1t1 *+A4[7], A7
||  ldw .d2t2 *+B4[7], B7
    ldw .d1t1 *+A4[8], A8
||  ldw .d2t2 *+B4[8], B8
    ldw .d1t1 *+A4[9], A9
||  ldw .d2t2 *+B4[9], B9
    ;; load PC into B10 so that it is ready for the branch
    ldw .d2t2 *+B4[16], B10
    ldw .d1t1 *+A4[11], A11
||  ldw .d2t2 *+B4[11], B11
    ldw .d1t1 *+A4[12], A12
||  ldw .d2t2 *+B4[12], B12
    ldw .d1t1 *+A4[13], A13
||  ldw .d2t2 *+B4[13], B13
    ldw .d1t1 *+A4[14], A14
||  ldw .d2t2 *+B4[14], B14
    ;; Loads have 4 delay slots.  Take advantage of this to restore the
    ;; scratch registers and stack pointer before the base registers
    ;; disappear.  We also need to make sure no interrupts occur,
    ;; so put the whole thing in the delay slots of a dummy branch.
    ;; We cannot move the ret earlier as that would cause it to occur
    ;; before the last load completes.
    b .s1 (1f)
    ldw .d1t1 *+A4[4], A4
||  ldw .d2t2 *+B4[4], B4
    ldw .d1t1 *+A4[15], A15
||  ldw .d2t2 *+B4[15], B15
    ret .s2 B10
    ldw .d1t1 *+A4[10], A10
||  ldw .d2t2 *+B4[10], B10
    nop 1
1:
    nop 3
    .size restore_core_regs, . - restore_core_regs

    .macro UNWIND_WRAPPER name argreg argside
    .global \name
    .type \name, STT_FUNC
\name:
    # Create saved register state: flags,A0-A15,B0-B15,PC = 136 bytes.
    # Plus 4 (rounded to 8) for saving return.
    addk .s2 -144, B15
    stw .d2t1 A0, *+B15[2]
    stw .d2t1 A1, *+B15[3]
    stw .d2t1 A2, *+B15[4]
    stw .d2t1 A3, *+B15[5]
    stw .d2t1 A4, *+B15[6]
    stw .d2t1 A5, *+B15[7]
    stw .d2t1 A6, *+B15[8]
    stw .d2t1 A7, *+B15[9]
    stw .d2t1 A8, *+B15[10]
    stw .d2t1 A9, *+B15[11]
    stw .d2t1 A10, *+B15[12]
    stw .d2t1 A11, *+B15[13]
    stw .d2t1 A12, *+B15[14]
    stw .d2t1 A13, *+B15[15]
    stw .d2t1 A14, *+B15[16]
    stw .d2t1 A15, *+B15[17]
    mv .s1x B15, A0
    addk .s1 144, A0
    stw .d2t2 B0, *+B15[18]
    stw .d2t2 B1, *+B15[19]
    stw .d2t2 B2, *+B15[20]
    stw .d2t2 B3, *+B15[21]
    stw .d2t2 B4, *+B15[22]
    stw .d2t2 B5, *+B15[23]
    stw .d2t2 B6, *+B15[24]
    stw .d2t2 B7, *+B15[25]
    stw .d2t2 B8, *+B15[26]
    stw .d2t2 B9, *+B15[27]
    stw .d2t2 B10, *+B15[28]
    stw .d2t2 B11, *+B15[29]
    stw .d2t2 B12, *+B15[30]
    stw .d2t2 B13, *+B15[31]
    stw .d2t2 B14, *+B15[32]
    stw .d2t1 A0, *+B15[33]
    stw .d2t1 A0, *+B15[34]
    # Zero demand saved flags
    mvk .s1 0, A0
    stw .d2t1 A0, *+B15[1]
    # Save return address, setup additional argument and call function
    stw .d2t2 B3, *+B15[35]
    add .d\argside B15, 4, \argreg
    do_call __gnu\name
    # Restore stack and return
    ldw .d2t2 *+B15[35], B3
    addk .s2 144, B15
    nop 3
    ret .s2 B3
    nop 5
    .size \name, . - \name
    .endm

    UNWIND_WRAPPER _Unwind_RaiseException B4 2
    UNWIND_WRAPPER _Unwind_Resume B4 2
    UNWIND_WRAPPER _Unwind_Resume_or_Rethrow B4 2
    UNWIND_WRAPPER _Unwind_ForcedUnwind B6 2
    UNWIND_WRAPPER _Unwind_Backtrace A6 1x
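The 144-byte stack adjustment follows directly from the comment's arithmetic: flags, A0-A15, B0-B15 and the PC are 34 words (136 bytes), and the saved return address is padded so the stack stays 8-byte aligned. A compile-time check of that arithmetic, as a small C11 sketch:

enum { CORE_WORDS = 1 + 16 + 16 + 1 };  /* flags, A0-A15, B0-B15, PC */
_Static_assert(CORE_WORDS * 4 == 136, "saved core state is 136 bytes");
_Static_assert(136 + 8 == 144, "plus the padded return slot: the addk amount");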
File: plugin-libc/libgcc/config/c6x/lib1funcs.S | repo: 4ms/metamodule-plugin-sdk | size: 9,657 bytes
/* Copyright (C) 2010-2022 Free Software Foundation, Inc.
   Contributed by Bernd Schmidt <bernds@codesourcery.com>.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

;; ABI considerations for the divide functions
;; The following registers are call-used:
;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
;;
;; In our implementation, divu and remu are leaf functions,
;; while both divi and remi call into divu.
;; A0 is not clobbered by any of the functions.
;; divu does not clobber B2 either, which is taken advantage of
;; in remi.
;; divi uses B5 to hold the original return address during
;; the call to divu.
;; remi uses B2 and A5 to hold the input values during the
;; call to divu.  It stores B3 on the stack.

#ifdef L_divsi3
.text
.align 2
.global __c6xabi_divi
.hidden __c6xabi_divi
.type __c6xabi_divi, STT_FUNC

__c6xabi_divi:
    call .s2 __c6xabi_divu
||  mv .d2 B3, B5
||  cmpgt .l1 0, A4, A1
||  cmpgt .l2 0, B4, B1
    [A1] neg .l1 A4, A4
||  [B1] neg .l2 B4, B4
||  xor .s1x A1, B1, A1
#ifdef _TMS320C6400
    [A1] addkpc .s2 1f, B3, 4
#else
    [A1] mvkl .s2 1f, B3
    [A1] mvkh .s2 1f, B3
    nop 2
#endif
1:
    neg .l1 A4, A4
||  mv .l2 B3,B5
||  ret .s2 B5
    nop 5
#endif

#if defined L_modsi3 || defined L_divmodsi4
.align 2
#ifdef L_modsi3
#define MOD_OUTPUT_REG A4
.global __c6xabi_remi
.hidden __c6xabi_remi
.type __c6xabi_remi, STT_FUNC
#else
#define MOD_OUTPUT_REG A5
.global __c6xabi_divremi
.hidden __c6xabi_divremi
.type __c6xabi_divremi, STT_FUNC
__c6xabi_divremi:
#endif

__c6xabi_remi:
    stw .d2t2 B3, *B15--[2]
||  cmpgt .l1 0, A4, A1
||  cmpgt .l2 0, B4, B2
||  mv .s1 A4, A5
||  call .s2 __c6xabi_divu
    [A1] neg .l1 A4, A4
||  [B2] neg .l2 B4, B4
||  xor .s2x B2, A1, B0
||  mv .d2 B4, B2
#ifdef _TMS320C6400
    [B0] addkpc .s2 1f, B3, 1
    [!B0] addkpc .s2 2f, B3, 1
    nop 2
#else
    [B0] mvkl .s2 1f,B3
    [!B0] mvkl .s2 2f,B3
    [B0] mvkh .s2 1f,B3
    [!B0] mvkh .s2 2f,B3
#endif
1:
    neg .l1 A4, A4
2:
    ldw .d2t2 *++B15[2], B3
#ifdef _TMS320C6400_PLUS
    mpy32 .m1x A4, B2, A6
    nop 3
    ret .s2 B3
    sub .l1 A5, A6, MOD_OUTPUT_REG
    nop 4
#else
    mpyu .m1x A4, B2, A1
    nop 1
    mpylhu .m1x A4, B2, A6
||  mpylhu .m2x B2, A4, B2
    nop 1
    add .l1x A6, B2, A6
||  ret .s2 B3
    shl .s1 A6, 16, A6
    add .d1 A6, A1, A6
    sub .l1 A5, A6, MOD_OUTPUT_REG
    nop 2
#endif
#endif

#if defined L_udivsi3 || defined L_udivmodsi4
.align 2
#ifdef L_udivsi3
.global __c6xabi_divu
.hidden __c6xabi_divu
.type __c6xabi_divu, STT_FUNC
__c6xabi_divu:
#else
.global __c6xabi_divremu
.hidden __c6xabi_divremu
.type __c6xabi_divremu, STT_FUNC
__c6xabi_divremu:
#endif
    ;; We use a series of up to 31 subc instructions.  First, we find
    ;; out how many leading zero bits there are in the divisor.  This
    ;; gives us both a shift count for aligning (shifting) the divisor,
    ;; and the number of times we have to execute subc.
    ;; At the end, we have both the remainder and most of the quotient
    ;; in A4.  The top bit of the quotient is computed first and is
    ;; placed in A2.

    ;; Return immediately if the dividend is zero.  Setting B4 to 1
    ;; is a trick to allow us to leave the following insns in the jump
    ;; delay slot without affecting the result.
    mv .s2x A4, B1
#ifndef _TMS320C6400
 [!b1] mvk .s2 1, B4
#endif
 [b1] lmbd .l2 1, B4, B1
||[!b1] b .s2 B3 ; RETURN A
#ifdef _TMS320C6400
||[!b1] mvk .d2 1, B4
#endif
#ifdef L_udivmodsi4
||[!b1] zero .s1 A5
#endif
    mv .l1x B1, A6
||  shl .s2 B4, B1, B4

    ;; The loop performs a maximum of 28 steps, so we do the
    ;; first 3 here.
    cmpltu .l1x A4, B4, A2
 [!A2] sub .l1x A4, B4, A4
||  shru .s2 B4, 1, B4
||  xor .s1 1, A2, A2
    shl .s1 A2, 31, A2
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1

    ;; RETURN A may happen here (note: must happen before the next branch)
0:
    cmpgt .l2 B1, 7, B0
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
|| [b0] b .s1 0b
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
    ;; loop backwards branch happens here
    ret .s2 B3
||  mvk .s1 32, A1
    sub .l1 A1, A6, A6
#ifdef L_udivmodsi4
||  extu .s1 A4, A6, A5
#endif
    shl .s1 A4, A6, A4
    shru .s1 A4, 1, A4
||  sub .l1 A6, 1, A6
    or .l1 A2, A4, A4
    shru .s1 A4, A6, A4
    nop
#endif

#ifdef L_umodsi3
.align 2
.global __c6xabi_remu
.hidden __c6xabi_remu
.type __c6xabi_remu, STT_FUNC
__c6xabi_remu:
    ;; The ABI seems designed to prevent these functions calling each other,
    ;; so we duplicate most of the divsi3 code here.
    mv .s2x A4, B1
#ifndef _TMS320C6400
 [!b1] mvk .s2 1, B4
#endif
    lmbd .l2 1, B4, B1
||[!b1] b .s2 B3 ; RETURN A
#ifdef _TMS320C6400
||[!b1] mvk .d2 1, B4
#endif
    mv .l1x B1, A7
||  shl .s2 B4, B1, B4

    cmpltu .l1x A4, B4, A1
 [!a1] sub .l1x A4, B4, A4
    shru .s2 B4, 1, B4

0:
    cmpgt .l2 B1, 7, B0
|| [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
    ;; RETURN A may happen here (note: must happen before the next branch)
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
|| [b0] b .s1 0b
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
    ;; loop backwards branch happens here
    ret .s2 B3
 [b1] subc .l1x A4,B4,A4
|| [b1] add .s2 -1, B1, B1
 [b1] subc .l1x A4,B4,A4
    extu .s1 A4, A7, A4
    nop 2
#endif

#if defined L_strasgi_64plus && defined _TMS320C6400_PLUS
.align 2
.global __c6xabi_strasgi_64plus
.hidden __c6xabi_strasgi_64plus
.type __c6xabi_strasgi_64plus, STT_FUNC
__c6xabi_strasgi_64plus:
    shru .s2x a6, 2, b31
||  mv .s1 a4, a30
||  mv .d2 b4, b30
    add .s2 -4, b31, b31
    sploopd 1
||  mvc .s2 b31, ilc
    ldw .d2t2 *b30++, b31
    nop 4
    mv .s1x b31,a31
    spkernel 6, 0
||  stw .d1t1 a31, *a30++
    ret .s2 b3
    nop 5
#endif

#ifdef L_strasgi
.global __c6xabi_strasgi
.type __c6xabi_strasgi, STT_FUNC
__c6xabi_strasgi:
    ;; This is essentially memcpy, with alignment known to be at least
    ;; 4, and the size a multiple of 4 greater than or equal to 28.
    ldw .d2t1 *B4++, A0
||  mvk .s2 16, B1
    ldw .d2t1 *B4++, A1
||  mvk .s2 20, B2
||  sub .d1 A6, 24, A6
    ldw .d2t1 *B4++, A5
    ldw .d2t1 *B4++, A7
||  mv .l2x A6, B7
    ldw .d2t1 *B4++, A8
    ldw .d2t1 *B4++, A9
||  mv .s2x A0, B5
||  cmpltu .l2 B2, B7, B0
0:
    stw .d1t2 B5, *A4++
||[b0] ldw .d2t1 *B4++, A0
||  mv .s2x A1, B5
||  mv .l2 B7, B6
[b0] sub .d2 B6, 24, B7
||[b0] b .s2 0b
||  cmpltu .l2 B1, B6, B0
[b0] ldw .d2t1 *B4++, A1
||  stw .d1t2 B5, *A4++
||  mv .s2x A5, B5
||  cmpltu .l2 12, B6, B0
[b0] ldw .d2t1 *B4++, A5
||  stw .d1t2 B5, *A4++
||  mv .s2x A7, B5
||  cmpltu .l2 8, B6, B0
[b0] ldw .d2t1 *B4++, A7
||  stw .d1t2 B5, *A4++
||  mv .s2x A8, B5
||  cmpltu .l2 4, B6, B0
[b0] ldw .d2t1 *B4++, A8
||  stw .d1t2 B5, *A4++
||  mv .s2x A9, B5
||  cmpltu .l2 0, B6, B0
[b0] ldw .d2t1 *B4++, A9
||  stw .d1t2 B5, *A4++
||  mv .s2x A0, B5
||  cmpltu .l2 B2, B7, B0
    ;; loop back branch happens here
    cmpltu .l2 B1, B6, B0
||  ret .s2 b3
[b0] stw .d1t1 A1, *A4++
||  cmpltu .l2 12, B6, B0
[b0] stw .d1t1 A5, *A4++
||  cmpltu .l2 8, B6, B0
[b0] stw .d1t1 A7, *A4++
||  cmpltu .l2 4, B6, B0
[b0] stw .d1t1 A8, *A4++
||  cmpltu .l2 0, B6, B0
[b0] stw .d1t1 A9, *A4++
    ;; return happens here
#endif

#ifdef _TMS320C6400_PLUS
#ifdef L_push_rts
.align 2
.global __c6xabi_push_rts
.hidden __c6xabi_push_rts
.type __c6xabi_push_rts, STT_FUNC
__c6xabi_push_rts:
    stw .d2t2 B14, *B15--[2]
    stdw .d2t1 A15:A14, *B15--
||  b .s2x A3
    stdw .d2t2 B13:B12, *B15--
    stdw .d2t1 A13:A12, *B15--
    stdw .d2t2 B11:B10, *B15--
    stdw .d2t1 A11:A10, *B15--
    stdw .d2t2 B3:B2, *B15--
#endif

#ifdef L_pop_rts
.align 2
.global __c6xabi_pop_rts
.hidden __c6xabi_pop_rts
.type __c6xabi_pop_rts, STT_FUNC
__c6xabi_pop_rts:
    lddw .d2t2 *++B15, B3:B2
    lddw .d2t1 *++B15, A11:A10
    lddw .d2t2 *++B15, B11:B10
    lddw .d2t1 *++B15, A13:A12
    lddw .d2t2 *++B15, B13:B12
    lddw .d2t1 *++B15, A15:A14
||  b .s2 B3
    ldw .d2t2 *++B15[2], B14
    nop 4
#endif

#ifdef L_call_stub
.align 2
.global __c6xabi_call_stub
.type __c6xabi_call_stub, STT_FUNC
__c6xabi_call_stub:
    stw .d2t1 A2, *B15--[2]
    stdw .d2t1 A7:A6, *B15--
||  call .s2 B31
    stdw .d2t1 A1:A0, *B15--
    stdw .d2t2 B7:B6, *B15--
    stdw .d2t2 B5:B4, *B15--
    stdw .d2t2 B1:B0, *B15--
    stdw .d2t2 B3:B2, *B15--
||  addkpc .s2 1f, B3, 0
1:
    lddw .d2t2 *++B15, B3:B2
    lddw .d2t2 *++B15, B1:B0
    lddw .d2t2 *++B15, B5:B4
    lddw .d2t2 *++B15, B7:B6
    lddw .d2t1 *++B15, A1:A0
    lddw .d2t1 *++B15, A7:A6
||  b .s2 B3
    ldw .d2t1 *++B15[2], A2
    nop 4
#endif
#endif
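The divu/remu cores above are built on the C6x subc (conditional subtract and shift) instruction described in the comment: lmbd counts leading zeros to align the divisor and bound the step count, then one subc runs per quotient bit. A hedged C model of that scheme; subc is re-expressed in C, __builtin_clz stands in for lmbd, and the canonical fixed-divisor variant is shown rather than the exact register choreography of the assembly:

#include <stdint.h>

/* subc: if the remainder fits, subtract and shift in a 1 bit;
   otherwise just shift. */
static uint32_t subc(uint32_t a, uint32_t b)
{
    return a >= b ? ((a - b) << 1) | 1 : a << 1;
}

uint32_t c6x_divu_model(uint32_t n, uint32_t d)
{
    if (n < d)
        return 0;
    int k = __builtin_clz(d) - __builtin_clz(n);  /* lmbd-derived count */
    uint32_t a = n;
    uint32_t dd = d << k;        /* align divisor MSB with dividend MSB */
    for (int i = 0; i <= k; i++) /* k+1 conditional subtract steps */
        a = subc(a, dd);
    return a & ((2u << k) - 1);  /* low k+1 bits of a are the quotient */
}

For example, c6x_divu_model(100, 7) runs five subc steps and returns 14; the bits above the mask hold the remainder (2), which is how the assembly can deliver both results from the same accumulator.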
File: plugin-libc/libgcc/config/moxie/crtn.S | repo: 4ms/metamodule-plugin-sdk | size: 1,264 bytes
# crtn.S for moxie
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

# This file just makes sure that the .fini and .init sections do in
# fact return.  Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.

    .file "crtn.S"

    .section ".init"
    ret

    .section ".fini"
    ret
File: plugin-libc/libgcc/config/moxie/crti.S | repo: 4ms/metamodule-plugin-sdk | size: 1,317 bytes
# crti.S for moxie
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.

# This file just makes a stack frame for the contents of the .fini and
# .init sections.  Users may put any desired instructions in those
# sections.

    .file "crti.S"

    .section ".init"
    .global _init
    .type _init, @function
    .p2align 1
_init:

    .section ".fini"
    .global _fini
    .type _fini,@function
    .p2align 1
_fini:
File: plugin-libc/libgcc/config/microblaze/crtn.S | repo: 4ms/metamodule-plugin-sdk | size: 1,459 bytes
/* crtn.s for __init, __fini
   This file supplies the epilogue for __init and __fini routines.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by Michael Eager <eager@eagercon.com>.

   This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.  */

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .section .init, "ax"
    lw r15, r0, r1
    rtsd r15, 8
    addik r1, r1, 8

    .section .fini, "ax"
    lw r15, r0, r1
    rtsd r15, 8
    addik r1, r1, 8
File: plugin-libc/libgcc/config/microblaze/umodsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 3,212 bytes
###################################
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#
# umodsi3.S
#
# Unsigned modulo operation for 32 bit integers.
# Input : op1 in Reg r5
#         op2 in Reg r6
# Output: op1 mod op2 in Reg r3
#
#######################################

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .globl __umodsi3
    .ent __umodsi3
    .type __umodsi3,@function
__umodsi3:
    .frame r1,0,r15

    addik r1,r1,-12
    swi r29,r1,0
    swi r30,r1,4
    swi r31,r1,8

    BEQI r6,$LaDiv_By_Zero      # Div_by_Zero # Division Error
    BEQId r5,$LaResult_Is_Zero  # Result is Zero
    ADDIK r3,r0,0               # Clear div
    ADDIK r30,r0,0              # clear mod
    ADDIK r29,r0,32             # Initialize the loop count

    # Check if r6 and r5 are equal
    # if yes, return 0
    rsub r18,r5,r6
    beqi r18,$LaRETURN_HERE

    # Check if (uns)r6 is greater than (uns)r5. In that case, just return r5
    xor r18,r5,r6
    bgeid r18,16
    addik r3,r5,0
    blti r6,$LaRETURN_HERE
    bri $LCheckr6
    rsub r18,r5,r6              # MICROBLAZEcmp
    bgti r18,$LaRETURN_HERE

    # If r6 [bit 31] is set, then return result as r5-r6
$LCheckr6:
    bgtid r6,$LaDIV0
    addik r3,r0,0
    addik r18,r0,0x7fffffff
    and r5,r5,r18
    and r6,r6,r18
    brid $LaRETURN_HERE
    rsub r3,r6,r5

    # First part: try to find the first '1' in the r5
$LaDIV0:
    BLTI r5,$LaDIV2
$LaDIV1:
    ADD r5,r5,r5                # left shift logical r5
    BGEID r5,$LaDIV1 #
    ADDIK r29,r29,-1
$LaDIV2:
    ADD r5,r5,r5                # left shift logical r5 get the '1' into the Carry
    ADDC r3,r3,r3               # Move that bit into the Mod register
    rSUB r31,r6,r3              # Try to subtract (r3 a r6)
    BLTi r31,$LaMOD_TOO_SMALL
    OR r3,r0,r31                # Move the r31 to mod since the result was positive
    ADDIK r30,r30,1
$LaMOD_TOO_SMALL:
    ADDIK r29,r29,-1
    BEQi r29,$LaLOOP_END
    ADD r30,r30,r30             # Shift in the '1' into div
    BRI $LaDIV2                 # Div2
$LaLOOP_END:
    BRI $LaRETURN_HERE
$LaDiv_By_Zero:
$LaResult_Is_Zero:
    or r3,r0,r0                 # set result to 0
$LaRETURN_HERE:
    # Restore values of CSRs and that of r3 and the divisor and the dividend
    lwi r29,r1,0
    lwi r30,r1,4
    lwi r31,r1,8
    rtsd r15,8
    addik r1,r1,12
    .end __umodsi3
    .size __umodsi3, . - __umodsi3
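The $LaDIV2 loop is classic bit-serial modulo: ADD r5,r5,r5 shifts the dividend left and pushes its top bit into the carry, ADDC shifts that carry into the running remainder, and the divisor is conditionally subtracted each round. A C rendering of the loop body, without the leading-zero skip and the special-case entry checks the assembly layers on top (the name is illustrative):

#include <stdint.h>

uint32_t mb_umodsi3_model(uint32_t n, uint32_t d)
{
    uint32_t rem = 0;
    for (int i = 0; i < 32; i++) {
        rem = (rem << 1) | (n >> 31);  /* ADD + ADDC: MSB chains over */
        n <<= 1;
        if (rem >= d)
            rem -= d;                  /* rSUB + conditional keep */
    }
    return rem;
}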
File: plugin-libc/libgcc/config/microblaze/udivsi3.S | repo: 4ms/metamodule-plugin-sdk | size: 3,428 bytes
###################################
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see <http://www.gnu.org/licenses/>.
#
# udivsi3.S
#
# Unsigned divide operation.
# Input : Divisor in Reg r5
#         Dividend in Reg r6
# Output: Result in Reg r3
#
#######################################

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .globl __udivsi3
    .ent __udivsi3
    .type __udivsi3,@function
__udivsi3:
    .frame r1,0,r15

    ADDIK r1,r1,-12
    SWI r29,r1,0
    SWI r30,r1,4
    SWI r31,r1,8

    BEQI r6,$LaDiv_By_Zero      # Div_by_Zero # Division Error
    BEQID r5,$LaResult_Is_Zero  # Result is Zero
    ADDIK r30,r0,0              # Clear mod
    ADDIK r29,r0,32             # Initialize the loop count

    # Check if r6 and r5 are equal
    # if yes, return 1
    RSUB r18,r5,r6
    BEQID r18,$LaRETURN_HERE
    ADDIK r3,r0,1

    # Check if (uns)r6 is greater than (uns)r5. In that case, just return 0
    XOR r18,r5,r6
    BGEID r18,16
    ADD r3,r0,r0                # We would anyways clear r3
    BLTI r6,$LaRETURN_HERE      # r6[bit 31 = 1] hence is greater
    BRI $LCheckr6
    RSUB r18,r6,r5              # MICROBLAZEcmp
    BLTI r18,$LaRETURN_HERE

    # If r6 [bit 31] is set, then return result as 1
$LCheckr6:
    BGTI r6,$LaDIV0
    BRID $LaRETURN_HERE
    ADDIK r3,r0,1

    # First part try to find the first '1' in the r5
$LaDIV0:
    BLTI r5,$LaDIV2
$LaDIV1:
    ADD r5,r5,r5                # left shift logical r5
    BGTID r5,$LaDIV1
    ADDIK r29,r29,-1
$LaDIV2:
    ADD r5,r5,r5                # left shift logical r5 get the '1' into the Carry
    ADDC r30,r30,r30            # Move that bit into the Mod register
    RSUB r31,r6,r30             # Try to subtract (r30 a r6)
    BLTI r31,$LaMOD_TOO_SMALL
    OR r30,r0,r31               # Move the r31 to mod since the result was positive
    ADDIK r3,r3,1
$LaMOD_TOO_SMALL:
    ADDIK r29,r29,-1
    BEQi r29,$LaLOOP_END
    ADD r3,r3,r3                # Shift in the '1' into div
    BRI $LaDIV2                 # Div2
$LaLOOP_END:
    BRI $LaRETURN_HERE
$LaDiv_By_Zero:
$LaResult_Is_Zero:
    OR r3,r0,r0                 # set result to 0
$LaRETURN_HERE:
    # Restore values of CSRs and that of r3 and the divisor and the dividend
    LWI r29,r1,0
    LWI r30,r1,4
    LWI r31,r1,8
    RTSD r15,8
    ADDIK r1,r1,12
    .end __udivsi3
    .size __udivsi3, . - __udivsi3
File: plugin-libc/libgcc/config/microblaze/muldi3_hard.S | repo: 4ms/metamodule-plugin-sdk | size: 3,730 bytes
###################################- # # Copyright (C) 2009-2022 Free Software Foundation, Inc. # # Contributed by Michael Eager <eager@eagercon.com>. # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # GCC is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. # # muldi3_hard.S # # Multiply operation for 64 bit integers, for devices with hard multiply # Input : Operand1[H] in Reg r5 # Operand1[L] in Reg r6 # Operand2[H] in Reg r7 # Operand2[L] in Reg r8 # Output: Result[H] in Reg r3 # Result[L] in Reg r4 # # Explaination: # # Both the input numbers are divided into 16 bit number as follows # op1 = A B C D # op2 = E F G H # result = D * H # + (C * H + D * G) << 16 # + (B * H + C * G + D * F) << 32 # + (A * H + B * G + C * F + D * E) << 48 # # Only 64 bits of the output are considered # ####################################### /* An executable stack is *not* required for these functions. */ #ifdef __linux__ .section .note.GNU-stack,"",%progbits .previous #endif .globl muldi3_hardproc .ent muldi3_hardproc muldi3_hardproc: addi r1,r1,-40 # Save the input operands on the caller's stack swi r5,r1,44 swi r6,r1,48 swi r7,r1,52 swi r8,r1,56 # Store all the callee saved registers sw r20,r1,r0 swi r21,r1,4 swi r22,r1,8 swi r23,r1,12 swi r24,r1,16 swi r25,r1,20 swi r26,r1,24 swi r27,r1,28 # Load all the 16 bit values for A through H lhui r20,r1,44 # A lhui r21,r1,46 # B lhui r22,r1,48 # C lhui r23,r1,50 # D lhui r24,r1,52 # E lhui r25,r1,54 # F lhui r26,r1,56 # G lhui r27,r1,58 # H # D * H ==> LSB of the result on stack ==> Store1 mul r9,r23,r27 swi r9,r1,36 # Pos2 and Pos3 # Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 # Store the carry generated in position 2 for Pos 3 lhui r11,r1,36 # Pos2 mul r9,r22,r27 # C * H mul r10,r23,r26 # D * G add r9,r9,r10 addc r12,r0,r0 add r9,r9,r11 addc r12,r12,r0 # Store the Carry shi r9,r1,36 # Store Pos2 swi r9,r1,32 lhui r11,r1,32 shi r11,r1,34 # Store Pos1 # Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 mul r9,r21,r27 # B * H mul r10,r22,r26 # C * G mul r7,r23,r25 # D * F add r9,r9,r11 add r9,r9,r10 add r9,r9,r7 swi r9,r1,32 # Pos0 and Pos1 # Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 lhui r11,r1,32 # Pos0 mul r9,r20,r27 # A * H mul r10,r21,r26 # B * G mul r7,r22,r25 # C * F mul r8,r23,r24 # D * E add r9,r9,r11 add r9,r9,r10 add r9,r9,r7 add r9,r9,r8 sext16 r9,r9 # Sign extend the MSB shi r9,r1,32 # Move results to r3 and r4 lhui r3,r1,32 add r3,r3,r12 shi r3,r1,32 lwi r3,r1,32 # Hi Part lwi r4,r1,36 # Lo Part # Restore Callee saved registers lw r20,r1,r0 lwi r21,r1,4 lwi r22,r1,8 lwi r23,r1,12 lwi r24,r1,16 lwi r25,r1,20 lwi r26,r1,24 lwi r27,r1,28 # Restore Frame and return rtsd r15,8 addi r1,r1,40 .end muldi3_hardproc
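The decomposition in this file's header is worth seeing in C. The sketch below (mine, not from the tree; muldi3_model is an invented name) splits each 64-bit operand into four 16-bit digits and keeps only the partial products that land inside the low 64 bits, which is why the result is exact modulo 2^64:

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: the 16-bit partial-product
   decomposition described in the muldi3_hard.S header.  Digit products
   with a combined shift of 64 or more vanish modulo 2^64 and are never
   computed, exactly as in the asm. */
static uint64_t muldi3_model(uint64_t op1, uint64_t op2)
{
    uint64_t d = op1 & 0xffff, c = (op1 >> 16) & 0xffff;
    uint64_t b = (op1 >> 32) & 0xffff, a = (op1 >> 48) & 0xffff;
    uint64_t h = op2 & 0xffff, g = (op2 >> 16) & 0xffff;
    uint64_t f = (op2 >> 32) & 0xffff, e = (op2 >> 48) & 0xffff;

    uint64_t r = d * h;                       /* Pos2/Pos3 */
    r += (c * h + d * g) << 16;               /* Pos1/Pos2 */
    r += (b * h + c * g + d * f) << 32;       /* Pos0/Pos1 */
    r += (a * h + b * g + c * f + d * e) << 48;  /* Pos0 */
    return r;      /* wraps mod 2^64, matching the "only 64 bits" note */
}

int main(void)
{
    uint64_t x = 0x123456789abcdef0ULL, y = 0xfedcba9876543210ULL;
    printf("%d\n", muldi3_model(x, y) == x * y);  /* prints 1 */
    return 0;
}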
4ms/metamodule-plugin-sdk
3,280
plugin-libc/libgcc/config/microblaze/divsi3.S
###################################- # # Copyright (C) 2009-2022 Free Software Foundation, Inc. # # Contributed by Michael Eager <eager@eagercon.com>. # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # GCC is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. # # divsi3.S # # Divide operation for 32 bit integers. # Input : Dividend in Reg r5 # Divisor in Reg r6 # Output: Result in Reg r3 # ####################################### /* An executable stack is *not* required for these functions. */ #ifdef __linux__ .section .note.GNU-stack,"",%progbits .previous #endif .globl __divsi3 .ent __divsi3 .type __divsi3,@function __divsi3: .frame r1,0,r15 ADDIK r1,r1,-16 SWI r28,r1,0 SWI r29,r1,4 SWI r30,r1,8 SWI r31,r1,12 BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error BEQI r5,$LaResult_Is_Zero # Result is Zero BGEID r5,$LaR5_Pos XOR r28,r5,r6 # Get the sign of the result RSUBI r5,r5,0 # Make r5 positive $LaR5_Pos: BGEI r6,$LaR6_Pos RSUBI r6,r6,0 # Make r6 positive $LaR6_Pos: ADDIK r30,r0,0 # Clear mod ADDIK r3,r0,0 # clear div ADDIK r29,r0,32 # Initialize the loop count # First part try to find the first '1' in the r5 $LaDIV0: BLTI r5,$LaDIV2 # This traps r5 == 0x80000000 $LaDIV1: ADD r5,r5,r5 # left shift logical r5 BGTID r5,$LaDIV1 ADDIK r29,r29,-1 $LaDIV2: ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry ADDC r30,r30,r30 # Move that bit into the Mod register RSUB r31,r6,r30 # Try to subtract (r30 a r6) BLTI r31,$LaMOD_TOO_SMALL OR r30,r0,r31 # Move the r31 to mod since the result was positive ADDIK r3,r3,1 $LaMOD_TOO_SMALL: ADDIK r29,r29,-1 BEQi r29,$LaLOOP_END ADD r3,r3,r3 # Shift in the '1' into div BRI $LaDIV2 # Div2 $LaLOOP_END: BGEI r28,$LaRETURN_HERE BRID $LaRETURN_HERE RSUBI r3,r3,0 # Negate the result $LaDiv_By_Zero: $LaResult_Is_Zero: OR r3,r0,r0 # set result to 0 $LaRETURN_HERE: # Restore values of CSRs and that of r3 and the divisor and the dividend LWI r28,r1,0 LWI r29,r1,4 LWI r30,r1,8 LWI r31,r1,12 RTSD r15,8 ADDIK r1,r1,16 .end __divsi3 .size __divsi3, . - __divsi3
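For the signed case the asm XORs the operands' signs into r28, divides magnitudes, and negates the quotient at the end if exactly one operand was negative. A C model of that flow (divsi3_model is my name, not a library symbol; the restoring core is the same as in the unsigned routines, and like the asm the sketch returns 0 on division by zero):

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: sign handling as in __divsi3.
   Magnitudes are kept unsigned so that INT32_MIN negates cleanly. */
static int32_t divsi3_model(int32_t a, int32_t b)
{
    if (b == 0 || a == 0)
        return 0;                       /* asm returns 0 on divide-by-zero */

    int negate = (a < 0) != (b < 0);    /* XOR of the sign bits (r28) */
    uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
    uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;

    uint32_t q = 0, rem = 0;            /* restoring shift-subtract core */
    for (int i = 31; i >= 0; i--) {
        rem = (rem << 1) | ((ua >> i) & 1);
        q <<= 1;
        if (rem >= ub) { rem -= ub; q |= 1; }
    }
    /* stay in unsigned arithmetic for the final negate, so INT32_MIN / 1
       does not trip signed overflow in the model */
    return (int32_t)(negate ? 0u - q : q);
}

int main(void)
{
    printf("%d\n", divsi3_model(-100, 7));  /* -14 */
    printf("%d\n", divsi3_model(100, -7));  /* -14 */
    return 0;
}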
4ms/metamodule-plugin-sdk
2,005
plugin-libc/libgcc/config/microblaze/stack_overflow_exit.S
###################################-*-asm*-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.
#
# stack_overflow_exit.S
#
# Checks for stack overflows and sets the global variable
# stack_overflow_error with the value of current stack pointer
#
# This routine exits from the program
#
#######################################

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .globl _stack_overflow_error
    .data
    .align 2
    .type _stack_overflow_error,@object
    .size _stack_overflow_error,4
_stack_overflow_error:
    .data32 0

    .text
    .globl _stack_overflow_exit
    .ent _stack_overflow_exit
    .type _stack_overflow_exit,@function
_stack_overflow_exit:
#ifdef __PIC__
    mfs r20,rpc
    addik r20,r20,_GLOBAL_OFFSET_TABLE_+8
    swi r1,r20,_stack_overflow_error@GOTOFF
    bri exit@PLT
#else
    swi r1,r0,_stack_overflow_error
    bri exit
#endif
    .end _stack_overflow_exit
    .size _stack_overflow_exit,. - _stack_overflow_exit
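In C terms the handler amounts to the following (a loose sketch, not from the tree: __builtin_frame_address(0) stands in for reading r1 directly, and the exit status is my choice; the asm jumps to exit with whatever happens to be in the argument register):

#include <stdlib.h>

/* Reader's sketch, not from the GCC tree: record the failing stack
   pointer in a global, then leave the program. */
void *stack_overflow_error;

void stack_overflow_exit_model(void)
{
    stack_overflow_error = __builtin_frame_address(0); /* approximates r1 */
    exit(EXIT_FAILURE);
}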
4ms/metamodule-plugin-sdk
1,696
plugin-libc/libgcc/config/microblaze/crti.S
/* crti.s for __init, __fini
   This file supplies the prologue for __init and __fini routines

   Copyright (C) 2009-2022 Free Software Foundation, Inc.

   Contributed by Michael Eager <eager@eagercon.com>.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .section .init, "ax"
    .global __init
    .weak _stack
    .set _stack, 0xffffffff
    .weak _stack_end
    .set _stack_end, 0
    .align 2
__init:
    addik r1, r1, -8
    sw r15, r0, r1
    la r11, r0, _stack
    mts rshr, r11
    la r11, r0, _stack_end
    mts rslr, r11

    .section .fini, "ax"
    .global __fini
    .align 2
__fini:
    addik r1, r1, -8
    sw r15, r0, r1
4ms/metamodule-plugin-sdk
3,737
plugin-libc/libgcc/config/microblaze/moddi3.S
################################### # # Copyright (C) 2009-2022 Free Software Foundation, Inc. # # Contributed by Michael Eager <eager@eagercon.com>. # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # GCC is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. # # modsi3.S # # modulo operation for 64 bit integers. # ####################################### /* An executable stack is *not* required for these functions. */ #ifdef __linux__ .section .note.GNU-stack,"",%progbits .previous #endif .globl __moddi3 .ent __moddi3 __moddi3: .frame r1,0,r15 #Change the stack pointer value and Save callee saved regs addik r1,r1,-24 swi r25,r1,0 swi r26,r1,4 swi r27,r1,8 # used for sign swi r28,r1,12 # used for loop count swi r29,r1,16 # Used for div value High swi r30,r1,20 # Used for div value Low #Check for Zero Value in the divisor/dividend OR r9,r5,r6 # Check for the op1 being zero BEQID r9,$LaResult_Is_Zero # Result is zero OR r9,r7,r8 # Check for the dividend being zero BEQI r9,$LaDiv_By_Zero # Div_by_Zero # Division Error BGEId r5,$La1_Pos XOR r27,r5,r7 # Get the sign of the result RSUBI r6,r6,0 # Make dividend positive RSUBIC r5,r5,0 # Make dividend positive $La1_Pos: BGEI r7,$La2_Pos RSUBI r8,r8,0 # Make Divisor Positive RSUBIC r9,r9,0 # Make Divisor Positive $La2_Pos: ADDIK r4,r0,0 # Clear mod low ADDIK r3,r0,0 # Clear mod high ADDIK r29,r0,0 # clear div high ADDIK r30,r0,0 # clear div low ADDIK r28,r0,64 # Initialize the loop count # First part try to find the first '1' in the r5/r6 $LaDIV1: ADD r6,r6,r6 ADDC r5,r5,r5 # left shift logical r5 BGEID r5,$LaDIV1 ADDIK r28,r28,-1 $LaDIV2: ADD r6,r6,r6 ADDC r5,r5,r5 # left shift logical r5/r6 get the '1' into the Carry ADDC r4,r4,r4 # Move that bit into the Mod register ADDC r3,r3,r3 # Move carry into high mod register rsub r18,r7,r3 # Compare the High Parts of Mod and Divisor bnei r18,$L_High_EQ rsub r18,r6,r4 # Compare Low Parts only if Mod[h] == Divisor[h] $L_High_EQ: rSUB r26,r8,r4 # Subtract divisor[L] from Mod[L] rsubc r25,r7,r3 # Subtract divisor[H] from Mod[H] BLTi r25,$LaMOD_TOO_SMALL OR r3,r0,r25 # move r25 to mod [h] OR r4,r0,r26 # move r26 to mod [l] ADDI r30,r30,1 ADDC r29,r29,r0 $LaMOD_TOO_SMALL: ADDIK r28,r28,-1 BEQi r28,$LaLOOP_END ADD r30,r30,r30 # Shift in the '1' into div [low] ADDC r29,r29,r29 # Move the carry generated into high BRI $LaDIV2 # Div2 $LaLOOP_END: BGEI r27,$LaRETURN_HERE rsubi r30,r30,0 rsubc r29,r29,r0 BRI $LaRETURN_HERE $LaDiv_By_Zero: $LaResult_Is_Zero: or r29,r0,r0 # set result to 0 [High] or r30,r0,r0 # set result to 0 [Low] $LaRETURN_HERE: # Restore values of CSRs and that of r29 and the divisor and the dividend lwi r25,r1,0 lwi r26,r1,4 lwi r27,r1,8 lwi r28,r1,12 lwi r29,r1,16 lwi r30,r1,20 rtsd r15,8 addik r1,r1,24 .end __moddi3
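(The "modsi3.S" in this header looks like a copy-paste slip; the file is moddi3.S and operates on 64-bit values.) The interesting part is the double-word arithmetic: dividend and remainder each live in a 32-bit register pair, so every left shift is an ADD/ADDC chain and the comparison runs high word first. A C model with the pairs kept explicit (umod64_model is my name, not a library symbol; signs are omitted, and the sketch assumes the divisor is below 2^63 so the remainder pair cannot overflow during the shift):

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: 64-step shift-subtract modulo
   with the 64-bit values held as explicit hi:lo 32-bit pairs, so the
   ADDC-style carry chaining of the asm is visible. */
static void umod64_model(uint32_t nh, uint32_t nl,   /* dividend hi:lo */
                         uint32_t dh, uint32_t dl,   /* divisor  hi:lo */
                         uint32_t *rh, uint32_t *rl) /* remainder out  */
{
    uint32_t mh = 0, ml = 0;            /* running remainder ("mod") pair */
    for (int i = 0; i < 64; i++) {
        /* shift the 128-bit chain (mh:ml:nh:nl) left by one bit */
        uint32_t cm = ml >> 31, cn = nh >> 31, c = nl >> 31;
        mh = (mh << 1) | cm;
        ml = (ml << 1) | cn;
        nh = (nh << 1) | c;
        nl <<= 1;
        /* compare remainder with divisor high word first, then subtract */
        if (mh > dh || (mh == dh && ml >= dl)) {
            uint32_t borrow = ml < dl;  /* borrow out of the low word */
            ml -= dl;
            mh -= dh + borrow;
        }
    }
    *rh = mh; *rl = ml;
}

int main(void)
{
    uint32_t rh, rl;
    umod64_model(0x12345678u, 0x9abcdef0u, 0u, 1000u, &rh, &rl);
    uint64_t n = ((uint64_t)0x12345678u << 32) | 0x9abcdef0u;
    printf("%d\n", (((uint64_t)rh << 32) | rl) == n % 1000u);  /* 1 */
    return 0;
}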
4ms/metamodule-plugin-sdk
3,142
plugin-libc/libgcc/config/microblaze/modsi3.S
################################### # # Copyright (C) 2009-2022 Free Software Foundation, Inc. # # Contributed by Michael Eager <eager@eagercon.com>. # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # GCC is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public # License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. # # modsi3.S # # modulo operation for 32 bit integers. # Input : op1 in Reg r5 # op2 in Reg r6 # Output: op1 mod op2 in Reg r3 # ####################################### /* An executable stack is *not* required for these functions. */ #ifdef __linux__ .section .note.GNU-stack,"",%progbits .previous #endif .globl __modsi3 .ent __modsi3 .type __modsi3,@function __modsi3: .frame r1,0,r15 addik r1,r1,-16 swi r28,r1,0 swi r29,r1,4 swi r30,r1,8 swi r31,r1,12 BEQI r6,$LaDiv_By_Zero # Div_by_Zero # Division Error BEQI r5,$LaResult_Is_Zero # Result is Zero BGEId r5,$LaR5_Pos ADD r28,r5,r0 # Get the sign of the result [ Depends only on the first arg] RSUBI r5,r5,0 # Make r5 positive $LaR5_Pos: BGEI r6,$LaR6_Pos RSUBI r6,r6,0 # Make r6 positive $LaR6_Pos: ADDIK r3,r0,0 # Clear mod ADDIK r30,r0,0 # clear div BLTId r5,$LaDIV2 # If r5 is still negative (0x80000000), skip # the first bit search. ADDIK r29,r0,32 # Initialize the loop count # First part try to find the first '1' in the r5 $LaDIV1: ADD r5,r5,r5 # left shift logical r5 BGEID r5,$LaDIV1 # ADDIK r29,r29,-1 $LaDIV2: ADD r5,r5,r5 # left shift logical r5 get the '1' into the Carry ADDC r3,r3,r3 # Move that bit into the Mod register rSUB r31,r6,r3 # Try to subtract (r30 a r6) BLTi r31,$LaMOD_TOO_SMALL OR r3,r0,r31 # Move the r31 to mod since the result was positive ADDIK r30,r30,1 $LaMOD_TOO_SMALL: ADDIK r29,r29,-1 BEQi r29,$LaLOOP_END ADD r30,r30,r30 # Shift in the '1' into div BRI $LaDIV2 # Div2 $LaLOOP_END: BGEI r28,$LaRETURN_HERE BRId $LaRETURN_HERE rsubi r3,r3,0 # Negate the result $LaDiv_By_Zero: $LaResult_Is_Zero: or r3,r0,r0 # set result to 0 [Both mod as well as div are 0] $LaRETURN_HERE: # Restore values of CSRs and that of r3 and the divisor and the dividend lwi r28,r1,0 lwi r29,r1,4 lwi r30,r1,8 lwi r31,r1,12 rtsd r15,8 addik r1,r1,16 .end __modsi3 .size __modsi3, . - __modsi3
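One detail the comment calls out: the remainder's sign "Depends only on the first arg", i.e. the dividend, which matches C's % semantics. A short model of just that sign rule (modsi3_model is my name; C's % stands in here for the same shift-subtract loop shown in the unsigned sketches above):

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: the sign rule __modsi3
   implements.  Unlike the quotient, the remainder takes its sign from
   the dividend alone. */
static int32_t modsi3_model(int32_t a, int32_t b)
{
    if (b == 0 || a == 0)
        return 0;                       /* asm returns 0 on divide-by-zero */
    uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
    uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;
    uint32_t r = ua % ub;               /* magnitude of the remainder */
    return (int32_t)(a < 0 ? 0u - r : r);
}

int main(void)
{
    printf("%d\n", modsi3_model(-100, 7));   /* -2 */
    printf("%d\n", modsi3_model(100, -7));   /*  2 */
    return 0;
}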
4ms/metamodule-plugin-sdk
2,128
plugin-libc/libgcc/config/microblaze/mulsi3.S
###################################-*-asm*-
#
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# Contributed by Michael Eager <eager@eagercon.com>.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
# License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.
#
# mulsi3.S
#
# Multiply operation for 32 bit integers.
#   Input : Operand1 in Reg r5
#           Operand2 in Reg r6
#   Output: Result [op1 * op2] in Reg r3
#
#######################################

/* An executable stack is *not* required for these functions.  */
#ifdef __linux__
.section .note.GNU-stack,"",%progbits
.previous
#endif

    .globl __mulsi3
    .ent __mulsi3
    .type __mulsi3,@function
__mulsi3:
    .frame r1,0,r15
    add r3,r0,r0
    BEQI r5,$L_Result_Is_Zero    # Multiply by Zero
    BEQI r6,$L_Result_Is_Zero    # Multiply by Zero
    BGEId r5,$L_R5_Pos
    XOR r4,r5,r6                 # Get the sign of the result
    RSUBI r5,r5,0                # Make r5 positive
$L_R5_Pos:
    BGEI r6,$L_R6_Pos
    RSUBI r6,r6,0                # Make r6 positive
$L_R6_Pos:
    bri $L1
$L2:
    add r5,r5,r5
$L1:
    srl r6,r6
    addc r7,r0,r0
    beqi r7,$L2
    bneid r6,$L2
    add r3,r3,r5
    blti r4,$L_NegateResult
    rtsd r15,8
    nop
$L_NegateResult:
    rtsd r15,8
    rsub r3,r3,r0
$L_Result_Is_Zero:
    rtsd r15,8
    addi r3,r0,0
    .end __mulsi3
    .size __mulsi3, . - __mulsi3
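The loop is a plain shift-and-add multiplier; making both operands positive first is what lets it stop as soon as the multiplier runs out of set bits. A C model (mulsi3_model is my name, not a library symbol; arithmetic is modulo 2^32 like the asm):

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: shift-and-add multiply with
   the same sign handling as __mulsi3 above. */
static int32_t mulsi3_model(int32_t a, int32_t b)
{
    int negate = (a < 0) != (b < 0);    /* sign of result, as in r4 */
    uint32_t ua = a < 0 ? 0u - (uint32_t)a : (uint32_t)a;
    uint32_t ub = b < 0 ? 0u - (uint32_t)b : (uint32_t)b;

    uint32_t r = 0;
    while (ub != 0) {                   /* stops once multiplier is 0 */
        if (ub & 1)
            r += ua;                    /* add the shifted multiplicand */
        ua <<= 1;
        ub >>= 1;
    }
    return (int32_t)(negate ? 0u - r : r);
}

int main(void)
{
    printf("%d\n", mulsi3_model(-123, 456));  /* -56088 */
    return 0;
}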
4ms/metamodule-plugin-sdk
41,922
plugin-libc/libgcc/config/avr/lib1funcs-fixed.S
/* -*- Mode: Asm -*- */ ;; Copyright (C) 2012-2022 Free Software Foundation, Inc. ;; Contributed by Sean D'Epagnier (sean@depagnier.com) ;; Georg-Johann Lay (avr@gjlay.de) ;; This file is free software; you can redistribute it and/or modify it ;; under the terms of the GNU General Public License as published by the ;; Free Software Foundation; either version 3, or (at your option) any ;; later version. ;; In addition to the permissions in the GNU General Public License, the ;; Free Software Foundation gives you unlimited permission to link the ;; compiled version of this file into combinations with other programs, ;; and to distribute those combinations without any restriction coming ;; from the use of this file. (The General Public License restrictions ;; do apply in other respects; for example, they cover modification of ;; the file, and distribution when not linked into a combine ;; executable.) ;; This file is distributed in the hope that it will be useful, but ;; WITHOUT ANY WARRANTY; without even the implied warranty of ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ;; General Public License for more details. ;; You should have received a copy of the GNU General Public License ;; along with this program; see the file COPYING. If not, write to ;; the Free Software Foundation, 51 Franklin Street, Fifth Floor, ;; Boston, MA 02110-1301, USA. ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Fixed point library routines for AVR ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #if defined __AVR_TINY__ #define __zero_reg__ r17 #define __tmp_reg__ r16 #else #define __zero_reg__ r1 #define __tmp_reg__ r0 #endif .section .text.libgcc.fixed, "ax", @progbits #ifndef __AVR_TINY__ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Conversions to float ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #if defined (L_fractqqsf) DEFUN __fractqqsf ;; Move in place for SA -> SF conversion clr r22 mov r23, r24 ;; Sign-extend lsl r24 sbc r24, r24 mov r25, r24 XJMP __fractsasf ENDF __fractqqsf #endif /* L_fractqqsf */ #if defined (L_fractuqqsf) DEFUN __fractuqqsf ;; Move in place for USA -> SF conversion clr r22 mov r23, r24 ;; Zero-extend clr r24 clr r25 XJMP __fractusasf ENDF __fractuqqsf #endif /* L_fractuqqsf */ #if defined (L_fracthqsf) DEFUN __fracthqsf ;; Move in place for SA -> SF conversion wmov 22, 24 ;; Sign-extend lsl r25 sbc r24, r24 mov r25, r24 XJMP __fractsasf ENDF __fracthqsf #endif /* L_fracthqsf */ #if defined (L_fractuhqsf) DEFUN __fractuhqsf ;; Move in place for USA -> SF conversion wmov 22, 24 ;; Zero-extend clr r24 clr r25 XJMP __fractusasf ENDF __fractuhqsf #endif /* L_fractuhqsf */ #if defined (L_fracthasf) DEFUN __fracthasf ;; Move in place for SA -> SF conversion clr r22 mov r23, r24 mov r24, r25 ;; Sign-extend lsl r25 sbc r25, r25 XJMP __fractsasf ENDF __fracthasf #endif /* L_fracthasf */ #if defined (L_fractuhasf) DEFUN __fractuhasf ;; Move in place for USA -> SF conversion clr r22 mov r23, r24 mov r24, r25 ;; Zero-extend clr r25 XJMP __fractusasf ENDF __fractuhasf #endif /* L_fractuhasf */ #if defined (L_fractsqsf) DEFUN __fractsqsf XCALL __floatsisf ;; Divide non-zero results by 2^31 to move the ;; decimal point into place tst r25 breq 0f subi r24, exp_lo (31) sbci r25, exp_hi (31) 0: ret ENDF __fractsqsf #endif /* L_fractsqsf */ #if defined (L_fractusqsf) DEFUN __fractusqsf XCALL __floatunsisf ;; Divide non-zero results by 2^32 to move the ;; decimal point into place cpse r25, __zero_reg__ subi r25, exp_hi (32) ret ENDF __fractusqsf #endif /* L_fractusqsf */ 
#if defined (L_fractsasf) DEFUN __fractsasf XCALL __floatsisf ;; Divide non-zero results by 2^15 to move the ;; decimal point into place tst r25 breq 0f subi r24, exp_lo (15) sbci r25, exp_hi (15) 0: ret ENDF __fractsasf #endif /* L_fractsasf */ #if defined (L_fractusasf) DEFUN __fractusasf XCALL __floatunsisf ;; Divide non-zero results by 2^16 to move the ;; decimal point into place cpse r25, __zero_reg__ subi r25, exp_hi (16) ret ENDF __fractusasf #endif /* L_fractusasf */ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Conversions from float ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #if defined (L_fractsfqq) DEFUN __fractsfqq ;; Multiply with 2^{24+7} to get a QQ result in r25 subi r24, exp_lo (-31) sbci r25, exp_hi (-31) XCALL __fixsfsi mov r24, r25 ret ENDF __fractsfqq #endif /* L_fractsfqq */ #if defined (L_fractsfuqq) DEFUN __fractsfuqq ;; Multiply with 2^{24+8} to get a UQQ result in r25 subi r25, exp_hi (-32) XCALL __fixunssfsi mov r24, r25 ret ENDF __fractsfuqq #endif /* L_fractsfuqq */ #if defined (L_fractsfha) DEFUN __fractsfha ;; Multiply with 2^{16+7} to get a HA result in r25:r24 subi r24, exp_lo (-23) sbci r25, exp_hi (-23) XJMP __fixsfsi ENDF __fractsfha #endif /* L_fractsfha */ #if defined (L_fractsfuha) DEFUN __fractsfuha ;; Multiply with 2^24 to get a UHA result in r25:r24 subi r25, exp_hi (-24) XJMP __fixunssfsi ENDF __fractsfuha #endif /* L_fractsfuha */ #if defined (L_fractsfhq) FALIAS __fractsfsq DEFUN __fractsfhq ;; Multiply with 2^{16+15} to get a HQ result in r25:r24 ;; resp. with 2^31 to get a SQ result in r25:r22 subi r24, exp_lo (-31) sbci r25, exp_hi (-31) XJMP __fixsfsi ENDF __fractsfhq #endif /* L_fractsfhq */ #if defined (L_fractsfuhq) FALIAS __fractsfusq DEFUN __fractsfuhq ;; Multiply with 2^{16+16} to get a UHQ result in r25:r24 ;; resp. with 2^32 to get a USQ result in r25:r22 subi r25, exp_hi (-32) XJMP __fixunssfsi ENDF __fractsfuhq #endif /* L_fractsfuhq */ #if defined (L_fractsfsa) DEFUN __fractsfsa ;; Multiply with 2^15 to get a SA result in r25:r22 subi r24, exp_lo (-15) sbci r25, exp_hi (-15) XJMP __fixsfsi ENDF __fractsfsa #endif /* L_fractsfsa */ #if defined (L_fractsfusa) DEFUN __fractsfusa ;; Multiply with 2^16 to get a USA result in r25:r22 subi r25, exp_hi (-16) XJMP __fixunssfsi ENDF __fractsfusa #endif /* L_fractsfusa */ ;; For multiplication the functions here are called directly from ;; avr-fixed.md instead of using the standard libcall mechanisms. ;; This can make better code because GCC knows exactly which ;; of the call-used registers (not all of them) are clobbered. */ /******************************************************* Fractional Multiplication 8 x 8 without MUL *******************************************************/ #if defined (L_mulqq3) && !defined (__AVR_HAVE_MUL__) ;;; R23 = R24 * R25 ;;; Clobbers: __tmp_reg__, R22, R24, R25 ;;; Rounding: ??? DEFUN __mulqq3 XCALL __fmuls ;; TR 18037 requires that (-1) * (-1) does not overflow ;; The only input that can produce -1 is (-1)^2. dec r23 brvs 0f inc r23 0: ret ENDF __mulqq3 #endif /* L_mulqq3 && ! 
HAVE_MUL */ /******************************************************* Fractional Multiply .16 x .16 with and without MUL *******************************************************/ #if defined (L_mulhq3) ;;; Same code with and without MUL, but the interfaces differ: ;;; no MUL: (R25:R24) = (R22:R23) * (R24:R25) ;;; Clobbers: ABI, called by optabs ;;; MUL: (R25:R24) = (R19:R18) * (R27:R26) ;;; Clobbers: __tmp_reg__, R22, R23 ;;; Rounding: -0.5 LSB <= error <= 0.5 LSB DEFUN __mulhq3 XCALL __mulhisi3 ;; Shift result into place lsl r23 rol r24 rol r25 brvs 1f ;; Round sbrc r23, 7 adiw r24, 1 ret 1: ;; Overflow. TR 18037 requires (-1)^2 not to overflow ldi r24, lo8 (0x7fff) ldi r25, hi8 (0x7fff) ret ENDF __mulhq3 #endif /* defined (L_mulhq3) */ #if defined (L_muluhq3) ;;; Same code with and without MUL, but the interfaces differ: ;;; no MUL: (R25:R24) *= (R23:R22) ;;; Clobbers: ABI, called by optabs ;;; MUL: (R25:R24) = (R19:R18) * (R27:R26) ;;; Clobbers: __tmp_reg__, R22, R23 ;;; Rounding: -0.5 LSB < error <= 0.5 LSB DEFUN __muluhq3 XCALL __umulhisi3 ;; Round sbrc r23, 7 adiw r24, 1 ret ENDF __muluhq3 #endif /* L_muluhq3 */ /******************************************************* Fixed Multiply 8.8 x 8.8 with and without MUL *******************************************************/ #if defined (L_mulha3) ;;; Same code with and without MUL, but the interfaces differ: ;;; no MUL: (R25:R24) = (R22:R23) * (R24:R25) ;;; Clobbers: ABI, called by optabs ;;; MUL: (R25:R24) = (R19:R18) * (R27:R26) ;;; Clobbers: __tmp_reg__, R22, R23 ;;; Rounding: -0.5 LSB <= error <= 0.5 LSB DEFUN __mulha3 XCALL __mulhisi3 lsl r22 rol r23 rol r24 XJMP __muluha3_round ENDF __mulha3 #endif /* L_mulha3 */ #if defined (L_muluha3) ;;; Same code with and without MUL, but the interfaces differ: ;;; no MUL: (R25:R24) *= (R23:R22) ;;; Clobbers: ABI, called by optabs ;;; MUL: (R25:R24) = (R19:R18) * (R27:R26) ;;; Clobbers: __tmp_reg__, R22, R23 ;;; Rounding: -0.5 LSB < error <= 0.5 LSB DEFUN __muluha3 XCALL __umulhisi3 XJMP __muluha3_round ENDF __muluha3 #endif /* L_muluha3 */ #if defined (L_muluha3_round) DEFUN __muluha3_round ;; Shift result into place mov r25, r24 mov r24, r23 ;; Round sbrc r22, 7 adiw r24, 1 ret ENDF __muluha3_round #endif /* L_muluha3_round */ /******************************************************* Fixed Multiplication 16.16 x 16.16 *******************************************************/ ;; Bits outside the result (below LSB), used in the signed version #define GUARD __tmp_reg__ #if defined (__AVR_HAVE_MUL__) ;; Multiplier #define A0 16 #define A1 A0+1 #define A2 A1+1 #define A3 A2+1 ;; Multiplicand #define B0 20 #define B1 B0+1 #define B2 B1+1 #define B3 B2+1 ;; Result #define C0 24 #define C1 C0+1 #define C2 C1+1 #define C3 C2+1 #if defined (L_mulusa3) ;;; (C3:C0) = (A3:A0) * (B3:B0) DEFUN __mulusa3 set ;; Fallthru ENDF __mulusa3 ;;; Round for last digit iff T = 1 ;;; Return guard bits in GUARD (__tmp_reg__). ;;; Rounding, T = 0: -1.0 LSB < error <= 0 LSB ;;; Rounding, T = 1: -0.5 LSB < error <= 0.5 LSB DEFUN __mulusa3_round ;; Some of the MUL instructions have LSBs outside the result. ;; Don't ignore these LSBs in order to tame rounding error. ;; Use C2/C3 for these LSBs. clr C0 clr C1 mul A0, B0 $ movw C2, r0 mul A1, B0 $ add C3, r0 $ adc C0, r1 mul A0, B1 $ add C3, r0 $ adc C0, r1 $ rol C1 ;; Round if T = 1. Store guarding bits outside the result for rounding ;; and left-shift by the signed version (function below). 
brtc 0f sbrc C3, 7 adiw C0, 1 0: push C3 ;; The following MULs don't have LSBs outside the result. ;; C2/C3 is the high part. mul A0, B2 $ add C0, r0 $ adc C1, r1 $ sbc C2, C2 mul A1, B1 $ add C0, r0 $ adc C1, r1 $ sbci C2, 0 mul A2, B0 $ add C0, r0 $ adc C1, r1 $ sbci C2, 0 neg C2 mul A0, B3 $ add C1, r0 $ adc C2, r1 $ sbc C3, C3 mul A1, B2 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0 mul A2, B1 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0 mul A3, B0 $ add C1, r0 $ adc C2, r1 $ sbci C3, 0 neg C3 mul A1, B3 $ add C2, r0 $ adc C3, r1 mul A2, B2 $ add C2, r0 $ adc C3, r1 mul A3, B1 $ add C2, r0 $ adc C3, r1 mul A2, B3 $ add C3, r0 mul A3, B2 $ add C3, r0 ;; Guard bits used in the signed version below. pop GUARD clr __zero_reg__ ret ENDF __mulusa3_round #endif /* L_mulusa3 */ #if defined (L_mulsa3) ;;; (C3:C0) = (A3:A0) * (B3:B0) ;;; Clobbers: __tmp_reg__, T ;;; Rounding: -0.5 LSB <= error <= 0.5 LSB DEFUN __mulsa3 clt XCALL __mulusa3_round ;; A posteriori sign extension of the operands tst B3 brpl 1f sub C2, A0 sbc C3, A1 1: sbrs A3, 7 rjmp 2f sub C2, B0 sbc C3, B1 2: ;; Shift 1 bit left to adjust for 15 fractional bits lsl GUARD rol C0 rol C1 rol C2 rol C3 ;; Round last digit lsl GUARD adc C0, __zero_reg__ adc C1, __zero_reg__ adc C2, __zero_reg__ adc C3, __zero_reg__ ret ENDF __mulsa3 #endif /* L_mulsa3 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef B0 #undef B1 #undef B2 #undef B3 #undef C0 #undef C1 #undef C2 #undef C3 #else /* __AVR_HAVE_MUL__ */ #define A0 18 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define B0 22 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define C0 22 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 ;; __tmp_reg__ #define CC0 0 ;; __zero_reg__ #define CC1 1 #define CC2 16 #define CC3 17 #define AA0 26 #define AA1 AA0+1 #define AA2 30 #define AA3 AA2+1 #if defined (L_mulsa3) ;;; (R25:R22) *= (R21:R18) ;;; Clobbers: ABI, called by optabs ;;; Rounding: -1 LSB <= error <= 1 LSB DEFUN __mulsa3 push B0 push B1 push B3 clt XCALL __mulusa3_round pop r30 ;; sign-extend B bst r30, 7 brtc 1f ;; A1, A0 survived in R27:R26 sub C2, AA0 sbc C3, AA1 1: pop AA1 ;; B1 pop AA0 ;; B0 ;; sign-extend A. A3 survived in R31 bst AA3, 7 brtc 2f sub C2, AA0 sbc C3, AA1 2: ;; Shift 1 bit left to adjust for 15 fractional bits lsl GUARD rol C0 rol C1 rol C2 rol C3 ;; Round last digit lsl GUARD adc C0, __zero_reg__ adc C1, __zero_reg__ adc C2, __zero_reg__ adc C3, __zero_reg__ ret ENDF __mulsa3 #endif /* L_mulsa3 */ #if defined (L_mulusa3) ;;; (R25:R22) *= (R21:R18) ;;; Clobbers: ABI, called by optabs ;;; Rounding: -1 LSB <= error <= 1 LSB DEFUN __mulusa3 set ;; Fallthru ENDF __mulusa3 ;;; A[] survives in 26, 27, 30, 31 ;;; Also used by __mulsa3 with T = 0 ;;; Round if T = 1 ;;; Return Guard bits in GUARD (__tmp_reg__), used by signed version. 
DEFUN __mulusa3_round push CC2 push CC3 ; clear result clr __tmp_reg__ wmov CC2, CC0 ; save multiplicand wmov AA0, A0 wmov AA2, A2 rjmp 3f ;; Loop the integral part 1: ;; CC += A * 2^n; n >= 0 add CC0,A0 $ adc CC1,A1 $ adc CC2,A2 $ adc CC3,A3 2: ;; A <<= 1 lsl A0 $ rol A1 $ rol A2 $ rol A3 3: ;; IBIT(B) >>= 1 ;; Carry = n-th bit of B; n >= 0 lsr B3 ror B2 brcs 1b sbci B3, 0 brne 2b ;; Loop the fractional part ;; B2/B3 is 0 now, use as guard bits for rounding ;; Restore multiplicand wmov A0, AA0 wmov A2, AA2 rjmp 5f 4: ;; CC += A:Guard * 2^n; n < 0 add B3,B2 $ adc CC0,A0 $ adc CC1,A1 $ adc CC2,A2 $ adc CC3,A3 5: ;; A:Guard >>= 1 lsr A3 $ ror A2 $ ror A1 $ ror A0 $ ror B2 ;; FBIT(B) <<= 1 ;; Carry = n-th bit of B; n < 0 lsl B0 rol B1 brcs 4b sbci B0, 0 brne 5b ;; Save guard bits and set carry for rounding push B3 lsl B3 ;; Move result into place wmov C2, CC2 wmov C0, CC0 clr __zero_reg__ brtc 6f ;; Round iff T = 1 adc C0, __zero_reg__ adc C1, __zero_reg__ adc C2, __zero_reg__ adc C3, __zero_reg__ 6: pop GUARD ;; Epilogue pop CC3 pop CC2 ret ENDF __mulusa3_round #endif /* L_mulusa3 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef B0 #undef B1 #undef B2 #undef B3 #undef C0 #undef C1 #undef C2 #undef C3 #undef AA0 #undef AA1 #undef AA2 #undef AA3 #undef CC0 #undef CC1 #undef CC2 #undef CC3 #endif /* __AVR_HAVE_MUL__ */ #undef GUARD /*********************************************************** Fixed unsigned saturated Multiplication 8.8 x 8.8 ***********************************************************/ #define C0 22 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 #define SS __tmp_reg__ #if defined (L_usmuluha3) DEFUN __usmuluha3 ;; Widening multiply #ifdef __AVR_HAVE_MUL__ ;; Adjust interface movw R26, R22 movw R18, R24 #endif /* HAVE MUL */ XCALL __umulhisi3 tst C3 brne .Lmax ;; Round, target is in C1..C2 lsl C0 adc C1, __zero_reg__ adc C2, __zero_reg__ brcs .Lmax ;; Move result into place mov C3, C2 mov C2, C1 ret .Lmax: ;; Saturate ldi C2, 0xff ldi C3, 0xff ret ENDF __usmuluha3 #endif /* L_usmuluha3 */ /*********************************************************** Fixed signed saturated Multiplication s8.7 x s8.7 ***********************************************************/ #if defined (L_ssmulha3) DEFUN __ssmulha3 ;; Widening multiply #ifdef __AVR_HAVE_MUL__ ;; Adjust interface movw R26, R22 movw R18, R24 #endif /* HAVE MUL */ XCALL __mulhisi3 ;; Adjust decimal point lsl C0 rol C1 rol C2 brvs .LsatC3.3 ;; The 9 MSBs must be the same rol C3 sbc SS, SS cp C3, SS brne .LsatSS ;; Round lsl C0 adc C1, __zero_reg__ adc C2, __zero_reg__ brvs .Lmax ;; Move result into place mov C3, C2 mov C2, C1 ret .Lmax: ;; Load 0x7fff clr C3 .LsatC3.3: ;; C3 < 0 --> 0x8000 ;; C3 >= 0 --> 0x7fff mov SS, C3 .LsatSS: ;; Load min / max value: ;; SS = -1 --> 0x8000 ;; SS = 0 --> 0x7fff ldi C3, 0x7f ldi C2, 0xff sbrc SS, 7 adiw C2, 1 ret ENDF __ssmulha3 #endif /* L_ssmulha3 */ #undef C0 #undef C1 #undef C2 #undef C3 #undef SS /*********************************************************** Fixed unsigned saturated Multiplication 16.16 x 16.16 ***********************************************************/ #define C0 18 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 #define C4 C0+4 #define C5 C0+5 #define C6 C0+6 #define C7 C0+7 #define SS __tmp_reg__ #if defined (L_usmulusa3) ;; R22[4] = R22[4] *{ssat} R18[4] ;; Ordinary ABI function DEFUN __usmulusa3 ;; Widening multiply XCALL __umulsidi3 or C7, C6 brne .Lmax ;; Round, target is in C2..C5 lsl C1 adc C2, __zero_reg__ adc C3, __zero_reg__ adc C4, __zero_reg__ adc C5, 
__zero_reg__ brcs .Lmax ;; Move result into place wmov C6, C4 wmov C4, C2 ret .Lmax: ;; Saturate ldi C7, 0xff ldi C6, 0xff wmov C4, C6 ret ENDF __usmulusa3 #endif /* L_usmulusa3 */ /*********************************************************** Fixed signed saturated Multiplication s16.15 x s16.15 ***********************************************************/ #if defined (L_ssmulsa3) ;; R22[4] = R22[4] *{ssat} R18[4] ;; Ordinary ABI function DEFUN __ssmulsa3 ;; Widening multiply XCALL __mulsidi3 ;; Adjust decimal point lsl C1 rol C2 rol C3 rol C4 rol C5 brvs .LsatC7.7 ;; The 17 MSBs must be the same rol C6 rol C7 sbc SS, SS cp C6, SS cpc C7, SS brne .LsatSS ;; Round lsl C1 adc C2, __zero_reg__ adc C3, __zero_reg__ adc C4, __zero_reg__ adc C5, __zero_reg__ brvs .Lmax ;; Move result into place wmov C6, C4 wmov C4, C2 ret .Lmax: ;; Load 0x7fffffff clr C7 .LsatC7.7: ;; C7 < 0 --> 0x80000000 ;; C7 >= 0 --> 0x7fffffff lsl C7 sbc SS, SS .LsatSS: ;; Load min / max value: ;; SS = -1 --> 0x80000000 ;; SS = 0 --> 0x7fffffff com SS mov C4, SS mov C5, C4 wmov C6, C4 subi C7, 0x80 ret ENDF __ssmulsa3 #endif /* L_ssmulsa3 */ #undef C0 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #undef SS /******************************************************* Fractional Division 8 / 8 *******************************************************/ #define r_divd r25 /* dividend */ #define r_quo r24 /* quotient */ #define r_div r22 /* divisor */ #define r_sign __tmp_reg__ #if defined (L_divqq3) DEFUN __divqq3 mov r_sign, r_divd eor r_sign, r_div sbrc r_div, 7 neg r_div sbrc r_divd, 7 neg r_divd XCALL __divqq_helper lsr r_quo sbrc r_sign, 7 ; negate result if needed neg r_quo ret ENDF __divqq3 #endif /* L_divqq3 */ #if defined (L_udivuqq3) DEFUN __udivuqq3 cp r_divd, r_div brsh 0f XJMP __divqq_helper ;; Result is out of [0, 1) ==> Return 1 - eps. 
0: ldi r_quo, 0xff ret ENDF __udivuqq3 #endif /* L_udivuqq3 */ #if defined (L_divqq_helper) DEFUN __divqq_helper clr r_quo ; clear quotient inc __zero_reg__ ; init loop counter, used per shift __udivuqq3_loop: lsl r_divd ; shift dividend brcs 0f ; dividend overflow cp r_divd,r_div ; compare dividend & divisor brcc 0f ; dividend >= divisor rol r_quo ; shift quotient (with CARRY) rjmp __udivuqq3_cont 0: sub r_divd,r_div ; restore dividend lsl r_quo ; shift quotient (without CARRY) __udivuqq3_cont: lsl __zero_reg__ ; shift loop-counter bit brne __udivuqq3_loop com r_quo ; complement result ; because C flag was complemented in loop ret ENDF __divqq_helper #endif /* L_divqq_helper */ #undef r_divd #undef r_quo #undef r_div #undef r_sign /******************************************************* Fractional Division 16 / 16 *******************************************************/ #define r_divdL 26 /* dividend Low */ #define r_divdH 27 /* dividend Hig */ #define r_quoL 24 /* quotient Low */ #define r_quoH 25 /* quotient High */ #define r_divL 22 /* divisor */ #define r_divH 23 /* divisor */ #define r_cnt 21 #if defined (L_divhq3) DEFUN __divhq3 mov r0, r_divdH eor r0, r_divH sbrs r_divH, 7 rjmp 1f NEG2 r_divL 1: sbrs r_divdH, 7 rjmp 2f NEG2 r_divdL 2: cp r_divdL, r_divL cpc r_divdH, r_divH breq __divhq3_minus1 ; if equal return -1 XCALL __udivuhq3 lsr r_quoH ror r_quoL brpl 9f ;; negate result if needed NEG2 r_quoL 9: ret __divhq3_minus1: ldi r_quoH, 0x80 clr r_quoL ret ENDF __divhq3 #endif /* defined (L_divhq3) */ #if defined (L_udivuhq3) DEFUN __udivuhq3 sub r_quoH,r_quoH ; clear quotient and carry ;; FALLTHRU ENDF __udivuhq3 DEFUN __udivuha3_common clr r_quoL ; clear quotient ldi r_cnt,16 ; init loop counter __udivuhq3_loop: rol r_divdL ; shift dividend (with CARRY) rol r_divdH brcs __udivuhq3_ep ; dividend overflow cp r_divdL,r_divL ; compare dividend & divisor cpc r_divdH,r_divH brcc __udivuhq3_ep ; dividend >= divisor rol r_quoL ; shift quotient (with CARRY) rjmp __udivuhq3_cont __udivuhq3_ep: sub r_divdL,r_divL ; restore dividend sbc r_divdH,r_divH lsl r_quoL ; shift quotient (without CARRY) __udivuhq3_cont: rol r_quoH ; shift quotient dec r_cnt ; decrement loop counter brne __udivuhq3_loop com r_quoL ; complement result com r_quoH ; because C flag was complemented in loop ret ENDF __udivuha3_common #endif /* defined (L_udivuhq3) */ /******************************************************* Fixed Division 8.8 / 8.8 *******************************************************/ #if defined (L_divha3) DEFUN __divha3 mov r0, r_divdH eor r0, r_divH sbrs r_divH, 7 rjmp 1f NEG2 r_divL 1: sbrs r_divdH, 7 rjmp 2f NEG2 r_divdL 2: XCALL __udivuha3 lsr r_quoH ; adjust to 7 fractional bits ror r_quoL sbrs r0, 7 ; negate result if needed ret NEG2 r_quoL ret ENDF __divha3 #endif /* defined (L_divha3) */ #if defined (L_udivuha3) DEFUN __udivuha3 mov r_quoH, r_divdL mov r_divdL, r_divdH clr r_divdH lsl r_quoH ; shift quotient into carry XJMP __udivuha3_common ; same as fractional after rearrange ENDF __udivuha3 #endif /* defined (L_udivuha3) */ #undef r_divdL #undef r_divdH #undef r_quoL #undef r_quoH #undef r_divL #undef r_divH #undef r_cnt /******************************************************* Fixed Division 16.16 / 16.16 *******************************************************/ #define r_arg1L 24 /* arg1 gets passed already in place */ #define r_arg1H 25 #define r_arg1HL 26 #define r_arg1HH 27 #define r_divdL 26 /* dividend Low */ #define r_divdH 27 #define r_divdHL 30 #define r_divdHH 31 /* dividend High */ 
#define r_quoL 22 /* quotient Low */ #define r_quoH 23 #define r_quoHL 24 #define r_quoHH 25 /* quotient High */ #define r_divL 18 /* divisor Low */ #define r_divH 19 #define r_divHL 20 #define r_divHH 21 /* divisor High */ #define r_cnt __zero_reg__ /* loop count (0 after the loop!) */ #if defined (L_divsa3) DEFUN __divsa3 mov r0, r_arg1HH eor r0, r_divHH sbrs r_divHH, 7 rjmp 1f NEG4 r_divL 1: sbrs r_arg1HH, 7 rjmp 2f NEG4 r_arg1L 2: XCALL __udivusa3 lsr r_quoHH ; adjust to 15 fractional bits ror r_quoHL ror r_quoH ror r_quoL sbrs r0, 7 ; negate result if needed ret ;; negate r_quoL XJMP __negsi2 ENDF __divsa3 #endif /* defined (L_divsa3) */ #if defined (L_udivusa3) DEFUN __udivusa3 ldi r_divdHL, 32 ; init loop counter mov r_cnt, r_divdHL clr r_divdHL clr r_divdHH wmov r_quoL, r_divdHL lsl r_quoHL ; shift quotient into carry rol r_quoHH __udivusa3_loop: rol r_divdL ; shift dividend (with CARRY) rol r_divdH rol r_divdHL rol r_divdHH brcs __udivusa3_ep ; dividend overflow cp r_divdL,r_divL ; compare dividend & divisor cpc r_divdH,r_divH cpc r_divdHL,r_divHL cpc r_divdHH,r_divHH brcc __udivusa3_ep ; dividend >= divisor rol r_quoL ; shift quotient (with CARRY) rjmp __udivusa3_cont __udivusa3_ep: sub r_divdL,r_divL ; restore dividend sbc r_divdH,r_divH sbc r_divdHL,r_divHL sbc r_divdHH,r_divHH lsl r_quoL ; shift quotient (without CARRY) __udivusa3_cont: rol r_quoH ; shift quotient rol r_quoHL rol r_quoHH dec r_cnt ; decrement loop counter brne __udivusa3_loop com r_quoL ; complement result com r_quoH ; because C flag was complemented in loop com r_quoHL com r_quoHH ret ENDF __udivusa3 #endif /* defined (L_udivusa3) */ #undef r_arg1L #undef r_arg1H #undef r_arg1HL #undef r_arg1HH #undef r_divdL #undef r_divdH #undef r_divdHL #undef r_divdHH #undef r_quoL #undef r_quoH #undef r_quoHL #undef r_quoHH #undef r_divL #undef r_divH #undef r_divHL #undef r_divHH #undef r_cnt ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Saturation, 1 Byte ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; First Argument and Return Register #define A0 24 #if defined (L_ssabs_1) DEFUN __ssabs_1 sbrs A0, 7 ret neg A0 sbrc A0,7 dec A0 ret ENDF __ssabs_1 #endif /* L_ssabs_1 */ #undef A0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Saturation, 2 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; First Argument and Return Register #define A0 24 #define A1 A0+1 #if defined (L_ssneg_2) DEFUN __ssneg_2 NEG2 A0 brvc 0f sbiw A0, 1 0: ret ENDF __ssneg_2 #endif /* L_ssneg_2 */ #if defined (L_ssabs_2) DEFUN __ssabs_2 sbrs A1, 7 ret XJMP __ssneg_2 ENDF __ssabs_2 #endif /* L_ssabs_2 */ #undef A0 #undef A1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Saturation, 4 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; First Argument and Return Register #define A0 22 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #if defined (L_ssneg_4) DEFUN __ssneg_4 XCALL __negsi2 brvc 0f ldi A3, 0x7f ldi A2, 0xff ldi A1, 0xff ldi A0, 0xff 0: ret ENDF __ssneg_4 #endif /* L_ssneg_4 */ #if defined (L_ssabs_4) DEFUN __ssabs_4 sbrs A3, 7 ret XJMP __ssneg_4 ENDF __ssabs_4 #endif /* L_ssabs_4 */ #undef A0 #undef A1 #undef A2 #undef A3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Saturation, 8 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; First Argument and Return Register #define A0 18 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define A4 A0+4 #define A5 A0+5 #define A6 A0+6 #define A7 A0+7 #if defined (L_clr_8) FALIAS __usneguta2 FALIAS __usneguda2 FALIAS __usnegudq2 ;; Clear Carry and all Bytes DEFUN __clr_8 ;; 
Clear Carry and set Z sub A7, A7 ;; FALLTHRU ENDF __clr_8 ;; Propagate Carry to all Bytes, Carry unaltered DEFUN __sbc_8 sbc A7, A7 sbc A6, A6 wmov A4, A6 wmov A2, A6 wmov A0, A6 ret ENDF __sbc_8 #endif /* L_clr_8 */ #if defined (L_ssneg_8) FALIAS __ssnegta2 FALIAS __ssnegda2 FALIAS __ssnegdq2 DEFUN __ssneg_8 XCALL __negdi2 brvc 0f ;; A[] = 0x7fffffff sec XCALL __sbc_8 ldi A7, 0x7f 0: ret ENDF __ssneg_8 #endif /* L_ssneg_8 */ #if defined (L_ssabs_8) FALIAS __ssabsta2 FALIAS __ssabsda2 FALIAS __ssabsdq2 DEFUN __ssabs_8 sbrs A7, 7 ret XJMP __ssneg_8 ENDF __ssabs_8 #endif /* L_ssabs_8 */ ;; Second Argument #define B0 10 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define B4 B0+4 #define B5 B0+5 #define B6 B0+6 #define B7 B0+7 #if defined (L_usadd_8) FALIAS __usadduta3 FALIAS __usadduda3 FALIAS __usaddudq3 DEFUN __usadd_8 XCALL __adddi3 brcs 0f ret 0: ;; A[] = 0xffffffff XJMP __sbc_8 ENDF __usadd_8 #endif /* L_usadd_8 */ #if defined (L_ussub_8) FALIAS __ussubuta3 FALIAS __ussubuda3 FALIAS __ussubudq3 DEFUN __ussub_8 XCALL __subdi3 brcs 0f ret 0: ;; A[] = 0 XJMP __clr_8 ENDF __ussub_8 #endif /* L_ussub_8 */ #if defined (L_ssadd_8) FALIAS __ssaddta3 FALIAS __ssaddda3 FALIAS __ssadddq3 DEFUN __ssadd_8 XCALL __adddi3 brvc 0f ;; A = (B >= 0) ? INT64_MAX : INT64_MIN cpi B7, 0x80 XCALL __sbc_8 subi A7, 0x80 0: ret ENDF __ssadd_8 #endif /* L_ssadd_8 */ #if defined (L_sssub_8) FALIAS __sssubta3 FALIAS __sssubda3 FALIAS __sssubdq3 DEFUN __sssub_8 XCALL __subdi3 brvc 0f ;; A = (B < 0) ? INT64_MAX : INT64_MIN ldi A7, 0x7f cp A7, B7 XCALL __sbc_8 subi A7, 0x80 0: ret ENDF __sssub_8 #endif /* L_sssub_8 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef A4 #undef A5 #undef A6 #undef A7 #undef B0 #undef B1 #undef B2 #undef B3 #undef B4 #undef B5 #undef B6 #undef B7 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Rounding Helpers ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #ifdef L_mask1 #define AA 24 #define CC 25 ;; R25 = 1 << (R24 & 7) ;; CC = 1 << (AA & 7) ;; Clobbers: None DEFUN __mask1 ;; CC = 2 ^ AA.1 ldi CC, 1 << 2 sbrs AA, 1 ldi CC, 1 << 0 ;; CC *= 2 ^ AA.0 sbrc AA, 0 lsl CC ;; CC *= 2 ^ AA.2 sbrc AA, 2 swap CC ret ENDF __mask1 #undef AA #undef CC #endif /* L_mask1 */ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; The rounding point. Any bits smaller than ;; 2^{-RP} will be cleared. 
#define RP R24 #define A0 22 #define A1 A0 + 1 #define C0 24 #define C1 C0 + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Rounding, 1 Byte ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #ifdef L_roundqq3 ;; R24 = round (R22, R24) ;; Clobbers: R22, __tmp_reg__ DEFUN __roundqq3 mov __tmp_reg__, C1 subi RP, __QQ_FBIT__ - 1 neg RP ;; R25 = 1 << RP (Total offset is FBIT-1 - RP) XCALL __mask1 mov C0, C1 ;; Add-Saturate 2^{-RP-1} add A0, C0 brvc 0f ldi C0, 0x7f rjmp 9f 0: ;; Mask out bits beyond RP lsl C0 neg C0 and C0, A0 9: mov C1, __tmp_reg__ ret ENDF __roundqq3 #endif /* L_roundqq3 */ #ifdef L_rounduqq3 ;; R24 = round (R22, R24) ;; Clobbers: R22, __tmp_reg__ DEFUN __rounduqq3 mov __tmp_reg__, C1 subi RP, __UQQ_FBIT__ - 1 neg RP ;; R25 = 1 << RP (Total offset is FBIT-1 - RP) XCALL __mask1 mov C0, C1 ;; Add-Saturate 2^{-RP-1} add A0, C0 brcc 0f ldi C0, 0xff rjmp 9f 0: ;; Mask out bits beyond RP lsl C0 neg C0 and C0, A0 9: mov C1, __tmp_reg__ ret ENDF __rounduqq3 #endif /* L_rounduqq3 */ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Rounding, 2 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #ifdef L_addmask_2 ;; [ R25:R24 = 1 << (R24 & 15) ;; R23:R22 += 1 << (R24 & 15) ] ;; SREG is set according to the addition DEFUN __addmask_2 ;; R25 = 1 << (R24 & 7) XCALL __mask1 cpi RP, 1 << 3 sbc C0, C0 ;; Swap C0 and C1 if RP.3 was set and C0, C1 eor C1, C0 ;; Finally, add the power-of-two: A[] += C[] add A0, C0 adc A1, C1 ret ENDF __addmask_2 #endif /* L_addmask_2 */ #ifdef L_round_s2 ;; R25:R24 = round (R23:R22, R24) ;; Clobbers: R23, R22 DEFUN __roundhq3 subi RP, __HQ_FBIT__ - __HA_FBIT__ ENDF __roundhq3 DEFUN __roundha3 subi RP, __HA_FBIT__ - 1 neg RP ;; [ R25:R24 = 1 << (FBIT-1 - RP) ;; R23:R22 += 1 << (FBIT-1 - RP) ] XCALL __addmask_2 XJMP __round_s2_const ENDF __roundha3 #endif /* L_round_s2 */ #ifdef L_round_u2 ;; R25:R24 = round (R23:R22, R24) ;; Clobbers: R23, R22 DEFUN __rounduhq3 subi RP, __UHQ_FBIT__ - __UHA_FBIT__ ENDF __rounduhq3 DEFUN __rounduha3 subi RP, __UHA_FBIT__ - 1 neg RP ;; [ R25:R24 = 1 << (FBIT-1 - RP) ;; R23:R22 += 1 << (FBIT-1 - RP) ] XCALL __addmask_2 XJMP __round_u2_const ENDF __rounduha3 #endif /* L_round_u2 */ #ifdef L_round_2_const ;; Helpers for 2 byte wide rounding DEFUN __round_s2_const brvc 2f ldi C1, 0x7f rjmp 1f ;; FALLTHRU (Barrier) ENDF __round_s2_const DEFUN __round_u2_const brcc 2f ldi C1, 0xff 1: ldi C0, 0xff rjmp 9f 2: ;; Saturation is performed now. ;; Currently, we have C[] = 2^{-RP-1} ;; C[] = 2^{-RP} lsl C0 rol C1 ;; NEG2 C0 ;; Clear the bits beyond the rounding point. 
and C0, A0 and C1, A1 9: ret ENDF __round_u2_const #endif /* L_round_2_const */ #undef A0 #undef A1 #undef C0 #undef C1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Rounding, 4 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #define A0 18 #define A1 A0 + 1 #define A2 A0 + 2 #define A3 A0 + 3 #define C0 22 #define C1 C0 + 1 #define C2 C0 + 2 #define C3 C0 + 3 #ifdef L_addmask_4 ;; [ R25:R22 = 1 << (R24 & 31) ;; R21:R18 += 1 << (R24 & 31) ] ;; SREG is set according to the addition DEFUN __addmask_4 ;; R25 = 1 << (R24 & 7) XCALL __mask1 cpi RP, 1 << 4 sbc C0, C0 sbc C1, C1 ;; Swap C2 with C3 if RP.3 is not set cpi RP, 1 << 3 sbc C2, C2 and C2, C3 eor C3, C2 ;; Swap C3:C2 with C1:C0 if RP.4 is not set and C0, C2 $ eor C2, C0 and C1, C3 $ eor C3, C1 ;; Finally, add the power-of-two: A[] += C[] add A0, C0 adc A1, C1 adc A2, C2 adc A3, C3 ret ENDF __addmask_4 #endif /* L_addmask_4 */ #ifdef L_round_s4 ;; R25:R22 = round (R21:R18, R24) ;; Clobbers: R18...R21 DEFUN __roundsq3 subi RP, __SQ_FBIT__ - __SA_FBIT__ ENDF __roundsq3 DEFUN __roundsa3 subi RP, __SA_FBIT__ - 1 neg RP ;; [ R25:R22 = 1 << (FBIT-1 - RP) ;; R21:R18 += 1 << (FBIT-1 - RP) ] XCALL __addmask_4 XJMP __round_s4_const ENDF __roundsa3 #endif /* L_round_s4 */ #ifdef L_round_u4 ;; R25:R22 = round (R21:R18, R24) ;; Clobbers: R18...R21 DEFUN __roundusq3 subi RP, __USQ_FBIT__ - __USA_FBIT__ ENDF __roundusq3 DEFUN __roundusa3 subi RP, __USA_FBIT__ - 1 neg RP ;; [ R25:R22 = 1 << (FBIT-1 - RP) ;; R21:R18 += 1 << (FBIT-1 - RP) ] XCALL __addmask_4 XJMP __round_u4_const ENDF __roundusa3 #endif /* L_round_u4 */ #ifdef L_round_4_const ;; Helpers for 4 byte wide rounding DEFUN __round_s4_const brvc 2f ldi C3, 0x7f rjmp 1f ;; FALLTHRU (Barrier) ENDF __round_s4_const DEFUN __round_u4_const brcc 2f ldi C3, 0xff 1: ldi C2, 0xff ldi C1, 0xff ldi C0, 0xff rjmp 9f 2: ;; Saturation is performed now. ;; Currently, we have C[] = 2^{-RP-1} ;; C[] = 2^{-RP} lsl C0 rol C1 rol C2 rol C3 XCALL __negsi2 ;; Clear the bits beyond the rounding point. 
and C0, A0 and C1, A1 and C2, A2 and C3, A3 9: ret ENDF __round_u4_const #endif /* L_round_4_const */ #undef A0 #undef A1 #undef A2 #undef A3 #undef C0 #undef C1 #undef C2 #undef C3 #undef RP ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Rounding, 8 Bytes ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #define RP 16 #define FBITm1 31 #define C0 18 #define C1 C0 + 1 #define C2 C0 + 2 #define C3 C0 + 3 #define C4 C0 + 4 #define C5 C0 + 5 #define C6 C0 + 6 #define C7 C0 + 7 #define A0 16 #define A1 17 #define A2 26 #define A3 27 #define A4 28 #define A5 29 #define A6 30 #define A7 31 #ifdef L_rounddq3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __rounddq3 ldi FBITm1, __DQ_FBIT__ - 1 clt XJMP __round_x8 ENDF __rounddq3 #endif /* L_rounddq3 */ #ifdef L_roundudq3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __roundudq3 ldi FBITm1, __UDQ_FBIT__ - 1 set XJMP __round_x8 ENDF __roundudq3 #endif /* L_roundudq3 */ #ifdef L_roundda3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __roundda3 ldi FBITm1, __DA_FBIT__ - 1 clt XJMP __round_x8 ENDF __roundda3 #endif /* L_roundda3 */ #ifdef L_rounduda3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __rounduda3 ldi FBITm1, __UDA_FBIT__ - 1 set XJMP __round_x8 ENDF __rounduda3 #endif /* L_rounduda3 */ #ifdef L_roundta3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __roundta3 ldi FBITm1, __TA_FBIT__ - 1 clt XJMP __round_x8 ENDF __roundta3 #endif /* L_roundta3 */ #ifdef L_rounduta3 ;; R25:R18 = round (R25:R18, R16) ;; Clobbers: ABI DEFUN __rounduta3 ldi FBITm1, __UTA_FBIT__ - 1 set XJMP __round_x8 ENDF __rounduta3 #endif /* L_rounduta3 */ #ifdef L_round_x8 DEFUN __round_x8 push r16 push r17 push r28 push r29 ;; Compute log2 of addend from rounding point sub RP, FBITm1 neg RP ;; Move input to work register A[] push C0 mov A1, C1 wmov A2, C2 wmov A4, C4 wmov A6, C6 ;; C[] = 1 << (FBIT-1 - RP) XCALL __clr_8 inc C0 XCALL __ashldi3 pop A0 ;; A[] += C[] add A0, C0 adc A1, C1 adc A2, C2 adc A3, C3 adc A4, C4 adc A5, C5 adc A6, C6 adc A7, C7 brts 1f ;; Signed brvc 3f ;; Signed overflow: A[] = 0x7f... brvs 2f 1: ;; Unsigned brcc 3f ;; Unsigned overflow: A[] = 0xff... 2: ldi C7, 0xff ldi C6, 0xff wmov C0, C6 wmov C2, C6 wmov C4, C6 bld C7, 7 rjmp 9f 3: ;; C[] = -C[] - C[] push A0 ldi r16, 1 XCALL __ashldi3 pop A0 XCALL __negdi2 ;; Clear the bits beyond the rounding point. and C0, A0 and C1, A1 and C2, A2 and C3, A3 and C4, A4 and C5, A5 and C6, A6 and C7, A7 9: ;; Epilogue pop r29 pop r28 pop r17 pop r16 ret ENDF __round_x8 #endif /* L_round_x8 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef A4 #undef A5 #undef A6 #undef A7 #undef C0 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #undef RP #undef FBITm1 ;; Supply implementations / symbols for the bit-banging functions ;; __builtin_avr_bitsfx and __builtin_avr_fxbits #ifdef L_ret DEFUN __ret ret ENDF __ret #endif /* L_ret */ #endif /* if not __AVR_TINY__ */
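As one concrete instance of the saturating fixed-point helpers above, here is the arithmetic of a signed s8.7 multiply in the style of __ssmulha3, as a C sketch (my code, not the library's: widen, round, shift out the extra fraction bits, clamp on overflow; the asm's behavior at the exact rounding midpoint may differ slightly from this model):

#include <stdint.h>
#include <stdio.h>

/* Reader's sketch, not from the GCC tree: signed saturating s8.7
   multiply.  The product of two 7-fraction-bit values has 14 fraction
   bits, so 7 must be shifted back out, with round-to-nearest and
   saturation to the 16-bit range.  Assumes arithmetic right shift of
   negative values (true for GCC). */
static int16_t ssmul_s8_7_model(int16_t a, int16_t b)
{
    int32_t p = (int32_t)a * b;        /* 16x16 -> 32 widening multiply */
    p += 1 << 6;                       /* round: add half of a result ULP */
    p >>= 7;                           /* back to 7 fractional bits */
    if (p > INT16_MAX) return INT16_MAX;   /* saturate like the asm */
    if (p < INT16_MIN) return INT16_MIN;
    return (int16_t)p;
}

int main(void)
{
    /* 1.0 in s8.7 is 0x0080; 100.0 * 100.0 saturates to 0x7fff */
    printf("0x%04x\n", (uint16_t)ssmul_s8_7_model(100 << 7, 100 << 7));
    return 0;
}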
4ms/metamodule-plugin-sdk
71,021
plugin-libc/libgcc/config/avr/lib1funcs.S
/* -*- Mode: Asm -*- */ /* Copyright (C) 1998-2022 Free Software Foundation, Inc. Contributed by Denis Chertykov <chertykov@gmail.com> This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #if defined (__AVR_TINY__) #define __zero_reg__ r17 #define __tmp_reg__ r16 #else #define __zero_reg__ r1 #define __tmp_reg__ r0 #endif #define __SREG__ 0x3f #if defined (__AVR_HAVE_SPH__) #define __SP_H__ 0x3e #endif #define __SP_L__ 0x3d #define __RAMPZ__ 0x3B #define __EIND__ 0x3C /* Most of the functions here are called directly from avr.md patterns, instead of using the standard libcall mechanisms. This can make better code because GCC knows exactly which of the call-used registers (not all of them) are clobbered. */ /* FIXME: At present, there is no SORT directive in the linker script so that we must not assume that different modules in the same input section like .libgcc.text.mul will be located close together. Therefore, we cannot use RCALL/RJMP to call a function like __udivmodhi4 from __divmodhi4 and have to use lengthy XCALL/XJMP even though they are in the same input section and all same input sections together are small enough to reach every location with a RCALL/RJMP instruction. 
*/ #if defined (__AVR_HAVE_EIJMP_EICALL__) && !defined (__AVR_HAVE_ELPMX__) #error device not supported #endif .macro mov_l r_dest, r_src #if defined (__AVR_HAVE_MOVW__) movw \r_dest, \r_src #else mov \r_dest, \r_src #endif .endm .macro mov_h r_dest, r_src #if defined (__AVR_HAVE_MOVW__) ; empty #else mov \r_dest, \r_src #endif .endm .macro wmov r_dest, r_src #if defined (__AVR_HAVE_MOVW__) movw \r_dest, \r_src #else mov \r_dest, \r_src mov \r_dest+1, \r_src+1 #endif .endm #if defined (__AVR_HAVE_JMP_CALL__) #define XCALL call #define XJMP jmp #else #define XCALL rcall #define XJMP rjmp #endif #if defined (__AVR_HAVE_EIJMP_EICALL__) #define XICALL eicall #define XIJMP eijmp #else #define XICALL icall #define XIJMP ijmp #endif ;; Prologue stuff .macro do_prologue_saves n_pushed n_frame=0 ldi r26, lo8(\n_frame) ldi r27, hi8(\n_frame) ldi r30, lo8(gs(.L_prologue_saves.\@)) ldi r31, hi8(gs(.L_prologue_saves.\@)) XJMP __prologue_saves__ + ((18 - (\n_pushed)) * 2) .L_prologue_saves.\@: .endm ;; Epilogue stuff .macro do_epilogue_restores n_pushed n_frame=0 in r28, __SP_L__ #ifdef __AVR_HAVE_SPH__ in r29, __SP_H__ .if \n_frame > 63 subi r28, lo8(-\n_frame) sbci r29, hi8(-\n_frame) .elseif \n_frame > 0 adiw r28, \n_frame .endif #else clr r29 .if \n_frame > 0 subi r28, lo8(-\n_frame) .endif #endif /* HAVE SPH */ ldi r30, \n_pushed XJMP __epilogue_restores__ + ((18 - (\n_pushed)) * 2) .endm ;; Support function entry and exit for convenience .macro wsubi r_arg1, i_arg2 #if defined (__AVR_TINY__) subi \r_arg1, lo8(\i_arg2) sbci \r_arg1+1, hi8(\i_arg2) #else sbiw \r_arg1, \i_arg2 #endif .endm .macro waddi r_arg1, i_arg2 #if defined (__AVR_TINY__) subi \r_arg1, lo8(-\i_arg2) sbci \r_arg1+1, hi8(-\i_arg2) #else adiw \r_arg1, \i_arg2 #endif .endm .macro DEFUN name .global \name .func \name \name: .endm .macro ENDF name .size \name, .-\name .endfunc .endm .macro FALIAS name .global \name .func \name \name: .size \name, .-\name .endfunc .endm ;; Skip next instruction, typically a jump target #define skip cpse 16,16 ;; Negate a 2-byte value held in consecutive registers .macro NEG2 reg com \reg+1 neg \reg sbci \reg+1, -1 .endm ;; Negate a 4-byte value held in consecutive registers ;; Sets the V flag for signed overflow tests if REG >= 16 .macro NEG4 reg com \reg+3 com \reg+2 com \reg+1 .if \reg >= 16 neg \reg sbci \reg+1, -1 sbci \reg+2, -1 sbci \reg+3, -1 .else com \reg adc \reg, __zero_reg__ adc \reg+1, __zero_reg__ adc \reg+2, __zero_reg__ adc \reg+3, __zero_reg__ .endif .endm #define exp_lo(N) hlo8 ((N) << 23) #define exp_hi(N) hhi8 ((N) << 23) .section .text.libgcc.mul, "ax", @progbits ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; /* Note: mulqi3, mulhi3 are open-coded on the enhanced core. 
*/ #if !defined (__AVR_HAVE_MUL__) /******************************************************* Multiplication 8 x 8 without MUL *******************************************************/ #if defined (L_mulqi3) #define r_arg2 r22 /* multiplicand */ #define r_arg1 r24 /* multiplier */ #define r_res __tmp_reg__ /* result */ DEFUN __mulqi3 clr r_res ; clear result __mulqi3_loop: sbrc r_arg1,0 add r_res,r_arg2 add r_arg2,r_arg2 ; shift multiplicand breq __mulqi3_exit ; while multiplicand != 0 lsr r_arg1 ; brne __mulqi3_loop ; exit if multiplier = 0 __mulqi3_exit: mov r_arg1,r_res ; result to return register ret ENDF __mulqi3 #undef r_arg2 #undef r_arg1 #undef r_res #endif /* defined (L_mulqi3) */ /******************************************************* Widening Multiplication 16 = 8 x 8 without MUL Multiplication 16 x 16 without MUL *******************************************************/ #define A0 22 #define A1 23 #define B0 24 #define BB0 20 #define B1 25 ;; Output overlaps input, thus expand result in CC0/1 #define C0 24 #define C1 25 #define CC0 __tmp_reg__ #define CC1 21 #if defined (L_umulqihi3) ;;; R25:R24 = (unsigned int) R22 * (unsigned int) R24 ;;; (C1:C0) = (unsigned int) A0 * (unsigned int) B0 ;;; Clobbers: __tmp_reg__, R21..R23 DEFUN __umulqihi3 clr A1 clr B1 XJMP __mulhi3 ENDF __umulqihi3 #endif /* L_umulqihi3 */ #if defined (L_mulqihi3) ;;; R25:R24 = (signed int) R22 * (signed int) R24 ;;; (C1:C0) = (signed int) A0 * (signed int) B0 ;;; Clobbers: __tmp_reg__, R20..R23 DEFUN __mulqihi3 ;; Sign-extend B0 clr B1 sbrc B0, 7 com B1 ;; The multiplication runs twice as fast if A1 is zero, thus: ;; Zero-extend A0 clr A1 #ifdef __AVR_HAVE_JMP_CALL__ ;; Store B0 * sign of A clr BB0 sbrc A0, 7 mov BB0, B0 call __mulhi3 #else /* have no CALL */ ;; Skip sign-extension of A if A >= 0 ;; Same size as with the first alternative but avoids errata skip ;; and is faster if A >= 0 sbrs A0, 7 rjmp __mulhi3 ;; If A < 0 store B mov BB0, B0 rcall __mulhi3 #endif /* HAVE_JMP_CALL */ ;; 1-extend A after the multiplication sub C1, BB0 ret ENDF __mulqihi3 #endif /* L_mulqihi3 */ #if defined (L_mulhi3) ;;; R25:R24 = R23:R22 * R25:R24 ;;; (C1:C0) = (A1:A0) * (B1:B0) ;;; Clobbers: __tmp_reg__, R21..R23 DEFUN __mulhi3 ;; Clear result clr CC0 clr CC1 rjmp 3f 1: ;; Bit n of A is 1 --> C += B << n add CC0, B0 adc CC1, B1 2: lsl B0 rol B1 3: ;; If B == 0 we are ready wsubi B0, 0 breq 9f ;; Carry = n-th bit of A lsr A1 ror A0 ;; If bit n of A is set, then go add B * 2^n to C brcs 1b ;; Carry = 0 --> The ROR above acts like CP A0, 0 ;; Thus, it is sufficient to CPC the high part to test A against 0 cpc A1, __zero_reg__ ;; Only proceed if A != 0 brne 2b 9: ;; Move Result into place mov C0, CC0 mov C1, CC1 ret ENDF __mulhi3 #endif /* L_mulhi3 */ #undef A0 #undef A1 #undef B0 #undef BB0 #undef B1 #undef C0 #undef C1 #undef CC0 #undef CC1 #define A0 22 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define B0 18 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define CC0 26 #define CC1 CC0+1 #define CC2 30 #define CC3 CC2+1 #define C0 22 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 /******************************************************* Widening Multiplication 32 = 16 x 16 without MUL *******************************************************/ #if defined (L_umulhisi3) DEFUN __umulhisi3 wmov B0, 24 ;; Zero-extend B clr B2 clr B3 ;; Zero-extend A wmov A2, B2 XJMP __mulsi3 ENDF __umulhisi3 #endif /* L_umulhisi3 */ #if defined (L_mulhisi3) DEFUN __mulhisi3 wmov B0, 24 ;; Sign-extend B lsl r25 sbc B2, B2 mov B3, B2 #ifdef 
__AVR_ERRATA_SKIP_JMP_CALL__ ;; Sign-extend A clr A2 sbrc A1, 7 com A2 mov A3, A2 XJMP __mulsi3 #else /* no __AVR_ERRATA_SKIP_JMP_CALL__ */ ;; Zero-extend A and __mulsi3 will run at least twice as fast ;; compared to a sign-extended A. clr A2 clr A3 sbrs A1, 7 XJMP __mulsi3 ;; If A < 0 then perform the B * 0xffff.... before the ;; very multiplication by initializing the high part of the ;; result CC with -B. wmov CC2, A2 sub CC2, B0 sbc CC3, B1 XJMP __mulsi3_helper #endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */ ENDF __mulhisi3 #endif /* L_mulhisi3 */ /******************************************************* Multiplication 32 x 32 without MUL *******************************************************/ #if defined (L_mulsi3) DEFUN __mulsi3 #if defined (__AVR_TINY__) in r26, __SP_L__ ; safe to use X, as it is CC0/CC1 in r27, __SP_H__ subi r26, lo8(-3) ; Add 3 to point past return address sbci r27, hi8(-3) push B0 ; save callee saved regs push B1 ld B0, X+ ; load from caller stack ld B1, X+ ld B2, X+ ld B3, X #endif ;; Clear result clr CC2 clr CC3 ;; FALLTHRU ENDF __mulsi3 DEFUN __mulsi3_helper clr CC0 clr CC1 rjmp 3f 1: ;; If bit n of A is set, then add B * 2^n to the result in CC ;; CC += B add CC0,B0 $ adc CC1,B1 $ adc CC2,B2 $ adc CC3,B3 2: ;; B <<= 1 lsl B0 $ rol B1 $ rol B2 $ rol B3 3: ;; A >>= 1: Carry = n-th bit of A lsr A3 $ ror A2 $ ror A1 $ ror A0 brcs 1b ;; Only continue if A != 0 sbci A1, 0 brne 2b wsubi A2, 0 brne 2b ;; All bits of A are consumed: Copy result to return register C wmov C0, CC0 wmov C2, CC2 #if defined (__AVR_TINY__) pop B1 ; restore callee saved regs pop B0 #endif /* defined (__AVR_TINY__) */ ret ENDF __mulsi3_helper #endif /* L_mulsi3 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef B0 #undef B1 #undef B2 #undef B3 #undef C0 #undef C1 #undef C2 #undef C3 #undef CC0 #undef CC1 #undef CC2 #undef CC3 #endif /* !defined (__AVR_HAVE_MUL__) */ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; #if defined (__AVR_HAVE_MUL__) #define A0 26 #define B0 18 #define C0 22 #define A1 A0+1 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 /******************************************************* Widening Multiplication 32 = 16 x 16 with MUL *******************************************************/ #if defined (L_mulhisi3) ;;; R25:R22 = (signed long) R27:R26 * (signed long) R19:R18 ;;; C3:C0 = (signed long) A1:A0 * (signed long) B1:B0 ;;; Clobbers: __tmp_reg__ DEFUN __mulhisi3 XCALL __umulhisi3 ;; Sign-extend B tst B1 brpl 1f sub C2, A0 sbc C3, A1 1: ;; Sign-extend A XJMP __usmulhisi3_tail ENDF __mulhisi3 #endif /* L_mulhisi3 */ #if defined (L_usmulhisi3) ;;; R25:R22 = (signed long) R27:R26 * (unsigned long) R19:R18 ;;; C3:C0 = (signed long) A1:A0 * (unsigned long) B1:B0 ;;; Clobbers: __tmp_reg__ DEFUN __usmulhisi3 XCALL __umulhisi3 ;; FALLTHRU ENDF __usmulhisi3 DEFUN __usmulhisi3_tail ;; Sign-extend A sbrs A1, 7 ret sub C2, B0 sbc C3, B1 ret ENDF __usmulhisi3_tail #endif /* L_usmulhisi3 */ #if defined (L_umulhisi3) ;;; R25:R22 = (unsigned long) R27:R26 * (unsigned long) R19:R18 ;;; C3:C0 = (unsigned long) A1:A0 * (unsigned long) B1:B0 ;;; Clobbers: __tmp_reg__ DEFUN __umulhisi3 mul A0, B0 movw C0, r0 mul A1, B1 movw C2, r0 mul A0, B1 #ifdef __AVR_HAVE_JMP_CALL__ ;; This function is used by many other routines, often multiple times. ;; Therefore, if the flash size is not too limited, avoid the RCALL ;; and invest 6 Bytes to speed things up. 
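;; (The 6 Bytes buy an inlined copy of the 4-instruction add/adc/clr/adc tail below: 8 Bytes of straight-line code instead of a 2-Byte RCALL, saving the RCALL/RET round trip on every call.)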
add C1, r0 adc C2, r1 clr __zero_reg__ adc C3, __zero_reg__ #else rcall 1f #endif mul A1, B0 1: add C1, r0 adc C2, r1 clr __zero_reg__ adc C3, __zero_reg__ ret ENDF __umulhisi3 #endif /* L_umulhisi3 */ /******************************************************* Widening Multiplication 32 = 16 x 32 with MUL *******************************************************/ #if defined (L_mulshisi3) ;;; R25:R22 = (signed long) R27:R26 * R21:R18 ;;; (C3:C0) = (signed long) A1:A0 * B3:B0 ;;; Clobbers: __tmp_reg__ DEFUN __mulshisi3 #ifdef __AVR_ERRATA_SKIP_JMP_CALL__ ;; Some cores have problem skipping 2-word instruction tst A1 brmi __mulohisi3 #else sbrs A1, 7 #endif /* __AVR_HAVE_JMP_CALL__ */ XJMP __muluhisi3 ;; FALLTHRU ENDF __mulshisi3 ;;; R25:R22 = (one-extended long) R27:R26 * R21:R18 ;;; (C3:C0) = (one-extended long) A1:A0 * B3:B0 ;;; Clobbers: __tmp_reg__ DEFUN __mulohisi3 XCALL __muluhisi3 ;; One-extend R27:R26 (A1:A0) sub C2, B0 sbc C3, B1 ret ENDF __mulohisi3 #endif /* L_mulshisi3 */ #if defined (L_muluhisi3) ;;; R25:R22 = (unsigned long) R27:R26 * R21:R18 ;;; (C3:C0) = (unsigned long) A1:A0 * B3:B0 ;;; Clobbers: __tmp_reg__ DEFUN __muluhisi3 XCALL __umulhisi3 mul A0, B3 add C3, r0 mul A1, B2 add C3, r0 mul A0, B2 add C2, r0 adc C3, r1 clr __zero_reg__ ret ENDF __muluhisi3 #endif /* L_muluhisi3 */ /******************************************************* Multiplication 32 x 32 with MUL *******************************************************/ #if defined (L_mulsi3) ;;; R25:R22 = R25:R22 * R21:R18 ;;; (C3:C0) = C3:C0 * B3:B0 ;;; Clobbers: R26, R27, __tmp_reg__ DEFUN __mulsi3 movw A0, C0 push C2 push C3 XCALL __muluhisi3 pop A1 pop A0 ;; A1:A0 now contains the high word of A mul A0, B0 add C2, r0 adc C3, r1 mul A0, B1 add C3, r0 mul A1, B0 add C3, r0 clr __zero_reg__ ret ENDF __mulsi3 #endif /* L_mulsi3 */ #undef A0 #undef A1 #undef B0 #undef B1 #undef B2 #undef B3 #undef C0 #undef C1 #undef C2 #undef C3 #endif /* __AVR_HAVE_MUL__ */ /******************************************************* Multiplication 24 x 24 with MUL *******************************************************/ #if defined (L_mulpsi3) ;; A[0..2]: In: Multiplicand; Out: Product #define A0 22 #define A1 A0+1 #define A2 A0+2 ;; B[0..2]: In: Multiplier #define B0 18 #define B1 B0+1 #define B2 B0+2 #if defined (__AVR_HAVE_MUL__) ;; C[0..2]: Expand Result #define C0 22 #define C1 C0+1 #define C2 C0+2 ;; R24:R22 *= R20:R18 ;; Clobbers: r21, r25, r26, r27, __tmp_reg__ #define AA0 26 #define AA2 21 DEFUN __mulpsi3 wmov AA0, A0 mov AA2, A2 XCALL __umulhisi3 mul AA2, B0 $ add C2, r0 mul AA0, B2 $ add C2, r0 clr __zero_reg__ ret ENDF __mulpsi3 #undef AA2 #undef AA0 #undef C2 #undef C1 #undef C0 #else /* !HAVE_MUL */ ;; C[0..2]: Expand Result #if defined (__AVR_TINY__) #define C0 16 #else #define C0 0 #endif /* defined (__AVR_TINY__) */ #define C1 C0+1 #define C2 21 ;; R24:R22 *= R20:R18 ;; Clobbers: __tmp_reg__, R18, R19, R20, R21 DEFUN __mulpsi3 #if defined (__AVR_TINY__) in r26,__SP_L__ in r27,__SP_H__ subi r26, lo8(-3) ; Add 3 to point past return address sbci r27, hi8(-3) push B0 ; save callee saved regs push B1 ld B0,X+ ; load from caller stack ld B1,X+ ld B2,X+ #endif /* defined (__AVR_TINY__) */ ;; C[] = 0 clr __tmp_reg__ clr C2 0: ;; Shift N-th Bit of B[] into Carry. N = 24 - Loop LSR B2 $ ror B1 $ ror B0 ;; If the N-th Bit of B[] was set... 
brcc 1f ;; ...then add A[] * 2^N to the Result C[] ADD C0,A0 $ adc C1,A1 $ adc C2,A2 1: ;; Multiply A[] by 2 LSL A0 $ rol A1 $ rol A2 ;; Loop until B[] is 0 subi B0,0 $ sbci B1,0 $ sbci B2,0 brne 0b ;; Copy C[] to the return Register A[] wmov A0, C0 mov A2, C2 clr __zero_reg__ #if defined (__AVR_TINY__) pop B1 pop B0 #endif /* (__AVR_TINY__) */ ret ENDF __mulpsi3 #undef C2 #undef C1 #undef C0 #endif /* HAVE_MUL */ #undef B2 #undef B1 #undef B0 #undef A2 #undef A1 #undef A0 #endif /* L_mulpsi3 */ #if defined (L_mulsqipsi3) && defined (__AVR_HAVE_MUL__) ;; A[0..2]: In: Multiplicand #define A0 22 #define A1 A0+1 #define A2 A0+2 ;; BB: In: Multiplier #define BB 25 ;; C[0..2]: Result #define C0 18 #define C1 C0+1 #define C2 C0+2 ;; C[] = A[] * sign_extend (BB) DEFUN __mulsqipsi3 mul A0, BB movw C0, r0 mul A2, BB mov C2, r0 mul A1, BB add C1, r0 adc C2, r1 clr __zero_reg__ sbrs BB, 7 ret ;; One-extend BB sub C1, A0 sbc C2, A1 ret ENDF __mulsqipsi3 #undef C2 #undef C1 #undef C0 #undef BB #undef A2 #undef A1 #undef A0 #endif /* L_mulsqipsi3 && HAVE_MUL */ /******************************************************* Multiplication 64 x 64 *******************************************************/ ;; A[] = A[] * B[] ;; A[0..7]: In: Multiplicand ;; Out: Product #define A0 18 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define A4 A0+4 #define A5 A0+5 #define A6 A0+6 #define A7 A0+7 ;; B[0..7]: In: Multiplier #define B0 10 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define B4 B0+4 #define B5 B0+5 #define B6 B0+6 #define B7 B0+7 #ifndef __AVR_TINY__ #if defined (__AVR_HAVE_MUL__) ;; Define C[] for convenience ;; Notice that parts of C[] overlap A[] respective B[] #define C0 16 #define C1 C0+1 #define C2 20 #define C3 C2+1 #define C4 28 #define C5 C4+1 #define C6 C4+2 #define C7 C4+3 #if defined (L_muldi3) ;; A[] *= B[] ;; R25:R18 *= R17:R10 ;; Ordinary ABI-Function DEFUN __muldi3 push r29 push r28 push r17 push r16 ;; Counting in Words, we have to perform a 4 * 4 Multiplication ;; 3 * 0 + 0 * 3 mul A7,B0 $ $ mov C7,r0 mul A0,B7 $ $ add C7,r0 mul A6,B1 $ $ add C7,r0 mul A6,B0 $ mov C6,r0 $ add C7,r1 mul B6,A1 $ $ add C7,r0 mul B6,A0 $ add C6,r0 $ adc C7,r1 ;; 1 * 2 mul A2,B4 $ add C6,r0 $ adc C7,r1 mul A3,B4 $ $ add C7,r0 mul A2,B5 $ $ add C7,r0 push A5 push A4 push B1 push B0 push A3 push A2 ;; 0 * 0 wmov 26, B0 XCALL __umulhisi3 wmov C0, 22 wmov C2, 24 ;; 0 * 2 wmov 26, B4 XCALL __umulhisi3 $ wmov C4,22 $ add C6,24 $ adc C7,25 wmov 26, B2 ;; 0 * 1 XCALL __muldi3_6 pop A0 pop A1 ;; 1 * 1 wmov 26, B2 XCALL __umulhisi3 $ add C4,22 $ adc C5,23 $ adc C6,24 $ adc C7,25 pop r26 pop r27 ;; 1 * 0 XCALL __muldi3_6 pop A0 pop A1 ;; 2 * 0 XCALL __umulhisi3 $ add C4,22 $ adc C5,23 $ adc C6,24 $ adc C7,25 ;; 2 * 1 wmov 26, B2 XCALL __umulhisi3 $ $ $ add C6,22 $ adc C7,23 ;; A[] = C[] wmov A0, C0 ;; A2 = C2 already wmov A4, C4 wmov A6, C6 pop r16 pop r17 pop r28 pop r29 ret ENDF __muldi3 #endif /* L_muldi3 */ #if defined (L_muldi3_6) ;; A helper for some 64-bit multiplications with MUL available DEFUN __muldi3_6 __muldi3_6: XCALL __umulhisi3 add C2, 22 adc C3, 23 adc C4, 24 adc C5, 25 brcc 0f adiw C6, 1 0: ret ENDF __muldi3_6 #endif /* L_muldi3_6 */ #undef C7 #undef C6 #undef C5 #undef C4 #undef C3 #undef C2 #undef C1 #undef C0 #else /* !HAVE_MUL */ #if defined (L_muldi3) #define C0 26 #define C1 C0+1 #define C2 C0+2 #define C3 C0+3 #define C4 C0+4 #define C5 C0+5 #define C6 0 #define C7 C6+1 #define Loop 9 ;; A[] *= B[] ;; R25:R18 *= R17:R10 ;; Ordinary ABI-Function DEFUN __muldi3 push r29 push r28 push Loop ldi 
C0, 64 mov Loop, C0 ;; C[] = 0 clr __tmp_reg__ wmov C0, 0 wmov C2, 0 wmov C4, 0 0: ;; Rotate B[] right by 1 and set Carry to the N-th Bit of B[] ;; where N = 64 - Loop. ;; Notice that B[] = B[] >>> 64 so after this Routine has finished, ;; B[] will have its initial Value again. LSR B7 $ ror B6 $ ror B5 $ ror B4 ror B3 $ ror B2 $ ror B1 $ ror B0 ;; If the N-th Bit of B[] was set then... brcc 1f ;; ...finish Rotation... ori B7, 1 << 7 ;; ...and add A[] * 2^N to the Result C[] ADD C0,A0 $ adc C1,A1 $ adc C2,A2 $ adc C3,A3 adc C4,A4 $ adc C5,A5 $ adc C6,A6 $ adc C7,A7 1: ;; Multiply A[] by 2 LSL A0 $ rol A1 $ rol A2 $ rol A3 rol A4 $ rol A5 $ rol A6 $ rol A7 dec Loop brne 0b ;; We expanded the Result in C[] ;; Copy Result to the Return Register A[] wmov A0, C0 wmov A2, C2 wmov A4, C4 wmov A6, C6 clr __zero_reg__ pop Loop pop r28 pop r29 ret ENDF __muldi3 #undef Loop #undef C7 #undef C6 #undef C5 #undef C4 #undef C3 #undef C2 #undef C1 #undef C0 #endif /* L_muldi3 */ #endif /* HAVE_MUL */ #endif /* if not __AVR_TINY__ */ #undef B7 #undef B6 #undef B5 #undef B4 #undef B3 #undef B2 #undef B1 #undef B0 #undef A7 #undef A6 #undef A5 #undef A4 #undef A3 #undef A2 #undef A1 #undef A0 /******************************************************* Widening Multiplication 64 = 32 x 32 with MUL *******************************************************/ #if defined (__AVR_HAVE_MUL__) #define A0 r22 #define A1 r23 #define A2 r24 #define A3 r25 #define B0 r18 #define B1 r19 #define B2 r20 #define B3 r21 #define C0 18 #define C1 C0+1 #define C2 20 #define C3 C2+1 #define C4 28 #define C5 C4+1 #define C6 C4+2 #define C7 C4+3 #if defined (L_umulsidi3) ;; Unsigned widening 64 = 32 * 32 Multiplication with MUL ;; R18[8] = R22[4] * R18[4] ;; ;; Ordinary ABI Function, but additionally sets ;; X = R20[2] = B2[2] ;; Z = R22[2] = A0[2] DEFUN __umulsidi3 clt ;; FALLTHRU ENDF __umulsidi3 ;; T = sign (A) DEFUN __umulsidi3_helper push 29 $ push 28 ; Y wmov 30, A2 ;; Counting in Words, we have to perform 4 Multiplications ;; 0 * 0 wmov 26, A0 XCALL __umulhisi3 push 23 $ push 22 ; C0 wmov 28, B0 wmov 18, B2 wmov C2, 24 push 27 $ push 26 ; A0 push 19 $ push 18 ; B2 ;; ;; 18 20 22 24 26 28 30 | B2, B3, A0, A1, C0, C1, Y ;; B2 C2 -- -- -- B0 A2 ;; 1 * 1 wmov 26, 30 ; A2 XCALL __umulhisi3 ;; Sign-extend A. 
T holds the sign of A brtc 0f ;; Subtract B from the high part of the result sub 22, 28 sbc 23, 29 sbc 24, 18 sbc 25, 19 0: wmov 18, 28 ;; B0 wmov C4, 22 wmov C6, 24 ;; ;; 18 20 22 24 26 28 30 | B2, B3, A0, A1, C0, C1, Y ;; B0 C2 -- -- A2 C4 C6 ;; ;; 1 * 0 XCALL __muldi3_6 ;; 0 * 1 pop 26 $ pop 27 ;; B2 pop 18 $ pop 19 ;; A0 XCALL __muldi3_6 ;; Move result C into place and save A0 in Z wmov 22, C4 wmov 24, C6 wmov 30, 18 ; A0 pop C0 $ pop C1 ;; Epilogue pop 28 $ pop 29 ;; Y ret ENDF __umulsidi3_helper #endif /* L_umulsidi3 */ #if defined (L_mulsidi3) ;; Signed widening 64 = 32 * 32 Multiplication ;; ;; R18[8] = R22[4] * R18[4] ;; Ordinary ABI Function DEFUN __mulsidi3 bst A3, 7 sbrs B3, 7 ; Enhanced core has no skip bug XJMP __umulsidi3_helper ;; B needs sign-extension push A3 push A2 XCALL __umulsidi3_helper ;; A0 survived in Z sub r22, r30 sbc r23, r31 pop r26 pop r27 sbc r24, r26 sbc r25, r27 ret ENDF __mulsidi3 #endif /* L_mulsidi3 */ #undef A0 #undef A1 #undef A2 #undef A3 #undef B0 #undef B1 #undef B2 #undef B3 #undef C0 #undef C1 #undef C2 #undef C3 #undef C4 #undef C5 #undef C6 #undef C7 #endif /* HAVE_MUL */ /********************************************************** Widening Multiplication 64 = 32 x 32 without MUL **********************************************************/ #ifndef __AVR_TINY__ /* if not __AVR_TINY__ */ #if defined (L_mulsidi3) && !defined (__AVR_HAVE_MUL__) #define A0 18 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define A4 A0+4 #define A5 A0+5 #define A6 A0+6 #define A7 A0+7 #define B0 10 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define B4 B0+4 #define B5 B0+5 #define B6 B0+6 #define B7 B0+7 #define AA0 22 #define AA1 AA0+1 #define AA2 AA0+2 #define AA3 AA0+3 #define BB0 18 #define BB1 BB0+1 #define BB2 BB0+2 #define BB3 BB0+3 #define Mask r30 ;; Signed / Unsigned widening 64 = 32 * 32 Multiplication without MUL ;; ;; R18[8] = R22[4] * R18[4] ;; Ordinary ABI Function DEFUN __mulsidi3 set skip ;; FALLTHRU ENDF __mulsidi3 DEFUN __umulsidi3 clt ; skipped ;; Save 10 Registers: R10..R17, R28, R29 do_prologue_saves 10 ldi Mask, 0xff bld Mask, 7 ;; Move B into place... wmov B0, BB0 wmov B2, BB2 ;; ...and extend it and BB3, Mask lsl BB3 sbc B4, B4 mov B5, B4 wmov B6, B4 ;; Move A into place... 
wmov A0, AA0 wmov A2, AA2 ;; ...and extend it and AA3, Mask lsl AA3 sbc A4, A4 mov A5, A4 wmov A6, A4 XCALL __muldi3 do_epilogue_restores 10 ENDF __umulsidi3 #undef A0 #undef A1 #undef A2 #undef A3 #undef A4 #undef A5 #undef A6 #undef A7 #undef B0 #undef B1 #undef B2 #undef B3 #undef B4 #undef B5 #undef B6 #undef B7 #undef AA0 #undef AA1 #undef AA2 #undef AA3 #undef BB0 #undef BB1 #undef BB2 #undef BB3 #undef Mask #endif /* L_mulsidi3 && !HAVE_MUL */ #endif /* if not __AVR_TINY__ */ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; .section .text.libgcc.div, "ax", @progbits /******************************************************* Division 8 / 8 => (result + remainder) *******************************************************/ #define r_rem r25 /* remainder */ #define r_arg1 r24 /* dividend, quotient */ #define r_arg2 r22 /* divisor */ #define r_cnt r23 /* loop count */ #if defined (L_udivmodqi4) DEFUN __udivmodqi4 sub r_rem,r_rem ; clear remainder and carry ldi r_cnt,9 ; init loop counter rjmp __udivmodqi4_ep ; jump to entry point __udivmodqi4_loop: rol r_rem ; shift dividend into remainder cp r_rem,r_arg2 ; compare remainder & divisor brcs __udivmodqi4_ep ; remainder < divisor sub r_rem,r_arg2 ; reduce remainder __udivmodqi4_ep: rol r_arg1 ; shift dividend (with CARRY) dec r_cnt ; decrement loop counter brne __udivmodqi4_loop com r_arg1 ; complement result ; because C flag was complemented in loop ret ENDF __udivmodqi4 #endif /* defined (L_udivmodqi4) */ #if defined (L_divmodqi4) DEFUN __divmodqi4 bst r_arg1,7 ; store sign of dividend mov __tmp_reg__,r_arg1 eor __tmp_reg__,r_arg2; r0.7 is sign of result sbrc r_arg1,7 neg r_arg1 ; dividend negative : negate sbrc r_arg2,7 neg r_arg2 ; divisor negative : negate XCALL __udivmodqi4 ; do the unsigned div/mod brtc __divmodqi4_1 neg r_rem ; correct remainder sign __divmodqi4_1: sbrc __tmp_reg__,7 neg r_arg1 ; correct result sign __divmodqi4_exit: ret ENDF __divmodqi4 #endif /* defined (L_divmodqi4) */ #undef r_rem #undef r_arg1 #undef r_arg2 #undef r_cnt /******************************************************* Division 16 / 16 => (result + remainder) *******************************************************/ #define r_remL r26 /* remainder Low */ #define r_remH r27 /* remainder High */ /* return: remainder */ #define r_arg1L r24 /* dividend Low */ #define r_arg1H r25 /* dividend High */ /* return: quotient */ #define r_arg2L r22 /* divisor Low */ #define r_arg2H r23 /* divisor High */ #define r_cnt r21 /* loop count */ #if defined (L_udivmodhi4) DEFUN __udivmodhi4 sub r_remL,r_remL sub r_remH,r_remH ; clear remainder and carry ldi r_cnt,17 ; init loop counter rjmp __udivmodhi4_ep ; jump to entry point __udivmodhi4_loop: rol r_remL ; shift dividend into remainder rol r_remH cp r_remL,r_arg2L ; compare remainder & divisor cpc r_remH,r_arg2H brcs __udivmodhi4_ep ; remainder < divisor sub r_remL,r_arg2L ; reduce remainder sbc r_remH,r_arg2H __udivmodhi4_ep: rol r_arg1L ; shift dividend (with CARRY) rol r_arg1H dec r_cnt ; decrement loop counter brne __udivmodhi4_loop com r_arg1L com r_arg1H ; div/mod results to return registers, as for the div() function mov_l r_arg2L, r_arg1L ; quotient mov_h r_arg2H, r_arg1H mov_l r_arg1L, r_remL ; remainder mov_h r_arg1H, r_remH ret ENDF __udivmodhi4 #endif /* defined (L_udivmodhi4) */ #if defined (L_divmodhi4) DEFUN __divmodhi4 .global _div _div: bst r_arg1H,7 ; store sign of dividend mov __tmp_reg__,r_arg2H brtc 0f com __tmp_reg__ ; r0.7 is sign of result rcall __divmodhi4_neg1 ; dividend negative: negate 0: 
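;; (At this point T holds the sign of the dividend, r0.7 holds the sign of the quotient, and the dividend has already been negated if it was negative; the divisor is handled next.)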
sbrc r_arg2H,7 rcall __divmodhi4_neg2 ; divisor negative: negate XCALL __udivmodhi4 ; do the unsigned div/mod sbrc __tmp_reg__,7 rcall __divmodhi4_neg2 ; correct remainder sign brtc __divmodhi4_exit __divmodhi4_neg1: ;; correct dividend/remainder sign com r_arg1H neg r_arg1L sbci r_arg1H,0xff ret __divmodhi4_neg2: ;; correct divisor/result sign com r_arg2H neg r_arg2L sbci r_arg2H,0xff __divmodhi4_exit: ret ENDF __divmodhi4 #endif /* defined (L_divmodhi4) */ #undef r_remH #undef r_remL #undef r_arg1H #undef r_arg1L #undef r_arg2H #undef r_arg2L #undef r_cnt /******************************************************* Division 24 / 24 => (result + remainder) *******************************************************/ ;; A[0..2]: In: Dividend; Out: Quotient #define A0 22 #define A1 A0+1 #define A2 A0+2 ;; B[0..2]: In: Divisor; Out: Remainder #define B0 18 #define B1 B0+1 #define B2 B0+2 ;; C[0..2]: Expand remainder #define C0 __zero_reg__ #define C1 26 #define C2 25 ;; Loop counter #define r_cnt 21 #if defined (L_udivmodpsi4) ;; R24:R22 = R24:R22 udiv R20:R18 ;; R20:R18 = R24:R22 umod R20:R18 ;; Clobbers: R21, R25, R26 DEFUN __udivmodpsi4 ; init loop counter ldi r_cnt, 24+1 ; Clear remainder and carry. C0 is already 0 clr C1 sub C2, C2 ; jump to entry point rjmp __udivmodpsi4_start __udivmodpsi4_loop: ; shift dividend into remainder rol C0 rol C1 rol C2 ; compare remainder & divisor cp C0, B0 cpc C1, B1 cpc C2, B2 brcs __udivmodpsi4_start ; remainder < divisor sub C0, B0 ; reduce remainder sbc C1, B1 sbc C2, B2 __udivmodpsi4_start: ; shift dividend (with CARRY) rol A0 rol A1 rol A2 ; decrement loop counter dec r_cnt brne __udivmodpsi4_loop com A0 com A1 com A2 ; div/mod results to return registers ; remainder mov B0, C0 mov B1, C1 mov B2, C2 clr __zero_reg__ ; C0 ret ENDF __udivmodpsi4 #endif /* defined (L_udivmodpsi4) */ #if defined (L_divmodpsi4) ;; R24:R22 = R24:R22 div R20:R18 ;; R20:R18 = R24:R22 mod R20:R18 ;; Clobbers: T, __tmp_reg__, R21, R25, R26 DEFUN __divmodpsi4 ; R0.7 will contain the sign of the result: ; R0.7 = A.sign ^ B.sign mov __tmp_reg__, B2 ; T-flag = sign of dividend bst A2, 7 brtc 0f com __tmp_reg__ ; Adjust dividend's sign rcall __divmodpsi4_negA 0: ; Adjust divisor's sign sbrc B2, 7 rcall __divmodpsi4_negB ; Do the unsigned div/mod XCALL __udivmodpsi4 ; Adjust quotient's sign sbrc __tmp_reg__, 7 rcall __divmodpsi4_negA ; Adjust remainder's sign brtc __divmodpsi4_end __divmodpsi4_negB: ; Correct divisor/remainder sign com B2 com B1 neg B0 sbci B1, -1 sbci B2, -1 ret ; Correct dividend/quotient sign __divmodpsi4_negA: com A2 com A1 neg A0 sbci A1, -1 sbci A2, -1 __divmodpsi4_end: ret ENDF __divmodpsi4 #endif /* defined (L_divmodpsi4) */ #undef A0 #undef A1 #undef A2 #undef B0 #undef B1 #undef B2 #undef C0 #undef C1 #undef C2 #undef r_cnt /******************************************************* Division 32 / 32 => (result + remainder) *******************************************************/ #define r_remHH r31 /* remainder High */ #define r_remHL r30 #define r_remH r27 #define r_remL r26 /* remainder Low */ /* return: remainder */ #define r_arg1HH r25 /* dividend High */ #define r_arg1HL r24 #define r_arg1H r23 #define r_arg1L r22 /* dividend Low */ /* return: quotient */ #define r_arg2HH r21 /* divisor High */ #define r_arg2HL r20 #define r_arg2H r19 #define r_arg2L r18 /* divisor Low */ #define r_cnt __zero_reg__ /* loop count (0 after the loop!) 
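-- reusing __zero_reg__ as the counter is deliberate: it is 0 again exactly when the loop falls through, so the zero-register invariant is restored without an extra CLR; see the "__zero_reg__ now restored" note in the loop below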
*/ #if defined (L_udivmodsi4) DEFUN __udivmodsi4 ldi r_remL, 33 ; init loop counter mov r_cnt, r_remL sub r_remL,r_remL sub r_remH,r_remH ; clear remainder and carry mov_l r_remHL, r_remL mov_h r_remHH, r_remH rjmp __udivmodsi4_ep ; jump to entry point __udivmodsi4_loop: rol r_remL ; shift dividend into remainder rol r_remH rol r_remHL rol r_remHH cp r_remL,r_arg2L ; compare remainder & divisor cpc r_remH,r_arg2H cpc r_remHL,r_arg2HL cpc r_remHH,r_arg2HH brcs __udivmodsi4_ep ; remainder < divisor sub r_remL,r_arg2L ; reduce remainder sbc r_remH,r_arg2H sbc r_remHL,r_arg2HL sbc r_remHH,r_arg2HH __udivmodsi4_ep: rol r_arg1L ; shift dividend (with CARRY) rol r_arg1H rol r_arg1HL rol r_arg1HH dec r_cnt ; decrement loop counter brne __udivmodsi4_loop ; __zero_reg__ now restored (r_cnt == 0) com r_arg1L com r_arg1H com r_arg1HL com r_arg1HH ; div/mod results to return registers, as for the ldiv() function mov_l r_arg2L, r_arg1L ; quotient mov_h r_arg2H, r_arg1H mov_l r_arg2HL, r_arg1HL mov_h r_arg2HH, r_arg1HH mov_l r_arg1L, r_remL ; remainder mov_h r_arg1H, r_remH mov_l r_arg1HL, r_remHL mov_h r_arg1HH, r_remHH ret ENDF __udivmodsi4 #endif /* defined (L_udivmodsi4) */ #if defined (L_divmodsi4) DEFUN __divmodsi4 mov __tmp_reg__,r_arg2HH bst r_arg1HH,7 ; store sign of dividend brtc 0f com __tmp_reg__ ; r0.7 is sign of result XCALL __negsi2 ; dividend negative: negate 0: sbrc r_arg2HH,7 rcall __divmodsi4_neg2 ; divisor negative: negate XCALL __udivmodsi4 ; do the unsigned div/mod sbrc __tmp_reg__, 7 ; correct quotient sign rcall __divmodsi4_neg2 brtc __divmodsi4_exit ; correct remainder sign XJMP __negsi2 __divmodsi4_neg2: ;; correct divisor/quotient sign com r_arg2HH com r_arg2HL com r_arg2H neg r_arg2L sbci r_arg2H,0xff sbci r_arg2HL,0xff sbci r_arg2HH,0xff __divmodsi4_exit: ret ENDF __divmodsi4 #endif /* defined (L_divmodsi4) */ #if defined (L_negsi2) ;; (set (reg:SI 22) ;; (neg:SI (reg:SI 22))) ;; Sets the V flag for signed overflow tests DEFUN __negsi2 NEG4 22 ret ENDF __negsi2 #endif /* L_negsi2 */ #undef r_remHH #undef r_remHL #undef r_remH #undef r_remL #undef r_arg1HH #undef r_arg1HL #undef r_arg1H #undef r_arg1L #undef r_arg2HH #undef r_arg2HL #undef r_arg2H #undef r_arg2L #undef r_cnt /* *di routines use registers below R19 and won't work with tiny arch right now. */ #if !defined (__AVR_TINY__) /******************************************************* Division 64 / 64 Modulo 64 % 64 *******************************************************/ ;; Use Speed-optimized Version on "big" Devices, i.e. Devices with ;; at least 16k of Program Memory. For smaller Devices, depend ;; on MOVW and SP Size. There is a Connection between SP Size and ;; Flash Size so that SP Size can be used to test for Flash Size. 
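;; (Summary of the tiers selected below, derived from the preprocessor
;; conditions themselves rather than from any separate documentation:
;;   SPEED_DIV = 8  : devices with JMP/CALL, i.e. at least 16 KiB Flash
;;   SPEED_DIV = 16 : devices with MOVW and a 16-bit Stack Pointer
;;   SPEED_DIV = 0  : everything else, plain 64-round shift-and-subtract.)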
#if defined (__AVR_HAVE_JMP_CALL__) # define SPEED_DIV 8 #elif defined (__AVR_HAVE_MOVW__) && defined (__AVR_HAVE_SPH__) # define SPEED_DIV 16 #else # define SPEED_DIV 0 #endif ;; A[0..7]: In: Dividend; ;; Out: Quotient (T = 0) ;; Out: Remainder (T = 1) #define A0 18 #define A1 A0+1 #define A2 A0+2 #define A3 A0+3 #define A4 A0+4 #define A5 A0+5 #define A6 A0+6 #define A7 A0+7 ;; B[0..7]: In: Divisor; Out: Clobber #define B0 10 #define B1 B0+1 #define B2 B0+2 #define B3 B0+3 #define B4 B0+4 #define B5 B0+5 #define B6 B0+6 #define B7 B0+7 ;; C[0..7]: Expand remainder; Out: Remainder (unused) #define C0 8 #define C1 C0+1 #define C2 30 #define C3 C2+1 #define C4 28 #define C5 C4+1 #define C6 26 #define C7 C6+1 ;; Holds Signs during Division Routine #define SS __tmp_reg__ ;; Bit-Counter in Division Routine #define R_cnt __zero_reg__ ;; Scratch Register for Negation #define NN r31 #if defined (L_udivdi3) ;; R25:R18 = R25:R18 umod R17:R10 ;; Ordinary ABI-Function DEFUN __umoddi3 set rjmp __udivdi3_umoddi3 ENDF __umoddi3 ;; R25:R18 = R25:R18 udiv R17:R10 ;; Ordinary ABI-Function DEFUN __udivdi3 clt ENDF __udivdi3 DEFUN __udivdi3_umoddi3 push C0 push C1 push C4 push C5 XCALL __udivmod64 pop C5 pop C4 pop C1 pop C0 ret ENDF __udivdi3_umoddi3 #endif /* L_udivdi3 */ #if defined (L_udivmod64) ;; Worker Routine for 64-Bit unsigned Quotient and Remainder Computation ;; No Registers saved/restored; the Callers will take Care. ;; Preserves B[] and T-flag ;; T = 0: Compute Quotient in A[] ;; T = 1: Compute Remainder in A[] and shift SS one Bit left DEFUN __udivmod64 ;; Clear Remainder (C6, C7 will follow) clr C0 clr C1 wmov C2, C0 wmov C4, C0 ldi C7, 64 #if SPEED_DIV == 0 || SPEED_DIV == 16 ;; Initialize Loop-Counter mov R_cnt, C7 wmov C6, C0 #endif /* SPEED_DIV */ #if SPEED_DIV == 8 push A7 clr C6 1: ;; Compare shifted Dividend against Divisor ;; If -- even after Shifting -- it is smaller... CP A7,B0 $ cpc C0,B1 $ cpc C1,B2 $ cpc C2,B3 cpc C3,B4 $ cpc C4,B5 $ cpc C5,B6 $ cpc C6,B7 brcc 2f ;; ...then we can subtract it. Thus, it is legal to shift left $ mov C6,C5 $ mov C5,C4 $ mov C4,C3 mov C3,C2 $ mov C2,C1 $ mov C1,C0 $ mov C0,A7 mov A7,A6 $ mov A6,A5 $ mov A5,A4 $ mov A4,A3 mov A3,A2 $ mov A2,A1 $ mov A1,A0 $ clr A0 ;; 8 Bits are done subi C7, 8 brne 1b ;; Shifted 64 Bits: A7 has traveled to C7 pop C7 ;; Divisor is greater than Dividend. We have: ;; A[] % B[] = A[] ;; A[] / B[] = 0 ;; Thus, we can return immediately rjmp 5f 2: ;; Initialize Bit-Counter with Number of Bits still to be performed mov R_cnt, C7 ;; Push of A7 is not needed because C7 is still 0 pop C7 clr C7 #elif SPEED_DIV == 16 ;; Compare shifted Dividend against Divisor cp A7, B3 cpc C0, B4 cpc C1, B5 cpc C2, B6 cpc C3, B7 brcc 2f ;; Divisor is greater than shifted Dividend: We can shift the Dividend ;; and it is still smaller than the Divisor --> Shift one 32-Bit Chunk wmov C2,A6 $ wmov C0,A4 wmov A6,A2 $ wmov A4,A0 wmov A2,C6 $ wmov A0,C4 ;; Set Bit Counter to 32 lsr R_cnt 2: #elif SPEED_DIV #error SPEED_DIV = ? #endif /* SPEED_DIV */ ;; The very Division + Remainder Routine 3: ;; Left-shift Dividend... lsl A0 $ rol A1 $ rol A2 $ rol A3 rol A4 $ rol A5 $ rol A6 $ rol A7 ;; ...into Remainder rol C0 $ rol C1 $ rol C2 $ rol C3 rol C4 $ rol C5 $ rol C6 $ rol C7 ;; Compare Remainder and Divisor CP C0,B0 $ cpc C1,B1 $ cpc C2,B2 $ cpc C3,B3 cpc C4,B4 $ cpc C5,B5 $ cpc C6,B6 $ cpc C7,B7 brcs 4f ;; Divisor fits into Remainder: Subtract it from Remainder... 
SUB C0,B0 $ sbc C1,B1 $ sbc C2,B2 $ sbc C3,B3 sbc C4,B4 $ sbc C5,B5 $ sbc C6,B6 $ sbc C7,B7 ;; ...and set the corresponding Bit in the upcoming Quotient ;; The Bit will travel to its final Position ori A0, 1 4: ;; This Bit is done dec R_cnt brne 3b ;; __zero_reg__ is 0 again ;; T = 0: We are fine with the Quotient in A[] ;; T = 1: Copy Remainder to A[] 5: brtc 6f wmov A0, C0 wmov A2, C2 wmov A4, C4 wmov A6, C6 ;; Move the Sign of the Result to SS.7 lsl SS 6: ret ENDF __udivmod64 #endif /* L_udivmod64 */ #if defined (L_divdi3) ;; R25:R18 = R25:R18 mod R17:R10 ;; Ordinary ABI-Function DEFUN __moddi3 set rjmp __divdi3_moddi3 ENDF __moddi3 ;; R25:R18 = R25:R18 div R17:R10 ;; Ordinary ABI-Function DEFUN __divdi3 clt ENDF __divdi3 DEFUN __divdi3_moddi3 #if SPEED_DIV mov r31, A7 or r31, B7 brmi 0f ;; Both Signs are 0: the following Complexity is not needed XJMP __udivdi3_umoddi3 #endif /* SPEED_DIV */ 0: ;; The Prologue ;; Save 12 Registers: Y, 17...8 ;; No Frame needed do_prologue_saves 12 ;; SS.7 will contain the Sign of the Quotient (A.sign * B.sign) ;; SS.6 will contain the Sign of the Remainder (A.sign) mov SS, A7 asr SS ;; Adjust Dividend's Sign as needed #if SPEED_DIV ;; Compiling for Speed we know that at least one Sign must be < 0 ;; Thus, if A[] >= 0 then we know B[] < 0 brpl 22f #else brpl 21f #endif /* SPEED_DIV */ XCALL __negdi2 ;; Adjust Divisor's Sign and SS.7 as needed 21: tst B7 brpl 3f 22: ldi NN, 1 << 7 eor SS, NN ldi NN, -1 com B4 $ com B5 $ com B6 $ com B7 $ com B1 $ com B2 $ com B3 NEG B0 $ sbc B1,NN $ sbc B2,NN $ sbc B3,NN sbc B4,NN $ sbc B5,NN $ sbc B6,NN $ sbc B7,NN 3: ;; Do the unsigned 64-Bit Division/Modulo (depending on T-flag) XCALL __udivmod64 ;; Adjust Result's Sign #ifdef __AVR_ERRATA_SKIP_JMP_CALL__ tst SS brpl 4f #else sbrc SS, 7 #endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */ XCALL __negdi2 4: ;; Epilogue: Restore 12 Registers and return do_epilogue_restores 12 ENDF __divdi3_moddi3 #endif /* L_divdi3 */ #undef R_cnt #undef SS #undef NN .section .text.libgcc, "ax", @progbits #define TT __tmp_reg__ #if defined (L_adddi3) ;; (set (reg:DI 18) ;; (plus:DI (reg:DI 18) ;; (reg:DI 10))) ;; Sets the V flag for signed overflow tests ;; Sets the C flag for unsigned overflow tests DEFUN __adddi3 ADD A0,B0 $ adc A1,B1 $ adc A2,B2 $ adc A3,B3 adc A4,B4 $ adc A5,B5 $ adc A6,B6 $ adc A7,B7 ret ENDF __adddi3 #endif /* L_adddi3 */ #if defined (L_adddi3_s8) ;; (set (reg:DI 18) ;; (plus:DI (reg:DI 18) ;; (sign_extend:SI (reg:QI 26)))) ;; Sets the V flag for signed overflow tests ;; Sets the C flag for unsigned overflow tests provided 0 <= R26 < 128 DEFUN __adddi3_s8 clr TT sbrc r26, 7 com TT ADD A0,r26 $ adc A1,TT $ adc A2,TT $ adc A3,TT adc A4,TT $ adc A5,TT $ adc A6,TT $ adc A7,TT ret ENDF __adddi3_s8 #endif /* L_adddi3_s8 */ #if defined (L_subdi3) ;; (set (reg:DI 18) ;; (minus:DI (reg:DI 18) ;; (reg:DI 10))) ;; Sets the V flag for signed overflow tests ;; Sets the C flag for unsigned overflow tests DEFUN __subdi3 SUB A0,B0 $ sbc A1,B1 $ sbc A2,B2 $ sbc A3,B3 sbc A4,B4 $ sbc A5,B5 $ sbc A6,B6 $ sbc A7,B7 ret ENDF __subdi3 #endif /* L_subdi3 */ #if defined (L_cmpdi2) ;; (set (cc0) ;; (compare (reg:DI 18) ;; (reg:DI 10))) DEFUN __cmpdi2 CP A0,B0 $ cpc A1,B1 $ cpc A2,B2 $ cpc A3,B3 cpc A4,B4 $ cpc A5,B5 $ cpc A6,B6 $ cpc A7,B7 ret ENDF __cmpdi2 #endif /* L_cmpdi2 */ #if defined (L_cmpdi2_s8) ;; (set (cc0) ;; (compare (reg:DI 18) ;; (sign_extend:SI (reg:QI 26)))) DEFUN __cmpdi2_s8 clr TT sbrc r26, 7 com TT CP A0,r26 $ cpc A1,TT $ cpc A2,TT $ cpc A3,TT cpc A4,TT $ cpc A5,TT $ cpc A6,TT $ cpc A7,TT ret 
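;; (TT is 0x00 or 0xFF according to R26's sign bit, so the CP/CPC chain above compares the 64-bit value in R25:R18 against the sign-extended 8-bit operand.)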
ENDF __cmpdi2_s8 #endif /* L_cmpdi2_s8 */ #if defined (L_negdi2) ;; (set (reg:DI 18) ;; (neg:DI (reg:DI 18))) ;; Sets the V flag for signed overflow tests DEFUN __negdi2 com A4 $ com A5 $ com A6 $ com A7 $ com A1 $ com A2 $ com A3 NEG A0 $ sbci A1,-1 $ sbci A2,-1 $ sbci A3,-1 sbci A4,-1 $ sbci A5,-1 $ sbci A6,-1 $ sbci A7,-1 ret ENDF __negdi2 #endif /* L_negdi2 */ #undef TT #undef C7 #undef C6 #undef C5 #undef C4 #undef C3 #undef C2 #undef C1 #undef C0 #undef B7 #undef B6 #undef B5 #undef B4 #undef B3 #undef B2 #undef B1 #undef B0 #undef A7 #undef A6 #undef A5 #undef A4 #undef A3 #undef A2 #undef A1 #undef A0 #endif /* !defined (__AVR_TINY__) */ .section .text.libgcc.prologue, "ax", @progbits /********************************** * This is a prologue subroutine **********************************/ #if !defined (__AVR_TINY__) #if defined (L_prologue) ;; This function does not clobber T-flag; 64-bit division relies on it DEFUN __prologue_saves__ push r2 push r3 push r4 push r5 push r6 push r7 push r8 push r9 push r10 push r11 push r12 push r13 push r14 push r15 push r16 push r17 push r28 push r29 #if !defined (__AVR_HAVE_SPH__) in r28,__SP_L__ sub r28,r26 out __SP_L__,r28 clr r29 #elif defined (__AVR_XMEGA__) in r28,__SP_L__ in r29,__SP_H__ sub r28,r26 sbc r29,r27 out __SP_L__,r28 out __SP_H__,r29 #else in r28,__SP_L__ in r29,__SP_H__ sub r28,r26 sbc r29,r27 in __tmp_reg__,__SREG__ cli out __SP_H__,r29 out __SREG__,__tmp_reg__ out __SP_L__,r28 #endif /* #SP = 8/16 */ XIJMP ENDF __prologue_saves__ #endif /* defined (L_prologue) */ /* * This is an epilogue subroutine */ #if defined (L_epilogue) DEFUN __epilogue_restores__ ldd r2,Y+18 ldd r3,Y+17 ldd r4,Y+16 ldd r5,Y+15 ldd r6,Y+14 ldd r7,Y+13 ldd r8,Y+12 ldd r9,Y+11 ldd r10,Y+10 ldd r11,Y+9 ldd r12,Y+8 ldd r13,Y+7 ldd r14,Y+6 ldd r15,Y+5 ldd r16,Y+4 ldd r17,Y+3 ldd r26,Y+2 #if !defined (__AVR_HAVE_SPH__) ldd r29,Y+1 add r28,r30 out __SP_L__,r28 mov r28, r26 #elif defined (__AVR_XMEGA__) ldd r27,Y+1 add r28,r30 adc r29,__zero_reg__ out __SP_L__,r28 out __SP_H__,r29 wmov 28, 26 #else ldd r27,Y+1 add r28,r30 adc r29,__zero_reg__ in __tmp_reg__,__SREG__ cli out __SP_H__,r29 out __SREG__,__tmp_reg__ out __SP_L__,r28 mov_l r28, r26 mov_h r29, r27 #endif /* #SP = 8/16 */ ret ENDF __epilogue_restores__ #endif /* defined (L_epilogue) */ #endif /* !defined (__AVR_TINY__) */ #ifdef L_exit .section .fini9,"ax",@progbits DEFUN _exit .weak exit exit: ENDF _exit /* Code from .fini8 ... .fini1 sections inserted by ld script. 
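Presumably (an inference from the section names, not from the linker script itself) the output sections .fini9 down to .fini0 are emitted back-to-back, so control simply falls through from _exit above, through the user fini code, into the final .fini0 fragment below.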
*/ .section .fini0,"ax",@progbits cli __stop_program: rjmp __stop_program #endif /* defined (L_exit) */ #ifdef L_cleanup .weak _cleanup .func _cleanup _cleanup: ret .endfunc #endif /* defined (L_cleanup) */ .section .text.libgcc, "ax", @progbits #ifdef L_tablejump2 DEFUN __tablejump2__ lsl r30 rol r31 #if defined (__AVR_HAVE_EIJMP_EICALL__) ;; Word address of gs() jumptable entry in R24:Z rol r24 out __RAMPZ__, r24 #elif defined (__AVR_HAVE_ELPM__) ;; Word address of jumptable entry in Z clr __tmp_reg__ rol __tmp_reg__ out __RAMPZ__, __tmp_reg__ #endif ;; Read word address from jumptable and jump #if defined (__AVR_HAVE_ELPMX__) elpm __tmp_reg__, Z+ elpm r31, Z mov r30, __tmp_reg__ #ifdef __AVR_HAVE_RAMPD__ ;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM out __RAMPZ__, __zero_reg__ #endif /* RAMPD */ XIJMP #elif defined (__AVR_HAVE_ELPM__) elpm push r0 adiw r30, 1 elpm push r0 ret #elif defined (__AVR_HAVE_LPMX__) lpm __tmp_reg__, Z+ lpm r31, Z mov r30, __tmp_reg__ ijmp #elif defined (__AVR_TINY__) wsubi 30, -(__AVR_TINY_PM_BASE_ADDRESS__) ; Add PM offset to Z ld __tmp_reg__, Z+ ld r31, Z ; Use ld instead of lpm to load Z mov r30, __tmp_reg__ ijmp #else lpm push r0 adiw r30, 1 lpm push r0 ret #endif ENDF __tablejump2__ #endif /* L_tablejump2 */ #if defined(__AVR_TINY__) #ifdef L_copy_data .section .init4,"ax",@progbits .global __do_copy_data __do_copy_data: ldi r18, hi8(__data_end) ldi r26, lo8(__data_start) ldi r27, hi8(__data_start) ldi r30, lo8(__data_load_start + __AVR_TINY_PM_BASE_ADDRESS__) ldi r31, hi8(__data_load_start + __AVR_TINY_PM_BASE_ADDRESS__) rjmp .L__do_copy_data_start .L__do_copy_data_loop: ld r19, z+ st X+, r19 .L__do_copy_data_start: cpi r26, lo8(__data_end) cpc r27, r18 brne .L__do_copy_data_loop #endif #else #ifdef L_copy_data .section .init4,"ax",@progbits DEFUN __do_copy_data #if defined(__AVR_HAVE_ELPMX__) ldi r17, hi8(__data_end) ldi r26, lo8(__data_start) ldi r27, hi8(__data_start) ldi r30, lo8(__data_load_start) ldi r31, hi8(__data_load_start) ldi r16, hh8(__data_load_start) out __RAMPZ__, r16 rjmp .L__do_copy_data_start .L__do_copy_data_loop: elpm r0, Z+ st X+, r0 .L__do_copy_data_start: cpi r26, lo8(__data_end) cpc r27, r17 brne .L__do_copy_data_loop #elif !defined(__AVR_HAVE_ELPMX__) && defined(__AVR_HAVE_ELPM__) ldi r17, hi8(__data_end) ldi r26, lo8(__data_start) ldi r27, hi8(__data_start) ldi r30, lo8(__data_load_start) ldi r31, hi8(__data_load_start) ldi r16, hh8(__data_load_start - 0x10000) .L__do_copy_data_carry: inc r16 out __RAMPZ__, r16 rjmp .L__do_copy_data_start .L__do_copy_data_loop: elpm st X+, r0 adiw r30, 1 brcs .L__do_copy_data_carry .L__do_copy_data_start: cpi r26, lo8(__data_end) cpc r27, r17 brne .L__do_copy_data_loop #elif !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) ldi r17, hi8(__data_end) ldi r26, lo8(__data_start) ldi r27, hi8(__data_start) ldi r30, lo8(__data_load_start) ldi r31, hi8(__data_load_start) rjmp .L__do_copy_data_start .L__do_copy_data_loop: #if defined (__AVR_HAVE_LPMX__) lpm r0, Z+ #else lpm adiw r30, 1 #endif st X+, r0 .L__do_copy_data_start: cpi r26, lo8(__data_end) cpc r27, r17 brne .L__do_copy_data_loop #endif /* !defined(__AVR_HAVE_ELPMX__) && !defined(__AVR_HAVE_ELPM__) */ #if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__) ;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM out __RAMPZ__, __zero_reg__ #endif /* ELPM && RAMPD */ ENDF __do_copy_data #endif /* L_copy_data */ #endif /* !defined (__AVR_TINY__) */ /* __do_clear_bss is only necessary if there 
is anything in .bss section. */ #ifdef L_clear_bss .section .init4,"ax",@progbits DEFUN __do_clear_bss ldi r18, hi8(__bss_end) ldi r26, lo8(__bss_start) ldi r27, hi8(__bss_start) rjmp .do_clear_bss_start .do_clear_bss_loop: st X+, __zero_reg__ .do_clear_bss_start: cpi r26, lo8(__bss_end) cpc r27, r18 brne .do_clear_bss_loop ENDF __do_clear_bss #endif /* L_clear_bss */ /* __do_global_ctors and __do_global_dtors are only necessary if there are any constructors/destructors. */ #if defined(__AVR_TINY__) #define cdtors_tst_reg r18 #else #define cdtors_tst_reg r17 #endif #ifdef L_ctors .section .init6,"ax",@progbits DEFUN __do_global_ctors ldi cdtors_tst_reg, pm_hi8(__ctors_start) ldi r28, pm_lo8(__ctors_end) ldi r29, pm_hi8(__ctors_end) #ifdef __AVR_HAVE_EIJMP_EICALL__ ldi r16, pm_hh8(__ctors_end) #endif /* HAVE_EIJMP */ rjmp .L__do_global_ctors_start .L__do_global_ctors_loop: wsubi 28, 1 #ifdef __AVR_HAVE_EIJMP_EICALL__ sbc r16, __zero_reg__ mov r24, r16 #endif /* HAVE_EIJMP */ mov_h r31, r29 mov_l r30, r28 XCALL __tablejump2__ .L__do_global_ctors_start: cpi r28, pm_lo8(__ctors_start) cpc r29, cdtors_tst_reg #ifdef __AVR_HAVE_EIJMP_EICALL__ ldi r24, pm_hh8(__ctors_start) cpc r16, r24 #endif /* HAVE_EIJMP */ brne .L__do_global_ctors_loop ENDF __do_global_ctors #endif /* L_ctors */ #ifdef L_dtors .section .fini6,"ax",@progbits DEFUN __do_global_dtors ldi cdtors_tst_reg, pm_hi8(__dtors_end) ldi r28, pm_lo8(__dtors_start) ldi r29, pm_hi8(__dtors_start) #ifdef __AVR_HAVE_EIJMP_EICALL__ ldi r16, pm_hh8(__dtors_start) #endif /* HAVE_EIJMP */ rjmp .L__do_global_dtors_start .L__do_global_dtors_loop: #ifdef __AVR_HAVE_EIJMP_EICALL__ mov r24, r16 #endif /* HAVE_EIJMP */ mov_h r31, r29 mov_l r30, r28 XCALL __tablejump2__ waddi 28, 1 #ifdef __AVR_HAVE_EIJMP_EICALL__ adc r16, __zero_reg__ #endif /* HAVE_EIJMP */ .L__do_global_dtors_start: cpi r28, pm_lo8(__dtors_end) cpc r29, cdtors_tst_reg #ifdef __AVR_HAVE_EIJMP_EICALL__ ldi r24, pm_hh8(__dtors_end) cpc r16, r24 #endif /* HAVE_EIJMP */ brne .L__do_global_dtors_loop ENDF __do_global_dtors #endif /* L_dtors */ #undef cdtors_tst_reg .section .text.libgcc, "ax", @progbits #if !defined (__AVR_TINY__) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Loading n bytes from Flash; n = 3,4 ;; R22... = Flash[Z] ;; Clobbers: __tmp_reg__ #if (defined (L_load_3) \ || defined (L_load_4)) \ && !defined (__AVR_HAVE_LPMX__) ;; Destination #define D0 22 #define D1 D0+1 #define D2 D0+2 #define D3 D0+3 .macro .load dest, n lpm mov \dest, r0 .if \dest != D0+\n-1 adiw r30, 1 .else sbiw r30, \n-1 .endif .endm #if defined (L_load_3) DEFUN __load_3 push D3 XCALL __load_4 pop D3 ret ENDF __load_3 #endif /* L_load_3 */ #if defined (L_load_4) DEFUN __load_4 .load D0, 4 .load D1, 4 .load D2, 4 .load D3, 4 ret ENDF __load_4 #endif /* L_load_4 */ #endif /* L_load_3 || L_load_3 */ #endif /* !defined (__AVR_TINY__) */ #if !defined (__AVR_TINY__) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Loading n bytes from Flash or RAM; n = 1,2,3,4 ;; R22... 
= Flash[R21:Z] or RAM[Z] depending on R21.7 ;; Clobbers: __tmp_reg__, R21, R30, R31 #if (defined (L_xload_1) \ || defined (L_xload_2) \ || defined (L_xload_3) \ || defined (L_xload_4)) ;; Destination #define D0 22 #define D1 D0+1 #define D2 D0+2 #define D3 D0+3 ;; Register containing bits 16+ of the address #define HHI8 21 .macro .xload dest, n #if defined (__AVR_HAVE_ELPMX__) elpm \dest, Z+ #elif defined (__AVR_HAVE_ELPM__) elpm mov \dest, r0 .if \dest != D0+\n-1 adiw r30, 1 adc HHI8, __zero_reg__ out __RAMPZ__, HHI8 .endif #elif defined (__AVR_HAVE_LPMX__) lpm \dest, Z+ #else lpm mov \dest, r0 .if \dest != D0+\n-1 adiw r30, 1 .endif #endif #if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__) .if \dest == D0+\n-1 ;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM out __RAMPZ__, __zero_reg__ .endif #endif .endm ; .xload #if defined (L_xload_1) DEFUN __xload_1 #if defined (__AVR_HAVE_LPMX__) && !defined (__AVR_HAVE_ELPM__) sbrc HHI8, 7 ld D0, Z sbrs HHI8, 7 lpm D0, Z ret #else sbrc HHI8, 7 rjmp 1f #if defined (__AVR_HAVE_ELPM__) out __RAMPZ__, HHI8 #endif /* __AVR_HAVE_ELPM__ */ .xload D0, 1 ret 1: ld D0, Z ret #endif /* LPMx && ! ELPM */ ENDF __xload_1 #endif /* L_xload_1 */ #if defined (L_xload_2) DEFUN __xload_2 sbrc HHI8, 7 rjmp 1f #if defined (__AVR_HAVE_ELPM__) out __RAMPZ__, HHI8 #endif /* __AVR_HAVE_ELPM__ */ .xload D0, 2 .xload D1, 2 ret 1: ld D0, Z+ ld D1, Z+ ret ENDF __xload_2 #endif /* L_xload_2 */ #if defined (L_xload_3) DEFUN __xload_3 sbrc HHI8, 7 rjmp 1f #if defined (__AVR_HAVE_ELPM__) out __RAMPZ__, HHI8 #endif /* __AVR_HAVE_ELPM__ */ .xload D0, 3 .xload D1, 3 .xload D2, 3 ret 1: ld D0, Z+ ld D1, Z+ ld D2, Z+ ret ENDF __xload_3 #endif /* L_xload_3 */ #if defined (L_xload_4) DEFUN __xload_4 sbrc HHI8, 7 rjmp 1f #if defined (__AVR_HAVE_ELPM__) out __RAMPZ__, HHI8 #endif /* __AVR_HAVE_ELPM__ */ .xload D0, 4 .xload D1, 4 .xload D2, 4 .xload D3, 4 ret 1: ld D0, Z+ ld D1, Z+ ld D2, Z+ ld D3, Z+ ret ENDF __xload_4 #endif /* L_xload_4 */ #endif /* L_xload_{1|2|3|4} */ #endif /* if !defined (__AVR_TINY__) */ #if !defined (__AVR_TINY__) ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; memcopy from Address Space __pgmx to RAM ;; R23:Z = Source Address ;; X = Destination Address ;; Clobbers: __tmp_reg__, R23, R24, R25, X, Z #if defined (L_movmemx) #define HHI8 23 #define LOOP 24 DEFUN __movmemx_qi ;; #Bytes to copy fit in 8 Bits (1..255) ;; Zero-extend Loop Counter clr LOOP+1 ;; FALLTHRU ENDF __movmemx_qi DEFUN __movmemx_hi ;; Read from where? sbrc HHI8, 7 rjmp 1f ;; Read from Flash #if defined (__AVR_HAVE_ELPM__) out __RAMPZ__, HHI8 #endif 0: ;; Load 1 Byte from Flash... #if defined (__AVR_HAVE_ELPMX__) elpm r0, Z+ #elif defined (__AVR_HAVE_ELPM__) elpm adiw r30, 1 adc HHI8, __zero_reg__ out __RAMPZ__, HHI8 #elif defined (__AVR_HAVE_LPMX__) lpm r0, Z+ #else lpm adiw r30, 1 #endif ;; ...and store that Byte to RAM Destination st X+, r0 sbiw LOOP, 1 brne 0b #if defined (__AVR_HAVE_ELPM__) && defined (__AVR_HAVE_RAMPD__) ;; Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM out __RAMPZ__, __zero_reg__ #endif /* ELPM && RAMPD */ ret ;; Read from RAM 1: ;; Read 1 Byte from RAM... 
ld r0, Z+ ;; and store that Byte to RAM Destination st X+, r0 sbiw LOOP, 1 brne 1b ret ENDF __movmemx_hi #undef HHI8 #undef LOOP #endif /* L_movmemx */ #endif /* !defined (__AVR_TINY__) */ .section .text.libgcc.builtins, "ax", @progbits /********************************** * Find first set Bit (ffs) **********************************/ #if defined (L_ffssi2) ;; find first set bit ;; r25:r24 = ffs32 (r25:r22) ;; clobbers: r22, r26 DEFUN __ffssi2 clr r26 tst r22 brne 1f subi r26, -8 or r22, r23 brne 1f subi r26, -8 or r22, r24 brne 1f subi r26, -8 or r22, r25 brne 1f ret 1: mov r24, r22 XJMP __loop_ffsqi2 ENDF __ffssi2 #endif /* defined (L_ffssi2) */ #if defined (L_ffshi2) ;; find first set bit ;; r25:r24 = ffs16 (r25:r24) ;; clobbers: r26 DEFUN __ffshi2 clr r26 #ifdef __AVR_ERRATA_SKIP_JMP_CALL__ ;; Some cores have problems skipping 2-word instructions tst r24 breq 2f #else cpse r24, __zero_reg__ #endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */ 1: XJMP __loop_ffsqi2 2: ldi r26, 8 or r24, r25 brne 1b ret ENDF __ffshi2 #endif /* defined (L_ffshi2) */ #if defined (L_loop_ffsqi2) ;; Helper for ffshi2, ffssi2 ;; r25:r24 = r26 + zero_extend16 (ffs8(r24)) ;; r24 must be != 0 ;; clobbers: r26 DEFUN __loop_ffsqi2 inc r26 lsr r24 brcc __loop_ffsqi2 mov r24, r26 clr r25 ret ENDF __loop_ffsqi2 #endif /* defined (L_loop_ffsqi2) */ /********************************** * Count trailing Zeros (ctz) **********************************/ #if defined (L_ctzsi2) ;; count trailing zeros ;; r25:r24 = ctz32 (r25:r22) ;; clobbers: r26, r22 ;; ctz(0) = 255 ;; Note that ctz(0) is undefined for GCC DEFUN __ctzsi2 XCALL __ffssi2 dec r24 ret ENDF __ctzsi2 #endif /* defined (L_ctzsi2) */ #if defined (L_ctzhi2) ;; count trailing zeros ;; r25:r24 = ctz16 (r25:r24) ;; clobbers: r26 ;; ctz(0) = 255 ;; Note that ctz(0) is undefined for GCC DEFUN __ctzhi2 XCALL __ffshi2 dec r24 ret ENDF __ctzhi2 #endif /* defined (L_ctzhi2) */ /********************************** * Count leading Zeros (clz) **********************************/ #if defined (L_clzdi2) ;; count leading zeros ;; r25:r24 = clz64 (r25:r18) ;; clobbers: r22, r23, r26 DEFUN __clzdi2 XCALL __clzsi2 sbrs r24, 5 ret mov_l r22, r18 mov_h r23, r19 mov_l r24, r20 mov_h r25, r21 XCALL __clzsi2 subi r24, -32 ret ENDF __clzdi2 #endif /* defined (L_clzdi2) */ #if defined (L_clzsi2) ;; count leading zeros ;; r25:r24 = clz32 (r25:r22) ;; clobbers: r26 DEFUN __clzsi2 XCALL __clzhi2 sbrs r24, 4 ret mov_l r24, r22 mov_h r25, r23 XCALL __clzhi2 subi r24, -16 ret ENDF __clzsi2 #endif /* defined (L_clzsi2) */ #if defined (L_clzhi2) ;; count leading zeros ;; r25:r24 = clz16 (r25:r24) ;; clobbers: r26 DEFUN __clzhi2 clr r26 tst r25 brne 1f subi r26, -8 or r25, r24 brne 1f ldi r24, 16 ret 1: cpi r25, 16 brsh 3f subi r26, -3 swap r25 2: inc r26 3: lsl r25 brcc 2b mov r24, r26 clr r25 ret ENDF __clzhi2 #endif /* defined (L_clzhi2) */ /********************************** * Parity **********************************/ #if defined (L_paritydi2) ;; r25:r24 = parity64 (r25:r18) ;; clobbers: __tmp_reg__ DEFUN __paritydi2 eor r24, r18 eor r24, r19 eor r24, r20 eor r24, r21 XJMP __paritysi2 ENDF __paritydi2 #endif /* defined (L_paritydi2) */ #if defined (L_paritysi2) ;; r25:r24 = parity32 (r25:r22) ;; clobbers: __tmp_reg__ DEFUN __paritysi2 eor r24, r22 eor r24, r23 XJMP __parityhi2 ENDF __paritysi2 #endif /* defined (L_paritysi2) */ #if defined (L_parityhi2) ;; r25:r24 = parity16 (r25:r24) ;; clobbers: __tmp_reg__ DEFUN __parityhi2 eor r24, r25 ;; FALLTHRU ENDF __parityhi2 ;; r25:r24 = parity8 (r24) ;; clobbers: 
__tmp_reg__ DEFUN __parityqi2 ;; parity is in r24[0..7] mov __tmp_reg__, r24 swap __tmp_reg__ eor r24, __tmp_reg__ ;; parity is in r24[0..3] subi r24, -4 andi r24, -5 subi r24, -6 ;; parity is in r24[0,3] sbrc r24, 3 inc r24 ;; parity is in r24[0] andi r24, 1 clr r25 ret ENDF __parityqi2 #endif /* defined (L_parityhi2) */ /********************************** * Population Count **********************************/ #if defined (L_popcounthi2) ;; population count ;; r25:r24 = popcount16 (r25:r24) ;; clobbers: __tmp_reg__ DEFUN __popcounthi2 XCALL __popcountqi2 push r24 mov r24, r25 XCALL __popcountqi2 clr r25 ;; FALLTHRU ENDF __popcounthi2 DEFUN __popcounthi2_tail pop __tmp_reg__ add r24, __tmp_reg__ ret ENDF __popcounthi2_tail #endif /* defined (L_popcounthi2) */ #if defined (L_popcountsi2) ;; population count ;; r25:r24 = popcount32 (r25:r22) ;; clobbers: __tmp_reg__ DEFUN __popcountsi2 XCALL __popcounthi2 push r24 mov_l r24, r22 mov_h r25, r23 XCALL __popcounthi2 XJMP __popcounthi2_tail ENDF __popcountsi2 #endif /* defined (L_popcountsi2) */ #if defined (L_popcountdi2) ;; population count ;; r25:r24 = popcount64 (r25:r18) ;; clobbers: r22, r23, __tmp_reg__ DEFUN __popcountdi2 XCALL __popcountsi2 push r24 mov_l r22, r18 mov_h r23, r19 mov_l r24, r20 mov_h r25, r21 XCALL __popcountsi2 XJMP __popcounthi2_tail ENDF __popcountdi2 #endif /* defined (L_popcountdi2) */ #if defined (L_popcountqi2) ;; population count ;; r24 = popcount8 (r24) ;; clobbers: __tmp_reg__ DEFUN __popcountqi2 mov __tmp_reg__, r24 andi r24, 1 lsr __tmp_reg__ lsr __tmp_reg__ adc r24, __zero_reg__ lsr __tmp_reg__ adc r24, __zero_reg__ lsr __tmp_reg__ adc r24, __zero_reg__ lsr __tmp_reg__ adc r24, __zero_reg__ lsr __tmp_reg__ adc r24, __zero_reg__ lsr __tmp_reg__ adc r24, __tmp_reg__ ret ENDF __popcountqi2 #endif /* defined (L_popcountqi2) */ /********************************** * Swap bytes **********************************/ ;; swap two registers with different register number .macro bswap a, b eor \a, \b eor \b, \a eor \a, \b .endm #if defined (L_bswapsi2) ;; swap bytes ;; r25:r22 = bswap32 (r25:r22) DEFUN __bswapsi2 bswap r22, r25 bswap r23, r24 ret ENDF __bswapsi2 #endif /* defined (L_bswapsi2) */ #if defined (L_bswapdi2) ;; swap bytes ;; r25:r18 = bswap64 (r25:r18) DEFUN __bswapdi2 bswap r18, r25 bswap r19, r24 bswap r20, r23 bswap r21, r22 ret ENDF __bswapdi2 #endif /* defined (L_bswapdi2) */ /********************************** * 64-bit shifts **********************************/ #if defined (L_ashrdi3) #define SS __zero_reg__ ;; Arithmetic shift right ;; r25:r18 = ashr64 (r25:r18, r17:r16) DEFUN __ashrdi3 sbrc r25, 7 com SS ;; FALLTHRU ENDF __ashrdi3 ;; Logic shift right ;; r25:r18 = lshr64 (r25:r18, r17:r16) DEFUN __lshrdi3 ;; Signs are in SS (zero_reg) mov __tmp_reg__, r16 0: cpi r16, 8 brlo 2f subi r16, 8 mov r18, r19 mov r19, r20 mov r20, r21 mov r21, r22 mov r22, r23 mov r23, r24 mov r24, r25 mov r25, SS rjmp 0b 1: asr SS ror r25 ror r24 ror r23 ror r22 ror r21 ror r20 ror r19 ror r18 2: dec r16 brpl 1b clr __zero_reg__ mov r16, __tmp_reg__ ret ENDF __lshrdi3 #undef SS #endif /* defined (L_ashrdi3) */ #if defined (L_ashldi3) ;; Shift left ;; r25:r18 = ashl64 (r25:r18, r17:r16) ;; This function does not clobber T. 
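;; (None of the instructions used below -- mov, cpi, subi, lsl, rol, dec and the branches -- write the T flag, which is why the guarantee above can be given; callers may therefore keep a flag in T across this call.)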
DEFUN __ashldi3 mov __tmp_reg__, r16 0: cpi r16, 8 brlo 2f mov r25, r24 mov r24, r23 mov r23, r22 mov r22, r21 mov r21, r20 mov r20, r19 mov r19, r18 clr r18 subi r16, 8 rjmp 0b 1: lsl r18 rol r19 rol r20 rol r21 rol r22 rol r23 rol r24 rol r25 2: dec r16 brpl 1b mov r16, __tmp_reg__ ret ENDF __ashldi3 #endif /* defined (L_ashldi3) */ #if defined (L_rotldi3) ;; Rotate left ;; r25:r18 = rotl64 (r25:r18, r17:r16) DEFUN __rotldi3 push r16 0: cpi r16, 8 brlo 2f subi r16, 8 mov __tmp_reg__, r25 mov r25, r24 mov r24, r23 mov r23, r22 mov r22, r21 mov r21, r20 mov r20, r19 mov r19, r18 mov r18, __tmp_reg__ rjmp 0b 1: lsl r18 rol r19 rol r20 rol r21 rol r22 rol r23 rol r24 rol r25 adc r18, __zero_reg__ 2: dec r16 brpl 1b pop r16 ret ENDF __rotldi3 #endif /* defined (L_rotldi3) */ .section .text.libgcc.fmul, "ax", @progbits /***********************************************************/ ;;; Softmul versions of FMUL, FMULS and FMULSU to implement ;;; __builtin_avr_fmul* if !AVR_HAVE_MUL /***********************************************************/ #define A1 24 #define B1 25 #define C0 22 #define C1 23 #define A0 __tmp_reg__ #ifdef L_fmuls ;;; r23:r22 = fmuls (r24, r25) like in FMULS instruction ;;; Clobbers: r24, r25, __tmp_reg__ DEFUN __fmuls ;; A0.7 = negate result? mov A0, A1 eor A0, B1 ;; B1 = |B1| sbrc B1, 7 neg B1 XJMP __fmulsu_exit ENDF __fmuls #endif /* L_fmuls */ #ifdef L_fmulsu ;;; r23:r22 = fmulsu (r24, r25) like in FMULSU instruction ;;; Clobbers: r24, r25, __tmp_reg__ DEFUN __fmulsu ;; A0.7 = negate result? mov A0, A1 ;; FALLTHRU ENDF __fmulsu ;; Helper for __fmuls and __fmulsu DEFUN __fmulsu_exit ;; A1 = |A1| sbrc A1, 7 neg A1 #ifdef __AVR_ERRATA_SKIP_JMP_CALL__ ;; Some cores have problems skipping 2-word instructions tst A0 brmi 1f #else sbrs A0, 7 #endif /* __AVR_ERRATA_SKIP_JMP_CALL__ */ XJMP __fmul 1: XCALL __fmul ;; C = -C iff A0.7 = 1 NEG2 C0 ret ENDF __fmulsu_exit #endif /* L_fmulsu */ #ifdef L_fmul ;;; r23:r22 = fmul (r24, r25) like in FMUL instruction ;;; Clobbers: r24, r25, __tmp_reg__ DEFUN __fmul ; clear result clr C0 clr C1 clr A0 1: tst B1 ;; 1.0 = 0x80, so test for bit 7 of B to see if A must be added to C. 2: brpl 3f ;; C += A add C0, A0 adc C1, A1 3: ;; A >>= 1 lsr A1 ror A0 ;; B <<= 1 lsl B1 brne 2b ret ENDF __fmul #endif /* L_fmul */ #undef A0 #undef A1 #undef B1 #undef C0 #undef C1 #include "lib1funcs-fixed.S"
4ms/metamodule-plugin-sdk
2,524
plugin-libc/libgcc/config/mmix/crtn.S
/* Copyright (C) 2001-2022 Free Software Foundation, Inc. Contributed by Hans-Peter Nilsson <hp@bitrange.com> This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ % This must be the last file on the link-line, allocating global registers % from the top. % Register $254 is the stack-pointer. sp GREG % Register $253 is frame-pointer. It's not supposed to be used in most % functions. fp GREG % $252 is the static chain register; nested functions receive the % context of the surrounding function through a pointer passed in this % register. static_chain GREG struct_value_reg GREG % These registers are used to pass state at an exceptional return (C++). eh_state_3 GREG eh_state_2 GREG eh_state_1 GREG eh_state_0 GREG #ifdef __MMIX_ABI_GNU__ % Allocate global registers used by the GNU ABI. gnu_parm_reg_16 GREG gnu_parm_reg_15 GREG gnu_parm_reg_14 GREG gnu_parm_reg_13 GREG gnu_parm_reg_12 GREG gnu_parm_reg_11 GREG gnu_parm_reg_10 GREG gnu_parm_reg_9 GREG gnu_parm_reg_8 GREG gnu_parm_reg_7 GREG gnu_parm_reg_6 GREG gnu_parm_reg_5 GREG gnu_parm_reg_4 GREG gnu_parm_reg_3 GREG gnu_parm_reg_2 GREG gnu_parm_reg_1 GREG #endif /* __MMIX_ABI_GNU__ */ % Provide last part of _init and _fini. % The return address is stored in the topmost stored register in the % register-stack. We ignore the current value in rJ. It is probably % garbage because each fragment of _init and _fini may have their own idea % of the current stack frame, if they're cut out from a "real" function % like in gcc/crtstuff.c. .section .init,"ax",@progbits GETA $255,0F PUT rJ,$255 POP 0,0 0H PUT rJ,$0 POP 0,0 .section .fini,"ax",@progbits GETA $255,0F PUT rJ,$255 POP 0,0 0H PUT rJ,$0 POP 0,0
4ms/metamodule-plugin-sdk
5,121
plugin-libc/libgcc/config/mmix/crti.S
/* Copyright (C) 2001-2022 Free Software Foundation, Inc. Contributed by Hans-Peter Nilsson <hp@bitrange.com> This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ % This is the crt0 equivalent for mmix-knuth-mmixware, for setting up % things for compiler-generated assembly-code and for setting up things % between where the simulator calls and main, and shutting things down on % the way back. There's an actual crt0.o elsewhere, but that's a dummy. % This file and the GCC output are supposed to be *reasonably* % mmixal-compatible to enable people to re-use output with Knuth's mmixal. % However, forward references are used more freely: we are using the % binutils tools. Users of mmixal beware; you will sometimes have to % re-order things or use temporary variables. % Users of mmixal will want to set up 8H and 9H to be .text and .data % respectively, so the compiler can switch between them pretending they're % segments. % This little treasure (some contents) is required so the 32 lowest % address bits of user data will not be zero. Because of truncation, % that would cause testcase gcc.c-torture/execute/980701-1.c to % incorrectly fail. .data ! mmixal:= 8H LOC Data_Segment .p2align 3 dstart OCTA 2009 .text ! mmixal:= 9H LOC 8B; LOC #100 .global Main % The __Stack_start symbol is provided by the link script. stackpp OCTA __Stack_start crtstxt OCTA _init % Assumed to be the lowest executed address. OCTA __etext % Assumed to be beyond the highest executed address. crtsdat OCTA dstart % Assumed to be the lowest accessed address. OCTA _end % Assumed to be beyond the highest accessed address. % "Main" is the magic symbol the simulator jumps to. We want to go % on to "main". % We need to set rG explicitly to avoid hard-to-debug situations. Main SETL $255,32 PUT rG,$255 % Make sure we have valid memory for addresses in .text and .data (and % .bss, but we include this in .data), for the benefit of mmo-using % simulators that require validation of addresses for which contents % is not present. Due to its implicit-zero nature, zeros in contents % may be left out in the mmo format, but we don't know the boundaries % of those zero-chunks; for mmo files from binutils, they correspond % to the beginning and end of sections in objects before linking. We % validate the contents by executing PRELD (0; one byte) on each % 2048-byte-boundary of our .text .data, and we assume this size % matches the magic lowest-denominator chunk-size for all % validation-requiring simulators. The effect of the PRELD (any size) % is assumed to be the same as initial loading of the contents, as % long as the PRELD happens before the first PUSHJ/PUSHGO. 
If it % happens after that, we'll need to distinguish between % access-for-execution and read/write access. GETA $255,crtstxt LDOU $2,$255,0 ANDNL $2,#7ff % Align the start at a 2048-byte boundary. LDOU $3,$255,8 SETL $4,2048 0H PRELD 0,$2,0 ADDU $2,$2,$4 CMP $255,$2,$3 BN $255,0B GETA $255,crtsdat LDOU $2,$255,0 ANDNL $2,#7ff LDOU $3,$255,8 0H PRELD 0,$2,0 ADDU $2,$2,$4 CMP $255,$2,$3 BN $255,0B % Initialize the stack pointer. It is supposedly made a global % zero-initialized (allowed to change) register in crtn.S; we use the % explicit number. GETA $255,stackpp LDOU $254,$255,0 PUSHJ $2,_init #ifdef __MMIX_ABI_GNU__ % Copy argc and argv from their initial position to argument registers % where necessary. SET $231,$0 SET $232,$1 #else % For the mmixware ABI, we need to move arguments. The return value will % appear in $0. SET $2,$1 SET $1,$0 #endif PUSHJ $0,main JMP exit % Provide the first part of _init and _fini. Save the return address on the % register stack. We eventually ignore the return address of these % PUSHJ:s, so it doesn't matter whether .init and .fini code calls % functions or where they store rJ. We shouldn't get there, so die % (TRAP Halt) if that happens. .section .init,"ax",@progbits .global _init _init: GET $0,:rJ PUSHJ $1,0F SETL $255,255 TRAP 0,0,0 0H IS @ % Register _fini to be executed as the last atexit function. #ifdef __MMIX_ABI_GNU__ GETA $231,_fini #else GETA $1,_fini #endif PUSHJ $0,atexit .section .fini,"ax",@progbits .global _fini _fini: GET $0,:rJ PUSHJ $1,0F SETL $255,255 TRAP 0,0,0 0H IS @
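The PRELD walk above is easy to mis-read in flattened form, so here is a hedged C model of it (touch_range is a hypothetical name; on MMIX the actual touch is a PRELD, not a load). It aligns the start address down to a 2048-byte boundary, then touches one byte per 2048-byte chunk until it passes the end address; crti.S runs the loop once for [_init, __etext) and once for [dstart, _end).

#include <stdint.h>

/* Model of the validation loops: ANDNL aligns down, then PRELD/ADDU/CMP/BN
   form a do-while that touches every 2048-byte chunk of the range. */
static void touch_range(uintptr_t start, uintptr_t end)
{
    uintptr_t p = start & ~(uintptr_t)0x7ff;   /* ANDNL $2,#7ff */
    do {
        (void)*(volatile uint8_t *)p;          /* stands in for PRELD 0,$2,0 */
        p += 2048;                             /* ADDU $2,$2,$4 */
    } while (p < end);                         /* CMP $255,$2,$3 / BN $255,0B */
}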
4ms/metamodule-plugin-sdk
48,950
plugin-libc/libgcc/config/v850/lib1funcs.S
/* libgcc routines for NEC V850. Copyright (C) 1996-2022 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifdef L_mulsi3 .text .globl ___mulsi3 .type ___mulsi3,@function ___mulsi3: #ifdef __v850__ /* #define SHIFT 12 #define MASK ((1 << SHIFT) - 1) #define STEP(i, j) \ ({ \ short a_part = (a >> (i)) & MASK; \ short b_part = (b >> (j)) & MASK; \ int res = (((int) a_part) * ((int) b_part)); \ res; \ }) int __mulsi3 (unsigned a, unsigned b) { return STEP (0, 0) + ((STEP (SHIFT, 0) + STEP (0, SHIFT)) << SHIFT) + ((STEP (0, 2 * SHIFT) + STEP (SHIFT, SHIFT) + STEP (2 * SHIFT, 0)) << (2 * SHIFT)); } */ mov r6, r14 movea lo(32767), r0, r10 and r10, r14 mov r7, r15 and r10, r15 shr 15, r6 mov r6, r13 and r10, r13 shr 15, r7 mov r7, r12 and r10, r12 shr 15, r6 shr 15, r7 mov r14, r10 mulh r15, r10 mov r14, r11 mulh r12, r11 mov r13, r16 mulh r15, r16 mulh r14, r7 mulh r15, r6 add r16, r11 mulh r13, r12 shl 15, r11 add r11, r10 add r12, r7 add r6, r7 shl 30, r7 add r7, r10 jmp [r31] #endif /* __v850__ */ #if defined(__v850e__) || defined(__v850ea__) || defined(__v850e2__) || defined(__v850e2v3__) || defined(__v850e3v5__) /* This routine is almost unnecessary because gcc generates the MUL instruction for the RTX mulsi3. But if someone wants to link their application with previously compiled v850 objects then they will need this function. */ /* It isn't good to use the instruction sequence below: mul r7, r6, r0 mov r6, r10 In this case, there is a RAW hazard between them: the MUL instruction takes 2 cycles in the EX stage, so the MOV instruction must wait 1 cycle. */ mov r7, r10 mul r6, r10, r0 jmp [r31] #endif /* __v850e__ */ .size ___mulsi3,.-___mulsi3 #endif /* L_mulsi3 */ #ifdef L_udivsi3 .text .global ___udivsi3 .type ___udivsi3,@function ___udivsi3: #ifdef __v850__ mov 1,r12 mov 0,r10 cmp r6,r7 bnl .L12 movhi hi(-2147483648),r0,r13 cmp r0,r7 blt .L12 .L4: shl 1,r7 shl 1,r12 cmp r6,r7 bnl .L12 cmp r0,r12 be .L8 mov r7,r19 and r13,r19 be .L4 br .L12 .L9: cmp r7,r6 bl .L10 sub r7,r6 or r12,r10 .L10: shr 1,r12 shr 1,r7 .L12: cmp r0,r12 bne .L9 .L8: jmp [r31] #else /* defined(__v850e__) */ /* See comments at end of __mulsi3. */ mov r6, r10 divu r7, r10, r0 jmp [r31] #endif /* __v850e__ */ .size ___udivsi3,.-___udivsi3 #endif #ifdef L_divsi3 .text .globl ___divsi3 .type ___divsi3,@function ___divsi3: #ifdef __v850__ add -8,sp st.w r31,4[sp] st.w r22,0[sp] mov 1,r22 tst r7,r7 bp .L3 subr r0,r7 subr r0,r22 .L3: tst r6,r6 bp .L4 subr r0,r6 subr r0,r22 .L4: jarl ___udivsi3,r31 cmp r0,r22 bp .L7 subr r0,r10 .L7: ld.w 0[sp],r22 ld.w 4[sp],r31 add 8,sp jmp [r31] #else /* defined(__v850e__) */ /* See comments at end of __mulsi3. 
*/ mov r6, r10 div r7, r10, r0 jmp [r31] #endif /* __v850e__ */ .size ___divsi3,.-___divsi3 #endif #ifdef L_umodsi3 .text .globl ___umodsi3 .type ___umodsi3,@function ___umodsi3: #ifdef __v850__ add -12,sp st.w r31,8[sp] st.w r7,4[sp] st.w r6,0[sp] jarl ___udivsi3,r31 ld.w 4[sp],r7 mov r10,r6 jarl ___mulsi3,r31 ld.w 0[sp],r6 subr r6,r10 ld.w 8[sp],r31 add 12,sp jmp [r31] #else /* defined(__v850e__) */ /* See comments at end of __mulsi3. */ divu r7, r6, r10 jmp [r31] #endif /* __v850e__ */ .size ___umodsi3,.-___umodsi3 #endif /* L_umodsi3 */ #ifdef L_modsi3 .text .globl ___modsi3 .type ___modsi3,@function ___modsi3: #ifdef __v850__ add -12,sp st.w r31,8[sp] st.w r7,4[sp] st.w r6,0[sp] jarl ___divsi3,r31 ld.w 4[sp],r7 mov r10,r6 jarl ___mulsi3,r31 ld.w 0[sp],r6 subr r6,r10 ld.w 8[sp],r31 add 12,sp jmp [r31] #else /* defined(__v850e__) */ /* See comments at end of __mulsi3. */ div r7, r6, r10 jmp [r31] #endif /* __v850e__ */ .size ___modsi3,.-___modsi3 #endif /* L_modsi3 */ #ifdef L_save_2 .text .align 2 .globl __save_r2_r29 .type __save_r2_r29,@function /* Allocate space and save registers 2, 20 .. 29 on the stack. */ /* Called via: jalr __save_r2_r29,r10. */ __save_r2_r29: #ifdef __EP__ mov ep,r1 addi -44,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] sst.w r20,36[ep] sst.w r2,40[ep] mov r1,ep #else addi -44,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] st.w r20,36[sp] st.w r2,40[sp] #endif jmp [r10] .size __save_r2_r29,.-__save_r2_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r2_r29. */ .align 2 .globl __return_r2_r29 .type __return_r2_r29,@function __return_r2_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 sld.w 36[ep],r20 sld.w 40[ep],r2 addi 44,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 ld.w 36[sp],r20 ld.w 40[sp],r2 addi 44,sp,sp #endif jmp [r31] .size __return_r2_r29,.-__return_r2_r29 #endif /* L_save_2 */ #ifdef L_save_20 .text .align 2 .globl __save_r20_r29 .type __save_r20_r29,@function /* Allocate space and save registers 20 .. 29 on the stack. */ /* Called via: jalr __save_r20_r29,r10. */ __save_r20_r29: #ifdef __EP__ mov ep,r1 addi -40,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] sst.w r20,36[ep] mov r1,ep #else addi -40,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] st.w r20,36[sp] #endif jmp [r10] .size __save_r20_r29,.-__save_r20_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r20_r29. 
*/ .align 2 .globl __return_r20_r29 .type __return_r20_r29,@function __return_r20_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 sld.w 36[ep],r20 addi 40,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 ld.w 36[sp],r20 addi 40,sp,sp #endif jmp [r31] .size __return_r20_r29,.-__return_r20_r29 #endif /* L_save_20 */ #ifdef L_save_21 .text .align 2 .globl __save_r21_r29 .type __save_r21_r29,@function /* Allocate space and save registers 21 .. 29 on the stack. */ /* Called via: jalr __save_r21_r29,r10. */ __save_r21_r29: #ifdef __EP__ mov ep,r1 addi -36,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] mov r1,ep #else addi -36,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] #endif jmp [r10] .size __save_r21_r29,.-__save_r21_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r21_r29. */ .align 2 .globl __return_r21_r29 .type __return_r21_r29,@function __return_r21_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 addi 36,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 addi 36,sp,sp #endif jmp [r31] .size __return_r21_r29,.-__return_r21_r29 #endif /* L_save_21 */ #ifdef L_save_22 .text .align 2 .globl __save_r22_r29 .type __save_r22_r29,@function /* Allocate space and save registers 22 .. 29 on the stack. */ /* Called via: jalr __save_r22_r29,r10. */ __save_r22_r29: #ifdef __EP__ mov ep,r1 addi -32,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] mov r1,ep #else addi -32,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] #endif jmp [r10] .size __save_r22_r29,.-__save_r22_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r22_r29. */ .align 2 .globl __return_r22_r29 .type __return_r22_r29,@function __return_r22_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 addi 32,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 addi 32,sp,sp #endif jmp [r31] .size __return_r22_r29,.-__return_r22_r29 #endif /* L_save_22 */ #ifdef L_save_23 .text .align 2 .globl __save_r23_r29 .type __save_r23_r29,@function /* Allocate space and save registers 23 .. 29 on the stack. */ /* Called via: jalr __save_r23_r29,r10. 
*/ __save_r23_r29: #ifdef __EP__ mov ep,r1 addi -28,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] mov r1,ep #else addi -28,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] #endif jmp [r10] .size __save_r23_r29,.-__save_r23_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r23_r29. */ .align 2 .globl __return_r23_r29 .type __return_r23_r29,@function __return_r23_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 addi 28,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 addi 28,sp,sp #endif jmp [r31] .size __return_r23_r29,.-__return_r23_r29 #endif /* L_save_23 */ #ifdef L_save_24 .text .align 2 .globl __save_r24_r29 .type __save_r24_r29,@function /* Allocate space and save registers 24 .. 29 on the stack. */ /* Called via: jalr __save_r24_r29,r10. */ __save_r24_r29: #ifdef __EP__ mov ep,r1 addi -24,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] mov r1,ep #else addi -24,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] #endif jmp [r10] .size __save_r24_r29,.-__save_r24_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r24_r29. */ .align 2 .globl __return_r24_r29 .type __return_r24_r29,@function __return_r24_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 addi 24,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 addi 24,sp,sp #endif jmp [r31] .size __return_r24_r29,.-__return_r24_r29 #endif /* L_save_24 */ #ifdef L_save_25 .text .align 2 .globl __save_r25_r29 .type __save_r25_r29,@function /* Allocate space and save registers 25 .. 29 on the stack. */ /* Called via: jalr __save_r25_r29,r10. */ __save_r25_r29: #ifdef __EP__ mov ep,r1 addi -20,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] mov r1,ep #else addi -20,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] #endif jmp [r10] .size __save_r25_r29,.-__save_r25_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r25_r29. */ .align 2 .globl __return_r25_r29 .type __return_r25_r29,@function __return_r25_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 addi 20,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 addi 20,sp,sp #endif jmp [r31] .size __return_r25_r29,.-__return_r25_r29 #endif /* L_save_25 */ #ifdef L_save_26 .text .align 2 .globl __save_r26_r29 .type __save_r26_r29,@function /* Allocate space and save registers 26 .. 29 on the stack. */ /* Called via: jalr __save_r26_r29,r10. 
*/ __save_r26_r29: #ifdef __EP__ mov ep,r1 add -16,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] mov r1,ep #else add -16,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] #endif jmp [r10] .size __save_r26_r29,.-__save_r26_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r26_r29. */ .align 2 .globl __return_r26_r29 .type __return_r26_r29,@function __return_r26_r29: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 addi 16,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 addi 16,sp,sp #endif jmp [r31] .size __return_r26_r29,.-__return_r26_r29 #endif /* L_save_26 */ #ifdef L_save_27 .text .align 2 .globl __save_r27_r29 .type __save_r27_r29,@function /* Allocate space and save registers 27 .. 29 on the stack. */ /* Called via: jalr __save_r27_r29,r10. */ __save_r27_r29: add -12,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] jmp [r10] .size __save_r27_r29,.-__save_r27_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r27_r29. */ .align 2 .globl __return_r27_r29 .type __return_r27_r29,@function __return_r27_r29: ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 add 12,sp jmp [r31] .size __return_r27_r29,.-__return_r27_r29 #endif /* L_save_27 */ #ifdef L_save_28 .text .align 2 .globl __save_r28_r29 .type __save_r28_r29,@function /* Allocate space and save registers 28, 29 on the stack. */ /* Called via: jalr __save_r28_r29,r10. */ __save_r28_r29: add -8,sp st.w r29,0[sp] st.w r28,4[sp] jmp [r10] .size __save_r28_r29,.-__save_r28_r29 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r28_r29. */ .align 2 .globl __return_r28_r29 .type __return_r28_r29,@function __return_r28_r29: ld.w 0[sp],r29 ld.w 4[sp],r28 add 8,sp jmp [r31] .size __return_r28_r29,.-__return_r28_r29 #endif /* L_save_28 */ #ifdef L_save_29 .text .align 2 .globl __save_r29 .type __save_r29,@function /* Allocate space and save register 29 on the stack. */ /* Called via: jalr __save_r29,r10. */ __save_r29: add -4,sp st.w r29,0[sp] jmp [r10] .size __save_r29,.-__save_r29 /* Restore saved register 29, deallocate stack and return to the user. */ /* Called via: jr __return_r29. */ .align 2 .globl __return_r29 .type __return_r29,@function __return_r29: ld.w 0[sp],r29 add 4,sp jmp [r31] .size __return_r29,.-__return_r29 #endif /* L_save_29 */ #ifdef L_save_2c .text .align 2 .globl __save_r2_r31 .type __save_r2_r31,@function /* Allocate space and save registers 2, 20 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r2_r31,r10. */ __save_r2_r31: #ifdef __EP__ mov ep,r1 addi -48,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] sst.w r20,36[ep] sst.w r2,40[ep] sst.w r31,44[ep] mov r1,ep #else addi -48,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] st.w r20,36[sp] st.w r2,40[sp] st.w r31,44[sp] #endif jmp [r10] .size __save_r2_r31,.-__save_r2_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r2_r31. 
*/ .align 2 .globl __return_r2_r31 .type __return_r2_r31,@function __return_r2_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 sld.w 36[ep],r20 sld.w 40[ep],r2 sld.w 44[ep],r31 addi 48,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 ld.w 36[sp],r20 ld.w 40[sp],r2 ld.w 44[sp],r31 addi 48,sp,sp #endif jmp [r31] .size __return_r2_r31,.-__return_r2_r31 #endif /* L_save_2c */ #ifdef L_save_20c .text .align 2 .globl __save_r20_r31 .type __save_r20_r31,@function /* Allocate space and save registers 20 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r20_r31,r10. */ __save_r20_r31: #ifdef __EP__ mov ep,r1 addi -44,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] sst.w r20,36[ep] sst.w r31,40[ep] mov r1,ep #else addi -44,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] st.w r20,36[sp] st.w r31,40[sp] #endif jmp [r10] .size __save_r20_r31,.-__save_r20_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r20_r31. */ .align 2 .globl __return_r20_r31 .type __return_r20_r31,@function __return_r20_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 sld.w 36[ep],r20 sld.w 40[ep],r31 addi 44,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 ld.w 36[sp],r20 ld.w 40[sp],r31 addi 44,sp,sp #endif jmp [r31] .size __return_r20_r31,.-__return_r20_r31 #endif /* L_save_20c */ #ifdef L_save_21c .text .align 2 .globl __save_r21_r31 .type __save_r21_r31,@function /* Allocate space and save registers 21 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r21_r31,r10. */ __save_r21_r31: #ifdef __EP__ mov ep,r1 addi -40,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r21,32[ep] sst.w r31,36[ep] mov r1,ep jmp [r10] #else addi -40,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r21,32[sp] st.w r31,36[sp] jmp [r10] #endif .size __save_r21_r31,.-__save_r21_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r21_r31. 
*/ .align 2 .globl __return_r21_r31 .type __return_r21_r31,@function __return_r21_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r21 sld.w 36[ep],r31 addi 40,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r21 ld.w 36[sp],r31 addi 40,sp,sp #endif jmp [r31] .size __return_r21_r31,.-__return_r21_r31 #endif /* L_save_21c */ #ifdef L_save_22c .text .align 2 .globl __save_r22_r31 .type __save_r22_r31,@function /* Allocate space and save registers 22 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r22_r31,r10. */ __save_r22_r31: #ifdef __EP__ mov ep,r1 addi -36,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r22,28[ep] sst.w r31,32[ep] mov r1,ep #else addi -36,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r22,28[sp] st.w r31,32[sp] #endif jmp [r10] .size __save_r22_r31,.-__save_r22_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r22_r31. */ .align 2 .globl __return_r22_r31 .type __return_r22_r31,@function __return_r22_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r22 sld.w 32[ep],r31 addi 36,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r22 ld.w 32[sp],r31 addi 36,sp,sp #endif jmp [r31] .size __return_r22_r31,.-__return_r22_r31 #endif /* L_save_22c */ #ifdef L_save_23c .text .align 2 .globl __save_r23_r31 .type __save_r23_r31,@function /* Allocate space and save registers 23 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r23_r31,r10. */ __save_r23_r31: #ifdef __EP__ mov ep,r1 addi -32,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r23,24[ep] sst.w r31,28[ep] mov r1,ep #else addi -32,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r23,24[sp] st.w r31,28[sp] #endif jmp [r10] .size __save_r23_r31,.-__save_r23_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r23_r31. */ .align 2 .globl __return_r23_r31 .type __return_r23_r31,@function __return_r23_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r23 sld.w 28[ep],r31 addi 32,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r23 ld.w 28[sp],r31 addi 32,sp,sp #endif jmp [r31] .size __return_r23_r31,.-__return_r23_r31 #endif /* L_save_23c */ #ifdef L_save_24c .text .align 2 .globl __save_r24_r31 .type __save_r24_r31,@function /* Allocate space and save registers 24 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r24_r31,r10. 
*/ __save_r24_r31: #ifdef __EP__ mov ep,r1 addi -28,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r24,20[ep] sst.w r31,24[ep] mov r1,ep #else addi -28,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r24,20[sp] st.w r31,24[sp] #endif jmp [r10] .size __save_r24_r31,.-__save_r24_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r24_r31. */ .align 2 .globl __return_r24_r31 .type __return_r24_r31,@function __return_r24_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r24 sld.w 24[ep],r31 addi 28,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r24 ld.w 24[sp],r31 addi 28,sp,sp #endif jmp [r31] .size __return_r24_r31,.-__return_r24_r31 #endif /* L_save_24c */ #ifdef L_save_25c .text .align 2 .globl __save_r25_r31 .type __save_r25_r31,@function /* Allocate space and save registers 25 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r25_r31,r10. */ __save_r25_r31: #ifdef __EP__ mov ep,r1 addi -24,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r25,16[ep] sst.w r31,20[ep] mov r1,ep #else addi -24,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r25,16[sp] st.w r31,20[sp] #endif jmp [r10] .size __save_r25_r31,.-__save_r25_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r25_r31. */ .align 2 .globl __return_r25_r31 .type __return_r25_r31,@function __return_r25_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r25 sld.w 20[ep],r31 addi 24,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r25 ld.w 20[sp],r31 addi 24,sp,sp #endif jmp [r31] .size __return_r25_r31,.-__return_r25_r31 #endif /* L_save_25c */ #ifdef L_save_26c .text .align 2 .globl __save_r26_r31 .type __save_r26_r31,@function /* Allocate space and save registers 26 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r26_r31,r10. */ __save_r26_r31: #ifdef __EP__ mov ep,r1 addi -20,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r26,12[ep] sst.w r31,16[ep] mov r1,ep #else addi -20,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r26,12[sp] st.w r31,16[sp] #endif jmp [r10] .size __save_r26_r31,.-__save_r26_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r26_r31. */ .align 2 .globl __return_r26_r31 .type __return_r26_r31,@function __return_r26_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r26 sld.w 16[ep],r31 addi 20,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r26 ld.w 16[sp],r31 addi 20,sp,sp #endif jmp [r31] .size __return_r26_r31,.-__return_r26_r31 #endif /* L_save_26c */ #ifdef L_save_27c .text .align 2 .globl __save_r27_r31 .type __save_r27_r31,@function /* Allocate space and save registers 27 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r27_r31,r10. 
*/ __save_r27_r31: #ifdef __EP__ mov ep,r1 addi -16,sp,sp mov sp,ep sst.w r29,0[ep] sst.w r28,4[ep] sst.w r27,8[ep] sst.w r31,12[ep] mov r1,ep #else addi -16,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r27,8[sp] st.w r31,12[sp] #endif jmp [r10] .size __save_r27_r31,.-__save_r27_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r27_r31. */ .align 2 .globl __return_r27_r31 .type __return_r27_r31,@function __return_r27_r31: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 0[ep],r29 sld.w 4[ep],r28 sld.w 8[ep],r27 sld.w 12[ep],r31 addi 16,sp,sp mov r1,ep #else ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r27 ld.w 12[sp],r31 addi 16,sp,sp #endif jmp [r31] .size __return_r27_r31,.-__return_r27_r31 #endif /* L_save_27c */ #ifdef L_save_28c .text .align 2 .globl __save_r28_r31 .type __save_r28_r31,@function /* Allocate space and save registers 28 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r28_r31,r10. */ __save_r28_r31: addi -12,sp,sp st.w r29,0[sp] st.w r28,4[sp] st.w r31,8[sp] jmp [r10] .size __save_r28_r31,.-__save_r28_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r28_r31. */ .align 2 .globl __return_r28_r31 .type __return_r28_r31,@function __return_r28_r31: ld.w 0[sp],r29 ld.w 4[sp],r28 ld.w 8[sp],r31 addi 12,sp,sp jmp [r31] .size __return_r28_r31,.-__return_r28_r31 #endif /* L_save_28c */ #ifdef L_save_29c .text .align 2 .globl __save_r29_r31 .type __save_r29_r31,@function /* Allocate space and save registers 29 & 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r29_r31,r10. */ __save_r29_r31: addi -8,sp,sp st.w r29,0[sp] st.w r31,4[sp] jmp [r10] .size __save_r29_r31,.-__save_r29_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r29_r31. */ .align 2 .globl __return_r29_r31 .type __return_r29_r31,@function __return_r29_r31: ld.w 0[sp],r29 ld.w 4[sp],r31 addi 8,sp,sp jmp [r31] .size __return_r29_r31,.-__return_r29_r31 #endif /* L_save_29c */ #ifdef L_save_31c .text .align 2 .globl __save_r31 .type __save_r31,@function /* Allocate space and save register 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: jalr __save_r31,r10. */ __save_r31: addi -4,sp,sp st.w r31,0[sp] jmp [r10] .size __save_r31,.-__save_r31 /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: jr __return_r31. */ .align 2 .globl __return_r31 .type __return_r31,@function __return_r31: ld.w 0[sp],r31 addi 4,sp,sp jmp [r31] .size __return_r31,.-__return_r31 #endif /* L_save_31c */ #ifdef L_save_interrupt .text .align 2 .globl __save_interrupt .type __save_interrupt,@function /* Save registers r1, r4 on stack and load up with expected values. */ /* Note, 20 bytes of stack have already been allocated. */ /* Called via: jalr __save_interrupt,r10. */ __save_interrupt: /* add -20,sp ; st.w r11,16[sp] ; st.w r10,12[sp] ; */ st.w ep,0[sp] st.w gp,4[sp] st.w r1,8[sp] movhi hi(__ep),r0,ep movea lo(__ep),ep,ep movhi hi(__gp),r0,gp movea lo(__gp),gp,gp jmp [r10] .size __save_interrupt,.-__save_interrupt /* Restore saved registers, deallocate stack and return from the interrupt. */ /* Called via: jr __return_interrupt. 
*/ .align 2 .globl __return_interrupt .type __return_interrupt,@function __return_interrupt: ld.w 0[sp],ep ld.w 4[sp],gp ld.w 8[sp],r1 ld.w 12[sp],r10 ld.w 16[sp],r11 addi 20,sp,sp reti .size __return_interrupt,.-__return_interrupt #endif /* L_save_interrupt */ #ifdef L_save_all_interrupt .text .align 2 .globl __save_all_interrupt .type __save_all_interrupt,@function /* Save all registers except for those saved in __save_interrupt. */ /* Allocate enough stack for all of the registers & 16 bytes of space. */ /* Called via: jalr __save_all_interrupt,r10. */ __save_all_interrupt: addi -104,sp,sp #ifdef __EP__ mov ep,r1 mov sp,ep sst.w r31,100[ep] sst.w r2,96[ep] sst.w gp,92[ep] sst.w r6,88[ep] sst.w r7,84[ep] sst.w r8,80[ep] sst.w r9,76[ep] sst.w r11,72[ep] sst.w r12,68[ep] sst.w r13,64[ep] sst.w r14,60[ep] sst.w r15,56[ep] sst.w r16,52[ep] sst.w r17,48[ep] sst.w r18,44[ep] sst.w r19,40[ep] sst.w r20,36[ep] sst.w r21,32[ep] sst.w r22,28[ep] sst.w r23,24[ep] sst.w r24,20[ep] sst.w r25,16[ep] sst.w r26,12[ep] sst.w r27,8[ep] sst.w r28,4[ep] sst.w r29,0[ep] mov r1,ep #else st.w r31,100[sp] st.w r2,96[sp] st.w gp,92[sp] st.w r6,88[sp] st.w r7,84[sp] st.w r8,80[sp] st.w r9,76[sp] st.w r11,72[sp] st.w r12,68[sp] st.w r13,64[sp] st.w r14,60[sp] st.w r15,56[sp] st.w r16,52[sp] st.w r17,48[sp] st.w r18,44[sp] st.w r19,40[sp] st.w r20,36[sp] st.w r21,32[sp] st.w r22,28[sp] st.w r23,24[sp] st.w r24,20[sp] st.w r25,16[sp] st.w r26,12[sp] st.w r27,8[sp] st.w r28,4[sp] st.w r29,0[sp] #endif jmp [r10] .size __save_all_interrupt,.-__save_all_interrupt .globl __restore_all_interrupt .type __restore_all_interrupt,@function /* Restore all registers saved in __save_all_interrupt and deallocate the stack space. */ /* Called via: jalr __restore_all_interrupt,r10. */ __restore_all_interrupt: #ifdef __EP__ mov ep,r1 mov sp,ep sld.w 100[ep],r31 sld.w 96[ep],r2 sld.w 92[ep],gp sld.w 88[ep],r6 sld.w 84[ep],r7 sld.w 80[ep],r8 sld.w 76[ep],r9 sld.w 72[ep],r11 sld.w 68[ep],r12 sld.w 64[ep],r13 sld.w 60[ep],r14 sld.w 56[ep],r15 sld.w 52[ep],r16 sld.w 48[ep],r17 sld.w 44[ep],r18 sld.w 40[ep],r19 sld.w 36[ep],r20 sld.w 32[ep],r21 sld.w 28[ep],r22 sld.w 24[ep],r23 sld.w 20[ep],r24 sld.w 16[ep],r25 sld.w 12[ep],r26 sld.w 8[ep],r27 sld.w 4[ep],r28 sld.w 0[ep],r29 mov r1,ep #else ld.w 100[sp],r31 ld.w 96[sp],r2 ld.w 92[sp],gp ld.w 88[sp],r6 ld.w 84[sp],r7 ld.w 80[sp],r8 ld.w 76[sp],r9 ld.w 72[sp],r11 ld.w 68[sp],r12 ld.w 64[sp],r13 ld.w 60[sp],r14 ld.w 56[sp],r15 ld.w 52[sp],r16 ld.w 48[sp],r17 ld.w 44[sp],r18 ld.w 40[sp],r19 ld.w 36[sp],r20 ld.w 32[sp],r21 ld.w 28[sp],r22 ld.w 24[sp],r23 ld.w 20[sp],r24 ld.w 16[sp],r25 ld.w 12[sp],r26 ld.w 8[sp],r27 ld.w 4[sp],r28 ld.w 0[sp],r29 #endif addi 104,sp,sp jmp [r10] .size __restore_all_interrupt,.-__restore_all_interrupt #endif /* L_save_all_interrupt */ #if defined __V850_CALLT__ #if defined(__v850e__) || defined(__v850e1__) || defined(__v850e2__) || defined(__v850e2v3__) || defined(__v850e3v5__) #ifdef L_callt_save_r2_r29 /* Put these functions into the call table area. */ .call_table_text /* Allocate space and save registers 2, 20 .. 29 on the stack. */ /* Called via: callt ctoff(__callt_save_r2_r29). */ .align 2 .L_save_r2_r29: add -4, sp st.w r2, 0[sp] prepare {r20 - r29}, 0 ctret /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: callt ctoff(__callt_return_r2_r29). */ .align 2 .L_return_r2_r29: dispose 0, {r20-r29} ld.w 0[sp], r2 add 4, sp jmp [r31] /* Place the offsets of the start of these routines into the call table. 
*/ .call_table_data .global __callt_save_r2_r29 .type __callt_save_r2_r29,@function __callt_save_r2_r29: .short ctoff(.L_save_r2_r29) .global __callt_return_r2_r29 .type __callt_return_r2_r29,@function __callt_return_r2_r29: .short ctoff(.L_return_r2_r29) #endif /* L_callt_save_r2_r29. */ #ifdef L_callt_save_r2_r31 /* Put these functions into the call table area. */ .call_table_text /* Allocate space and save registers 2 and 20 .. 29, 31 on the stack. */ /* Also allocate space for the argument save area. */ /* Called via: callt ctoff(__callt_save_r2_r31). */ .align 2 .L_save_r2_r31: add -4, sp st.w r2, 0[sp] prepare {r20 - r29, r31}, 0 ctret /* Restore saved registers, deallocate stack and return to the user. */ /* Called via: callt ctoff(__callt_return_r2_r31). */ .align 2 .L_return_r2_r31: dispose 0, {r20 - r29, r31} ld.w 0[sp], r2 addi 4, sp, sp jmp [r31] /* Place the offsets of the start of these routines into the call table. */ .call_table_data .global __callt_save_r2_r31 .type __callt_save_r2_r31,@function __callt_save_r2_r31: .short ctoff(.L_save_r2_r31) .global __callt_return_r2_r31 .type __callt_return_r2_r31,@function __callt_return_r2_r31: .short ctoff(.L_return_r2_r31) #endif /* L_callt_save_r2_r31 */ #ifdef L_callt_save_interrupt /* Put these functions into the call table area. */ .call_table_text /* Save registers r1, ep, gp, r10 on stack and load up with expected values. */ /* Called via: callt ctoff(__callt_save_interrupt). */ .align 2 .L_save_interrupt: /* SP has already been moved before callt ctoff(_save_interrupt). */ /* R1, R10, R11, ctpc and ctpsw have already been saved before callt ctoff(_save_interrupt). */ /* addi -28, sp, sp */ /* st.w r1, 24[sp] */ /* st.w r10, 12[sp] */ /* st.w r11, 16[sp] */ /* stsr ctpc, r10 */ /* st.w r10, 20[sp] */ /* stsr ctpsw, r10 */ /* st.w r10, 24[sp] */ st.w ep, 0[sp] st.w gp, 4[sp] st.w r1, 8[sp] mov hilo(__ep),ep mov hilo(__gp),gp ctret .call_table_text /* Restore saved registers, deallocate stack and return from the interrupt. */ /* Called via: callt ctoff(__callt_restore_interrupt). */ .align 2 .globl __return_interrupt .type __return_interrupt,@function .L_return_interrupt: ld.w 24[sp], r1 ldsr r1, ctpsw ld.w 20[sp], r1 ldsr r1, ctpc ld.w 16[sp], r11 ld.w 12[sp], r10 ld.w 8[sp], r1 ld.w 4[sp], gp ld.w 0[sp], ep addi 28, sp, sp reti /* Place the offsets of the start of these routines into the call table. */ .call_table_data .global __callt_save_interrupt .type __callt_save_interrupt,@function __callt_save_interrupt: .short ctoff(.L_save_interrupt) .global __callt_return_interrupt .type __callt_return_interrupt,@function __callt_return_interrupt: .short ctoff(.L_return_interrupt) #endif /* L_callt_save_interrupt */ #ifdef L_callt_save_all_interrupt /* Put these functions into the call table area. */ .call_table_text /* Save all registers except for those saved in __save_interrupt. */ /* Allocate enough stack for all of the registers & 16 bytes of space. */ /* Called via: callt ctoff(__callt_save_all_interrupt). 
*/ .align 2 .L_save_all_interrupt: addi -60, sp, sp #ifdef __EP__ mov ep, r1 mov sp, ep sst.w r2, 56[ep] sst.w r5, 52[ep] sst.w r6, 48[ep] sst.w r7, 44[ep] sst.w r8, 40[ep] sst.w r9, 36[ep] sst.w r11, 32[ep] sst.w r12, 28[ep] sst.w r13, 24[ep] sst.w r14, 20[ep] sst.w r15, 16[ep] sst.w r16, 12[ep] sst.w r17, 8[ep] sst.w r18, 4[ep] sst.w r19, 0[ep] mov r1, ep #else st.w r2, 56[sp] st.w r5, 52[sp] st.w r6, 48[sp] st.w r7, 44[sp] st.w r8, 40[sp] st.w r9, 36[sp] st.w r11, 32[sp] st.w r12, 28[sp] st.w r13, 24[sp] st.w r14, 20[sp] st.w r15, 16[sp] st.w r16, 12[sp] st.w r17, 8[sp] st.w r18, 4[sp] st.w r19, 0[sp] #endif prepare {r20 - r29, r31}, 0 ctret /* Restore all registers saved in __save_all_interrupt deallocate the stack space. */ /* Called via: callt ctoff(__callt_restore_all_interrupt). */ .align 2 .L_restore_all_interrupt: dispose 0, {r20 - r29, r31} #ifdef __EP__ mov ep, r1 mov sp, ep sld.w 0 [ep], r19 sld.w 4 [ep], r18 sld.w 8 [ep], r17 sld.w 12[ep], r16 sld.w 16[ep], r15 sld.w 20[ep], r14 sld.w 24[ep], r13 sld.w 28[ep], r12 sld.w 32[ep], r11 sld.w 36[ep], r9 sld.w 40[ep], r8 sld.w 44[ep], r7 sld.w 48[ep], r6 sld.w 52[ep], r5 sld.w 56[ep], r2 mov r1, ep #else ld.w 0 [sp], r19 ld.w 4 [sp], r18 ld.w 8 [sp], r17 ld.w 12[sp], r16 ld.w 16[sp], r15 ld.w 20[sp], r14 ld.w 24[sp], r13 ld.w 28[sp], r12 ld.w 32[sp], r11 ld.w 36[sp], r9 ld.w 40[sp], r8 ld.w 44[sp], r7 ld.w 48[sp], r6 ld.w 52[sp], r5 ld.w 56[sp], r2 #endif addi 60, sp, sp ctret /* Place the offsets of the start of these routines into the call table. */ .call_table_data .global __callt_save_all_interrupt .type __callt_save_all_interrupt,@function __callt_save_all_interrupt: .short ctoff(.L_save_all_interrupt) .global __callt_restore_all_interrupt .type __callt_restore_all_interrupt,@function __callt_restore_all_interrupt: .short ctoff(.L_restore_all_interrupt) #endif /* L_callt_save_all_interrupt */ #define MAKE_CALLT_FUNCS( START ) \ .call_table_text ;\ .align 2 ;\ /* Allocate space and save registers START .. r29 on the stack. */ ;\ /* Called via: callt ctoff(__callt_save_START_r29). */ ;\ .L_save_##START##_r29: ;\ prepare { START - r29 }, 0 ;\ ctret ;\ ;\ /* Restore saved registers, deallocate stack and return. */ ;\ /* Called via: callt ctoff(__return_START_r29). */ ;\ .align 2 ;\ .L_return_##START##_r29: ;\ dispose 0, { START - r29 }, r31 ;\ ;\ /* Place the offsets of the start of these funcs into the call table. */;\ .call_table_data ;\ ;\ .global __callt_save_##START##_r29 ;\ .type __callt_save_##START##_r29,@function ;\ __callt_save_##START##_r29: .short ctoff(.L_save_##START##_r29 ) ;\ ;\ .global __callt_return_##START##_r29 ;\ .type __callt_return_##START##_r29,@function ;\ __callt_return_##START##_r29: .short ctoff(.L_return_##START##_r29 ) #define MAKE_CALLT_CFUNCS( START ) \ .call_table_text ;\ .align 2 ;\ /* Allocate space and save registers START .. r31 on the stack. */ ;\ /* Called via: callt ctoff(__callt_save_START_r31c). */ ;\ .L_save_##START##_r31c: ;\ prepare { START - r29, r31}, 0 ;\ ctret ;\ ;\ /* Restore saved registers, deallocate stack and return. */ ;\ /* Called via: callt ctoff(__return_START_r31c). */ ;\ .align 2 ;\ .L_return_##START##_r31c: ;\ dispose 0, { START - r29, r31}, r31 ;\ ;\ /* Place the offsets of the start of these funcs into the call table. 
*/;\ .call_table_data ;\ ;\ .global __callt_save_##START##_r31c ;\ .type __callt_save_##START##_r31c,@function ;\ __callt_save_##START##_r31c: .short ctoff(.L_save_##START##_r31c ) ;\ ;\ .global __callt_return_##START##_r31c ;\ .type __callt_return_##START##_r31c,@function ;\ __callt_return_##START##_r31c: .short ctoff(.L_return_##START##_r31c ) #ifdef L_callt_save_20 MAKE_CALLT_FUNCS (r20) #endif #ifdef L_callt_save_21 MAKE_CALLT_FUNCS (r21) #endif #ifdef L_callt_save_22 MAKE_CALLT_FUNCS (r22) #endif #ifdef L_callt_save_23 MAKE_CALLT_FUNCS (r23) #endif #ifdef L_callt_save_24 MAKE_CALLT_FUNCS (r24) #endif #ifdef L_callt_save_25 MAKE_CALLT_FUNCS (r25) #endif #ifdef L_callt_save_26 MAKE_CALLT_FUNCS (r26) #endif #ifdef L_callt_save_27 MAKE_CALLT_FUNCS (r27) #endif #ifdef L_callt_save_28 MAKE_CALLT_FUNCS (r28) #endif #ifdef L_callt_save_29 MAKE_CALLT_FUNCS (r29) #endif #ifdef L_callt_save_20c MAKE_CALLT_CFUNCS (r20) #endif #ifdef L_callt_save_21c MAKE_CALLT_CFUNCS (r21) #endif #ifdef L_callt_save_22c MAKE_CALLT_CFUNCS (r22) #endif #ifdef L_callt_save_23c MAKE_CALLT_CFUNCS (r23) #endif #ifdef L_callt_save_24c MAKE_CALLT_CFUNCS (r24) #endif #ifdef L_callt_save_25c MAKE_CALLT_CFUNCS (r25) #endif #ifdef L_callt_save_26c MAKE_CALLT_CFUNCS (r26) #endif #ifdef L_callt_save_27c MAKE_CALLT_CFUNCS (r27) #endif #ifdef L_callt_save_28c MAKE_CALLT_CFUNCS (r28) #endif #ifdef L_callt_save_29c MAKE_CALLT_CFUNCS (r29) #endif #ifdef L_callt_save_31c .call_table_text .align 2 /* Allocate space and save register r31 on the stack. */ /* Called via: callt ctoff(__callt_save_r31c). */ .L_callt_save_r31c: prepare {r31}, 0 ctret /* Restore saved registers, deallocate stack and return. */ /* Called via: callt ctoff(__return_r31c). */ .align 2 .L_callt_return_r31c: dispose 0, {r31}, r31 /* Place the offsets of the start of these funcs into the call table. */ .call_table_data .global __callt_save_r31c .type __callt_save_r31c,@function __callt_save_r31c: .short ctoff(.L_callt_save_r31c) .global __callt_return_r31c .type __callt_return_r31c,@function __callt_return_r31c: .short ctoff(.L_callt_return_r31c) #endif #endif /* __v850e__ */ #endif /* __V850_CALLT__ */ /* libgcc2 routines for NEC V850. */ /* Double integer arithmetic operations. */ #ifdef L_negdi2 .text .global ___negdi2 .type ___negdi2, @function ___negdi2: not r6, r10 add 1, r10 setf l, r6 not r7, r11 add r6, r11 jmp [lp] .size ___negdi2,.-___negdi2 #endif #ifdef L_cmpdi2 .text .global ___cmpdi2 .type ___cmpdi2,@function ___cmpdi2: # Signed comparison between the high words. cmp r9, r7 be .L_cmpdi_cmp_low setf ge, r10 setf gt, r6 add r6, r10 jmp [lp] .L_cmpdi_cmp_low: # Unsigned comparison between the low words. cmp r8, r6 setf nl, r10 setf h, r6 add r6, r10 jmp [lp] .size ___cmpdi2, . - ___cmpdi2 #endif #ifdef L_ucmpdi2 .text .global ___ucmpdi2 .type ___ucmpdi2,@function ___ucmpdi2: cmp r9, r7 # Check whether the high words are the same. bne .L_ucmpdi_check_psw cmp r8, r6 # Compare the low words. .L_ucmpdi_check_psw: setf nl, r10 # setf h, r6 # add r6, r10 # Add the results of the NL and H comparisons. jmp [lp] .size ___ucmpdi2, . 
- ___ucmpdi2 #endif #ifdef L_muldi3 .text .global ___muldi3 .type ___muldi3,@function ___muldi3: #ifdef __v850__ jarl __save_r26_r31, r10 addi 16, sp, sp mov r6, r28 shr 15, r28 movea lo(32767), r0, r14 and r14, r28 mov r8, r10 shr 15, r10 and r14, r10 mov r6, r19 shr 30, r19 mov r7, r12 shl 2, r12 or r12, r19 and r14, r19 mov r8, r13 shr 30, r13 mov r9, r12 shl 2, r12 or r12, r13 and r14, r13 mov r7, r11 shr 13, r11 and r14, r11 mov r9, r31 shr 13, r31 and r14, r31 mov r7, r29 shr 28, r29 and r14, r29 mov r9, r12 shr 28, r12 and r14, r12 and r14, r6 and r14, r8 mov r6, r14 mulh r8, r14 mov r6, r16 mulh r10, r16 mov r6, r18 mulh r13, r18 mov r6, r15 mulh r31, r15 mulh r12, r6 mov r28, r17 mulh r10, r17 add -16, sp mov r28, r12 mulh r8, r12 add r17, r18 mov r28, r17 mulh r31, r17 add r12, r16 mov r28, r12 mulh r13, r12 add r17, r6 mov r19, r17 add r12, r15 mov r19, r12 mulh r8, r12 mulh r10, r17 add r12, r18 mov r19, r12 mulh r13, r12 add r17, r15 mov r11, r13 mulh r8, r13 add r12, r6 mov r11, r12 mulh r10, r12 add r13, r15 mulh r29, r8 add r12, r6 mov r16, r13 shl 15, r13 add r14, r13 mov r18, r12 shl 30, r12 mov r13, r26 add r12, r26 shr 15, r14 movhi hi(131071), r0, r12 movea lo(131071), r12, r13 and r13, r14 mov r16, r12 and r13, r12 add r12, r14 mov r18, r12 shl 15, r12 and r13, r12 add r12, r14 shr 17, r14 shr 17, r16 add r14, r16 shl 13, r15 shr 2, r18 add r18, r15 add r15, r16 mov r16, r27 add r8, r6 shl 28, r6 add r6, r27 mov r26, r10 mov r27, r11 jr __return_r26_r31 #else /* defined(__v850e__) */ /* (Ahi << 32 + Alo) * (Bhi << 32 + Blo) */ /* r7 r6 r9 r8 */ mov r8, r10 mulu r7, r8, r0 /* Ahi * Blo */ mulu r6, r9, r0 /* Alo * Bhi */ mulu r6, r10, r11 /* Alo * Blo */ add r8, r11 add r9, r11 jmp [r31] #endif /* defined(__v850e__) */ .size ___muldi3, . - ___muldi3 #endif
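The three-multiply trick in the v850e ___muldi3 tail generalizes, and a hedged C model may make it easier to follow (muldi3_model is a hypothetical name, not part of libgcc): the low 64 bits of (Ahi<<32 + Alo) * (Bhi<<32 + Blo) need only the full Alo*Blo product plus the low halves of the two cross products, because Ahi*Bhi lands entirely above bit 63.

#include <stdint.h>

/* C model of the v850e sequence above: mulu r6,r10,r11 produces the full
   Alo*Blo product; the cross products are folded into its high word. */
uint64_t muldi3_model(uint64_t a, uint64_t b)
{
    uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
    uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);

    uint64_t prod = (uint64_t)alo * blo;           /* mulu r6, r10, r11 */
    uint32_t hi   = (uint32_t)(prod >> 32);
    hi += (uint32_t)((uint64_t)ahi * blo);         /* mulu r7, r8, r0: keep low half */
    hi += (uint32_t)((uint64_t)alo * bhi);         /* mulu r6, r9, r0: keep low half */
    return ((uint64_t)hi << 32) | (uint32_t)prod;
}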
4ms/metamodule-plugin-sdk
4,412
plugin-libc/libgcc/config/m32c/lib1funcs.S
/* libgcc routines for R8C/M16C/M32C Copyright (C) 2005-2022 Free Software Foundation, Inc. Contributed by Red Hat. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #if defined(__r8c_cpu__) || defined(__m16c_cpu__) #define A16 #define A(n,w) n #define W w #else #define A24 #define A(n,w) w #define W l #endif #ifdef L__m32c_memregs /* Warning: these memory locations are used as a register bank. They *must* end up consecutive in any final executable, so you may *not* use the otherwise obvious ".comm" directive to allocate space for them. */ .bss .global mem0 mem0: .space 1 .global mem1 mem1: .space 1 .global mem2 mem2: .space 1 .global mem3 mem3: .space 1 .global mem4 mem4: .space 1 .global mem5 mem5: .space 1 .global mem6 mem6: .space 1 .global mem7 mem7: .space 1 .global mem8 mem8: .space 1 .global mem9 mem9: .space 1 .global mem10 mem10: .space 1 .global mem11 mem11: .space 1 .global mem12 mem12: .space 1 .global mem13 mem13: .space 1 .global mem14 mem14: .space 1 .global mem15 mem15: .space 1 #endif #ifdef L__m32c_eh_return .text .global __m32c_eh_return __m32c_eh_return: /* At this point, r0 has the stack adjustment, r1r3 has the address to return to. The stack looks like this: old_ra old_fp <- unwound sp ... fb through r0 <- sp What we need to do is restore all the registers, update the stack, and return to the right place. */ stc sp,a0 add.W A(#16,#24),a0 /* a0 points to the current stack, just above the register save areas */ mov.w a0,a1 exts.w r0 sub.W A(r0,r2r0),a1 sub.W A(#3,#4),a1 /* a1 points to the new stack. */ /* This is for the "rts" below. */ mov.w r1,[a1] #ifdef A16 mov.w r2,r1 mov.b r1l,2[a1] #else mov.w r2,2[a1] #endif /* This is for the "popc sp" below. */ mov.W a1,[a0] popm r0,r1,r2,r3,a0,a1,sb,fb popc sp rts #endif /* SImode arguments for SI foo(SI,SI) functions. 
*/ #ifdef A16 #define SAL 5[fb] #define SAH 7[fb] #define SBL 9[fb] #define SBH 11[fb] #else #define SAL 8[fb] #define SAH 10[fb] #define SBL 12[fb] #define SBH 14[fb] #endif #ifdef L__m32c_mulsi3 .text .global ___mulsi3 ___mulsi3: enter #0 push.w r2 mov.w SAL,r0 mulu.w SBL,r0 /* writes to r2r0 */ mov.w r0,mem0 mov.w r2,mem2 mov.w SAL,r0 mulu.w SBH,r0 /* writes to r2r0 */ add.w r0,mem2 mov.w SAH,r0 mulu.w SBL,r0 /* writes to r2r0 */ add.w r0,mem2 pop.w r2 exitd #endif #ifdef L__m32c_cmpsi2 .text .global ___cmpsi2 ___cmpsi2: enter #0 cmp.w SBH,SAH jgt cmpsi_gt jlt cmpsi_lt cmp.w SBL,SAL jgt cmpsi_gt jlt cmpsi_lt mov.w #1,r0 exitd cmpsi_gt: mov.w #2,r0 exitd cmpsi_lt: mov.w #0,r0 exitd #endif #ifdef L__m32c_ucmpsi2 .text .global ___ucmpsi2 ___ucmpsi2: enter #0 cmp.w SBH,SAH jgtu cmpsi_gt jltu cmpsi_lt cmp.w SBL,SAL jgtu cmpsi_gt jltu cmpsi_lt mov.w #1,r0 exitd cmpsi_gt: mov.w #2,r0 exitd cmpsi_lt: mov.w #0,r0 exitd #endif #ifdef L__m32c_jsri16 .text #ifdef A16 .global m32c_jsri16 m32c_jsri16: add.w #-1, sp /* Read the address (16 bits) and return address (24 bits) off the stack. */ mov.w 4[sp], r0 mov.w 1[sp], r3 mov.b 3[sp], a0 /* This zero-extends, so the high byte has zero in it. */ /* Write the return address, then new address, to the stack. */ mov.w a0, 1[sp] /* Just to get the zero in 2[sp]. */ mov.w r0, 0[sp] mov.w r3, 3[sp] mov.b a0, 5[sp] /* This "returns" to the target address, leaving the pending return address on the stack. */ rts #endif #endif
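The m32c ___mulsi3 above applies the same decomposition with 16-bit limbs. Here is a hedged C model (mulsi3_model is a hypothetical name): only three 16x16->32 multiplies are needed for the low 32 bits of the product, since the ah*bh term cannot affect anything below bit 32.

#include <stdint.h>

/* C model of ___mulsi3 above: mem2:mem0 accumulate the 32-bit product. */
uint32_t mulsi3_model(uint32_t a, uint32_t b)
{
    uint16_t al = (uint16_t)a, ah = (uint16_t)(a >> 16);
    uint16_t bl = (uint16_t)b, bh = (uint16_t)(b >> 16);

    uint32_t lo = (uint32_t)al * bl;          /* mulu.w SBL,r0 -> mem2:mem0 */
    uint16_t hi = (uint16_t)(lo >> 16);
    hi += (uint16_t)((uint32_t)al * bh);      /* add.w r0,mem2 */
    hi += (uint16_t)((uint32_t)ah * bl);      /* add.w r0,mem2 */
    return ((uint32_t)hi << 16) | (uint16_t)lo;
}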
4ms/metamodule-plugin-sdk
1,706
plugin-libc/libgcc/config/sparc/crtn.S
! crtn.S for SPARC ! Copyright (C) 1992-2022 Free Software Foundation, Inc. ! Written By David Vinayak Henkel-Wallace, June 1992 ! ! This file is free software; you can redistribute it and/or modify it ! under the terms of the GNU General Public License as published by the ! Free Software Foundation; either version 3, or (at your option) any ! later version. ! ! This file is distributed in the hope that it will be useful, but ! WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ! General Public License for more details. ! ! Under Section 7 of GPL version 3, you are granted additional ! permissions described in the GCC Runtime Library Exception, version ! 3.1, as published by the Free Software Foundation. ! ! You should have received a copy of the GNU General Public License and ! a copy of the GCC Runtime Library Exception along with this program; ! see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ! <http://www.gnu.org/licenses/>. ! This file just makes sure that the .fini and .init sections do in ! fact return. Users may put any desired instructions in those sections. ! This file is the last thing linked into any executable. .section ".init" .align 4 #ifdef _FLAT mov %i7, %o7 #ifdef __sparcv9 ldx [%sp+2343], %i7 sub %sp, -176, %sp #else ld [%sp+156], %i7 sub %sp, -96, %sp #endif #else restore #endif jmp %o7+8 nop .section ".fini" .align 4 #ifdef _FLAT mov %i7, %o7 #ifdef __sparcv9 ldx [%sp+2343], %i7 sub %sp, -176, %sp #else ld [%sp+156], %i7 sub %sp, -96, %sp #endif #else restore #endif jmp %o7+8 nop ! Th-th-th-that is all folks!
4ms/metamodule-plugin-sdk
16,708
plugin-libc/libgcc/config/sparc/lb1spc.S
/* This is an assembly language implementation of mulsi3, divsi3, and modsi3 for the sparc processor. These routines are derived from the SPARC Architecture Manual, version 8, slightly edited to match the desired calling convention, and also to optimize them for our purposes. */ /* An executable stack is *not* required for these functions. */ #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif #ifdef L_mulsi3 .text .align 4 .global .umul .proc 4 .umul: or %o0, %o1, %o4 ! logical or of multiplier and multiplicand mov %o0, %y ! multiplier to Y register andncc %o4, 0xfff, %o5 ! mask out lower 12 bits be mul_shortway ! can do it the short way andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc ! ! long multiply ! mulscc %o4, %o1, %o4 ! first iteration of 33 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 ! 32nd iteration mulscc %o4, %g0, %o4 ! last iteration only shifts ! the upper 32 bits of product are wrong, but we do not care retl rd %y, %o0 ! ! short multiply ! mul_shortway: mulscc %o4, %o1, %o4 ! first iteration of 13 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 mulscc %o4, %o1, %o4 ! 12th iteration mulscc %o4, %g0, %o4 ! last iteration only shifts rd %y, %o5 sll %o4, 12, %o4 ! left shift partial product by 12 bits srl %o5, 20, %o5 ! right shift partial product by 20 bits retl or %o5, %o4, %o0 ! merge for true product #endif #ifdef L_divsi3 /* * Division and remainder, from Appendix E of the SPARC Version 8 * Architecture Manual, with fixes from Gordon Irlam. */ /* * Input: dividend and divisor in %o0 and %o1 respectively. * * m4 parameters: * .div name of function to generate * div div=div => %o0 / %o1; div=rem => %o0 % %o1 * true true=true => signed; true=false => unsigned * * Algorithm parameters: * N how many bits per iteration we try to get (4) * WORDSIZE total number of bits (32) * * Derived constants: * TOPBITS number of bits in the top decade of a number * * Important variables: * Q the partial quotient under development (initially 0) * R the remainder so far, initially the dividend * ITER number of main division loop iterations required; * equal to ceil(log2(quotient) / N). Note that this * is the log base (2^N) of the quotient. * V the current comparand, initially divisor*2^(ITER*N-1) * * Cost: * Current estimate for non-large dividend is * ceil(log2(quotient) / N) * (10 + 7N/2) + C * A large dividend is one greater than 2^(31-TOPBITS) and takes a * different path, as the upper bits of the quotient must be developed * one bit at a time. */ .global .udiv .align 4 .proc 4 .text .udiv: b ready_to_divide mov 0, %g3 ! result is always positive .global .div .align 4 .proc 4 .text .div: ! compute sign of result; if neither is negative, no problem orcc %o1, %o0, %g0 ! 
either negative? bge ready_to_divide ! no, go do the divide xor %o1, %o0, %g3 ! compute sign in any case tst %o1 bge 1f tst %o0 ! %o1 is definitely negative; %o0 might also be negative bge ready_to_divide ! if %o0 not negative... sub %g0, %o1, %o1 ! in any case, make %o1 nonneg 1: ! %o0 is negative, %o1 is nonnegative sub %g0, %o0, %o0 ! make %o0 nonnegative ready_to_divide: ! Ready to divide. Compute size of quotient; scale comparand. orcc %o1, %g0, %o5 bne 1f mov %o0, %o3 ! Divide by zero trap. If it returns, return 0 (about as ! wrong as possible, but that is what SunOS does...). ta 0x2 ! ST_DIV0 retl clr %o0 1: cmp %o3, %o5 ! if %o1 exceeds %o0, done blu got_result ! (and algorithm fails otherwise) clr %o2 sethi %hi(1 << (32 - 4 - 1)), %g1 cmp %o3, %g1 blu not_really_big clr %o4 ! Here the dividend is >= 2**(31-N) or so. We must be careful here, ! as our usual N-at-a-shot divide step will cause overflow and havoc. ! The number of bits in the result here is N*ITER+SC, where SC <= N. ! Compute ITER in an unorthodox manner: know we need to shift V into ! the top decade: so do not even bother to compare to R. 1: cmp %o5, %g1 bgeu 3f mov 1, %g2 sll %o5, 4, %o5 b 1b add %o4, 1, %o4 ! Now compute %g2. 2: addcc %o5, %o5, %o5 bcc not_too_big add %g2, 1, %g2 ! We get here if the %o1 overflowed while shifting. ! This means that %o3 has the high-order bit set. ! Restore %o5 and subtract from %o3. sll %g1, 4, %g1 ! high order bit srl %o5, 1, %o5 ! rest of %o5 add %o5, %g1, %o5 b do_single_div sub %g2, 1, %g2 not_too_big: 3: cmp %o5, %o3 blu 2b nop be do_single_div nop /* NB: these are commented out in the V8-SPARC manual as well */ /* (I do not understand this) */ ! %o5 > %o3: went too far: back up 1 step ! srl %o5, 1, %o5 ! dec %g2 ! do single-bit divide steps ! ! We have to be careful here. We know that %o3 >= %o5, so we can do the ! first divide step without thinking. BUT, the others are conditional, ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high- ! order bit set in the first step, just falling into the regular ! division loop will mess up the first time around. ! So we unroll slightly... do_single_div: subcc %g2, 1, %g2 bl end_regular_divide nop sub %o3, %o5, %o3 mov 1, %o2 b end_single_divloop nop single_divloop: sll %o2, 1, %o2 bl 1f srl %o5, 1, %o5 ! %o3 >= 0 sub %o3, %o5, %o3 b 2f add %o2, 1, %o2 1: ! %o3 < 0 add %o3, %o5, %o3 sub %o2, 1, %o2 2: end_single_divloop: subcc %g2, 1, %g2 bge single_divloop tst %o3 b,a end_regular_divide not_really_big: 1: sll %o5, 4, %o5 cmp %o5, %o3 bleu 1b addcc %o4, 1, %o4 be got_result sub %o4, 1, %o4 tst %o3 ! set up for initial iteration divloop: sll %o2, 4, %o2 ! depth 1, accumulated bits 0 bl L1.16 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 2, accumulated bits 1 bl L2.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 3, accumulated bits 3 bl L3.19 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits 7 bl L4.23 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (7*2+1), %o2 L4.23: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (7*2-1), %o2 L3.19: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits 5 bl L4.21 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (5*2+1), %o2 L4.21: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (5*2-1), %o2 L2.17: ! remainder is negative addcc %o3,%o5,%o3 ! depth 3, accumulated bits 1 bl L3.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! 
depth 4, accumulated bits 3 bl L4.19 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (3*2+1), %o2 L4.19: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (3*2-1), %o2 L3.17: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits 1 bl L4.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (1*2+1), %o2 L4.17: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (1*2-1), %o2 L1.16: ! remainder is negative addcc %o3,%o5,%o3 ! depth 2, accumulated bits -1 bl L2.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 3, accumulated bits -1 bl L3.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits -1 bl L4.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-1*2+1), %o2 L4.15: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-1*2-1), %o2 L3.15: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits -3 bl L4.13 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-3*2+1), %o2 L4.13: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-3*2-1), %o2 L2.15: ! remainder is negative addcc %o3,%o5,%o3 ! depth 3, accumulated bits -3 bl L3.13 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits -5 bl L4.11 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-5*2+1), %o2 L4.11: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-5*2-1), %o2 L3.13: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits -7 bl L4.9 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-7*2+1), %o2 L4.9: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-7*2-1), %o2 9: end_regular_divide: subcc %o4, 1, %o4 bge divloop tst %o3 bl,a got_result ! non-restoring fixup here (one instruction only!) sub %o2, 1, %o2 got_result: ! check to see if answer should be < 0 tst %g3 bl,a 1f sub %g0, %o2, %o2 1: retl mov %o2, %o0 #endif #ifdef L_modsi3 /* This implementation was taken from glibc: * * Input: dividend and divisor in %o0 and %o1 respectively. * * Algorithm parameters: * N how many bits per iteration we try to get (4) * WORDSIZE total number of bits (32) * * Derived constants: * TOPBITS number of bits in the top decade of a number * * Important variables: * Q the partial quotient under development (initially 0) * R the remainder so far, initially the dividend * ITER number of main division loop iterations required; * equal to ceil(log2(quotient) / N). Note that this * is the log base (2^N) of the quotient. * V the current comparand, initially divisor*2^(ITER*N-1) * * Cost: * Current estimate for non-large dividend is * ceil(log2(quotient) / N) * (10 + 7N/2) + C * A large dividend is one greater than 2^(31-TOPBITS) and takes a * different path, as the upper bits of the quotient must be developed * one bit at a time. */ .text .align 4 .global .urem .proc 4 .urem: b divide mov 0, %g3 ! result always positive .align 4 .global .rem .proc 4 .rem: ! compute sign of result; if neither is negative, no problem orcc %o1, %o0, %g0 ! either negative? bge 2f ! no, go do the divide mov %o0, %g3 ! sign of remainder matches %o0 tst %o1 bge 1f tst %o0 ! %o1 is definitely negative; %o0 might also be negative bge 2f ! if %o0 not negative... sub %g0, %o1, %o1 ! in any case, make %o1 nonneg 1: ! %o0 is negative, %o1 is nonnegative sub %g0, %o0, %o0 ! make %o0 nonnegative 2: ! Ready to divide. Compute size of quotient; scale comparand. divide: orcc %o1, %g0, %o5 bne 1f mov %o0, %o3 ! 
Divide by zero trap. If it returns, return 0 (about as ! wrong as possible, but that is what SunOS does...). ta 0x2 !ST_DIV0 retl clr %o0 1: cmp %o3, %o5 ! if %o1 exceeds %o0, done blu got_result ! (and algorithm fails otherwise) clr %o2 sethi %hi(1 << (32 - 4 - 1)), %g1 cmp %o3, %g1 blu not_really_big clr %o4 ! Here the dividend is >= 2**(31-N) or so. We must be careful here, ! as our usual N-at-a-shot divide step will cause overflow and havoc. ! The number of bits in the result here is N*ITER+SC, where SC <= N. ! Compute ITER in an unorthodox manner: know we need to shift V into ! the top decade: so do not even bother to compare to R. 1: cmp %o5, %g1 bgeu 3f mov 1, %g2 sll %o5, 4, %o5 b 1b add %o4, 1, %o4 ! Now compute %g2. 2: addcc %o5, %o5, %o5 bcc not_too_big add %g2, 1, %g2 ! We get here if the %o1 overflowed while shifting. ! This means that %o3 has the high-order bit set. ! Restore %o5 and subtract from %o3. sll %g1, 4, %g1 ! high order bit srl %o5, 1, %o5 ! rest of %o5 add %o5, %g1, %o5 b do_single_div sub %g2, 1, %g2 not_too_big: 3: cmp %o5, %o3 blu 2b nop be do_single_div nop /* NB: these are commented out in the V8-SPARC manual as well */ /* (I do not understand this) */ ! %o5 > %o3: went too far: back up 1 step ! srl %o5, 1, %o5 ! dec %g2 ! do single-bit divide steps ! ! We have to be careful here. We know that %o3 >= %o5, so we can do the ! first divide step without thinking. BUT, the others are conditional, ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high- ! order bit set in the first step, just falling into the regular ! division loop will mess up the first time around. ! So we unroll slightly... do_single_div: subcc %g2, 1, %g2 bl end_regular_divide nop sub %o3, %o5, %o3 mov 1, %o2 b end_single_divloop nop single_divloop: sll %o2, 1, %o2 bl 1f srl %o5, 1, %o5 ! %o3 >= 0 sub %o3, %o5, %o3 b 2f add %o2, 1, %o2 1: ! %o3 < 0 add %o3, %o5, %o3 sub %o2, 1, %o2 2: end_single_divloop: subcc %g2, 1, %g2 bge single_divloop tst %o3 b,a end_regular_divide not_really_big: 1: sll %o5, 4, %o5 cmp %o5, %o3 bleu 1b addcc %o4, 1, %o4 be got_result sub %o4, 1, %o4 tst %o3 ! set up for initial iteration divloop: sll %o2, 4, %o2 ! depth 1, accumulated bits 0 bl L1.16 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 2, accumulated bits 1 bl L2.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 3, accumulated bits 3 bl L3.19 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits 7 bl L4.23 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (7*2+1), %o2 L4.23: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (7*2-1), %o2 L3.19: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits 5 bl L4.21 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (5*2+1), %o2 L4.21: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (5*2-1), %o2 L2.17: ! remainder is negative addcc %o3,%o5,%o3 ! depth 3, accumulated bits 1 bl L3.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits 3 bl L4.19 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (3*2+1), %o2 L4.19: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (3*2-1), %o2 L3.17: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits 1 bl L4.17 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (1*2+1), %o2 L4.17: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (1*2-1), %o2 L1.16: ! 
remainder is negative addcc %o3,%o5,%o3 ! depth 2, accumulated bits -1 bl L2.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 3, accumulated bits -1 bl L3.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits -1 bl L4.15 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-1*2+1), %o2 L4.15: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-1*2-1), %o2 L3.15: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits -3 bl L4.13 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-3*2+1), %o2 L4.13: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-3*2-1), %o2 L2.15: ! remainder is negative addcc %o3,%o5,%o3 ! depth 3, accumulated bits -3 bl L3.13 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 ! depth 4, accumulated bits -5 bl L4.11 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-5*2+1), %o2 L4.11: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-5*2-1), %o2 L3.13: ! remainder is negative addcc %o3,%o5,%o3 ! depth 4, accumulated bits -7 bl L4.9 srl %o5,1,%o5 ! remainder is positive subcc %o3,%o5,%o3 b 9f add %o2, (-7*2+1), %o2 L4.9: ! remainder is negative addcc %o3,%o5,%o3 b 9f add %o2, (-7*2-1), %o2 9: end_regular_divide: subcc %o4, 1, %o4 bge divloop tst %o3 bl,a got_result ! non-restoring fixup here (one instruction only!) add %o3, %o1, %o3 got_result: ! check to see if answer should be < 0 tst %g3 bl,a 1f sub %g0, %o3, %o3 1: retl mov %o3, %o0 #endif
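The unrolled divloop above develops four quotient bits per pass using non-restoring steps: each step subtracts or adds the shifted divisor depending on the remainder's sign, and the "accumulated bits" constants (7*2+1, 5*2-1, ...) extend the accumulated signed-digit value by one more +1/-1 digit. Below is a minimal C sketch of the same scheme, one bit per step; the function name, the 64-bit intermediates, and the single-bit stepping are illustrative choices of this sketch, not libgcc code. The final `if (r < 0)` block corresponds to the one-instruction non-restoring fixup at got_result (`sub %o2, 1, %o2` for .div, `add %o3, %o1, %o3` for .rem).

#include <stdint.h>

/* One-bit non-restoring division sketch; the assembly above does the
   same thing four bits at a time.  Assumes den != 0.  */
static uint32_t
udiv_nonrestoring (uint32_t num, uint32_t den, uint32_t *rem)
{
  int n = 0;
  while (((uint64_t) den << n) <= num)
    n++;                            /* scale comparand V = den * 2^n, cf. ITER */

  int64_t r = num;                  /* remainder may go negative */
  int64_t q = 0;
  int i;
  for (i = n - 1; i >= 0; i--)
    {
      if (r >= 0)
        {
          r -= (int64_t) den << i;  /* subtract: signed digit +1 */
          q = (q << 1) + 1;
        }
      else
        {
          r += (int64_t) den << i;  /* add back: signed digit -1 */
          q = (q << 1) - 1;
        }
    }
  if (r < 0)                        /* non-restoring fixup, one step only */
    {
      q -= 1;                       /* quotient fixup (.div path) */
      r += den;                     /* remainder fixup (.rem path) */
    }
  *rem = (uint32_t) r;
  return (uint32_t) q;
}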
4ms/metamodule-plugin-sdk
1,945
plugin-libc/libgcc/config/sparc/crti.S
! crti.S for SPARC
! Copyright (C) 1992-2022 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
!
! This file is free software; you can redistribute it and/or modify it
! under the terms of the GNU General Public License as published by the
! Free Software Foundation; either version 3, or (at your option) any
! later version.
!
! This file is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! General Public License for more details.
!
! Under Section 7 of GPL version 3, you are granted additional
! permissions described in the GCC Runtime Library Exception, version
! 3.1, as published by the Free Software Foundation.
!
! You should have received a copy of the GNU General Public License and
! a copy of the GCC Runtime Library Exception along with this program;
! see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
! <http://www.gnu.org/licenses/>.

! This file just makes a stack frame for the contents of the .fini and
! .init sections.  Users may put any desired instructions in those
! sections.

! This file is linked in before the Values-Xx.o files and also before
! crtbegin, with which perhaps it should be merged.

        .section ".init"
        .proc 022
        .global _init
        .type _init,#function
        .align 4
_init:
#ifdef _FLAT
#ifdef __sparcv9
        stx %i7, [%sp+2167]
        add %sp, -176, %sp
#else
        st %i7, [%sp+60]
        add %sp, -96, %sp
#endif
        mov %o7, %i7
#else
#ifdef __sparcv9
        save %sp, -176, %sp
#else
        save %sp, -96, %sp
#endif
#endif

        .section ".fini"
        .proc 022
        .global _fini
        .type _fini,#function
        .align 4
_fini:
#ifdef _FLAT
#ifdef __sparcv9
        stx %i7, [%sp+2167]
        add %sp, -176, %sp
#else
        st %i7, [%sp+60]
        add %sp, -96, %sp
#endif
        mov %o7, %i7
#else
#ifdef __sparcv9
        save %sp, -176, %sp
#else
        save %sp, -96, %sp
#endif
#endif
4ms/metamodule-plugin-sdk
2,759
plugin-libc/libgcc/config/sparc/sol2-c1.S
! crt1.s for sparc & sparcv9 (SunOS 5)
! Copyright (C) 1992-2022 Free Software Foundation, Inc.
! Written By David Vinayak Henkel-Wallace, June 1992
!
! This file is free software; you can redistribute it and/or modify it
! under the terms of the GNU General Public License as published by the
! Free Software Foundation; either version 3, or (at your option) any
! later version.
!
! This file is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
! General Public License for more details.
!
! Under Section 7 of GPL version 3, you are granted additional
! permissions described in the GCC Runtime Library Exception, version
! 3.1, as published by the Free Software Foundation.
!
! You should have received a copy of the GNU General Public License and
! a copy of the GCC Runtime Library Exception along with this program;
! see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
! <http://www.gnu.org/licenses/>.

! This file takes control of the process from the kernel, as specified
! in section 3 of the SVr4 ABI.
! This file is the first thing linked into any executable.

#ifdef __sparcv9
#define CPTRSIZE 8
#define CPTRSHIFT 3
#define STACK_BIAS 2047
#define ldn ldx
#define stn stx
#define setn(s, scratch, dst) setx s, scratch, dst
#else
#define CPTRSIZE 4
#define CPTRSHIFT 2
#define STACK_BIAS 0
#define ldn ld
#define stn st
#define setn(s, scratch, dst) set s, dst
#endif

        .section ".text"
        .proc 022
        .global _start

_start:
        mov 0, %fp              ! Mark bottom frame pointer
        ldn [%sp + (16 * CPTRSIZE) + STACK_BIAS], %l0   ! argc
        add %sp, (17 * CPTRSIZE) + STACK_BIAS, %l1      ! argv

        ! Leave some room for a call.  Sun leaves 32 octets (to sit on
        ! a cache line?) so we do too.
#ifdef __sparcv9
        sub %sp, 48, %sp
#else
        sub %sp, 32, %sp
#endif

        ! %g1 may contain a function to be registered w/atexit
        orcc %g0, %g1, %g0
#ifdef __sparcv9
        be %xcc, .nope
#else
        be .nope
#endif
        mov %g1, %o0
        call atexit
        nop
.nope:
        ! Now make sure constructors and destructors are handled.
        setn(_fini, %o1, %o0)
        call atexit, 1
        nop
        call _init, 0
        nop

        ! We ignore the auxiliary vector; there is no defined way to
        ! access those data anyway.  Instead, go straight to main:
        mov %l0, %o0            ! argc
        mov %l1, %o1            ! argv
#ifdef GCRT1
        setn(___Argv, %o4, %o3)
        stn %o1, [%o3]          ! *___Argv
#endif
        ! Skip argc words past argv, to env:
        sll %l0, CPTRSHIFT, %o2
        add %o2, CPTRSIZE, %o2
        add %l1, %o2, %o2       ! env
        setn(_environ, %o4, %o3)
        stn %o2, [%o3]          ! *_environ
        call main, 4
        nop
        call exit, 0
        nop
        call _exit, 0
        nop
        ! We should never get here.

        .type _start,#function
        .size _start,.-_start
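For reference, the control flow of _start reads as follows in C. This is a hypothetical paraphrase of this sketch, not a drop-in replacement: argc and argv actually come from the initial stack as laid out by the SVr4 SPARC ABI, and rtld_fini stands for the function the dynamic linker may hand over in %g1.

#include <stdlib.h>

extern void _init (void);
extern void _fini (void);
extern char **_environ;
extern int main (int, char **, char **);

/* Hypothetical C rendering of _start's control flow.  */
static void
start_like (int argc, char **argv, void (*rtld_fini) (void))
{
  if (rtld_fini)
    atexit (rtld_fini);          /* cleanup function handed over in %g1 */
  atexit (_fini);                /* destructors run at exit */
  _init ();                      /* constructors run now */
  _environ = argv + argc + 1;    /* env starts one slot past argv's NULL */
  exit (main (argc, argv, _environ));
  /* exit should not return; the assembly still falls back to _exit.  */
}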
4ms/metamodule-plugin-sdk
1,394
plugin-libc/libgcc/config/fr30/crtn.S
# crtn.S for ELF
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes sure that the .fini and .init sections do in
# fact return.  Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.

        .section ".init"
        .align 4
        leave
        ld @r15+,rp
        ret

        .section ".fini"
        .align 4
        leave
        ld @r15+,rp
        ret

# Th-th-th-that is all folks!
4ms/metamodule-plugin-sdk
1,622
plugin-libc/libgcc/config/fr30/crti.S
# crti.s for ELF
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes a stack frame for the contents of the .fini and
# .init sections.  Users may put any desired instructions in those
# sections.

        .section ".init"
        .global _init
        .type _init,#function
        .align 4
_init:
        st rp, @-r15
        enter #4

        # These nops are here to align the end of this code with a 16 byte
        # boundary.  The linker will start inserting code into the .init
        # section at such a boundary.
        nop
        nop
        nop
        nop
        nop
        nop

        .section ".fini"
        .global _fini
        .type _fini,#function
        .align 4
_fini:
        st rp, @-r15
        enter #4
        nop
        nop
        nop
        nop
        nop
        nop
4ms/metamodule-plugin-sdk
2,506
plugin-libc/libgcc/config/fr30/lib1funcs.S
/* libgcc routines for the FR30.
   Copyright (C) 1998-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

        .macro FUNC_START name
        .text
        .globl __\name
        .type __\name, @function
__\name:
        .endm

        .macro FUNC_END name
        .size __\name, . - __\name
        .endm

        .macro DIV_BODY reg number
        .if \number
        DIV_BODY \reg, "\number - 1"
        div1 \reg
        .endif
        .endm

#ifdef L_udivsi3
        FUNC_START udivsi3
        ;; Perform an unsigned division of r4 / r5 and place the result in r4.
        ;; Does not handle overflow yet...
        mov r4, mdl
        div0u r5
        DIV_BODY r5 32
        mov mdl, r4
        ret
        FUNC_END udivsi3
#endif /* L_udivsi3 */

#ifdef L_divsi3
        FUNC_START divsi3
        ;; Perform a signed division of r4 / r5 and place the result in r4.
        ;; Does not handle overflow yet...
        mov r4, mdl
        div0s r5
        DIV_BODY r5 32
        div2 r5
        div3
        div4s
        mov mdl, r4
        ret
        FUNC_END divsi3
#endif /* L_divsi3 */

#ifdef L_umodsi3
        FUNC_START umodsi3
        ;; Perform an unsigned division of r4 / r5 and place the remainder in r4.
        ;; Does not handle overflow yet...
        mov r4, mdl
        div0u r5
        DIV_BODY r5 32
        mov mdh, r4
        ret
        FUNC_END umodsi3
#endif /* L_umodsi3 */

#ifdef L_modsi3
        FUNC_START modsi3
        ;; Perform a signed division of r4 / r5 and place the remainder in r4.
        ;; Does not handle overflow yet...
        mov r4, mdl
        div0s r5
        DIV_BODY r5 32
        div2 r5
        div3
        div4s
        mov mdh, r4
        ret
        FUNC_END modsi3
#endif /* L_modsi3 */

#ifdef L_negsi2
        FUNC_START negsi2
        ldi:8 #0, r0
        sub r4, r0
        mov r0, r4
        ret
        FUNC_END negsi2
#endif /* L_negsi2 */

#ifdef L_one_cmplsi2
        FUNC_START one_cmplsi2
        ldi:8 #0xff, r0
        extsb r0
        eor r0, r4
        ret
        FUNC_END one_cmplsi2
#endif /* L_one_cmplsi2 */
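The recursive DIV_BODY macro simply unrolls into 32 consecutive div1 steps. Under the usual shift-and-subtract reading of a div1-style step (an assumption of this sketch; the authoritative MDH/MDL semantics are those of the FR30 manual), the net effect of div0u plus 32 steps is the classic restoring long division modeled below.

#include <stdint.h>

/* Behavioral model of div0u followed by 32 div1 steps: md stands in
   for the 64-bit MDH:MDL register pair.  Assumes den != 0.  */
static uint32_t
udivsi3_model (uint32_t num, uint32_t den, uint32_t *rem)
{
  uint64_t md = num;                     /* MDL = num, MDH = 0 */
  int i;
  for (i = 0; i < 32; i++)               /* DIV_BODY reg 32 */
    {
      int carry = (int) (md >> 63);      /* bit shifted out of MDH */
      md <<= 1;
      if (carry || (uint32_t) (md >> 32) >= den)
        {
          md -= (uint64_t) den << 32;    /* subtract shifted divisor */
          md |= 1;                       /* record a quotient bit */
        }
    }
  *rem = (uint32_t) (md >> 32);          /* MDH holds the remainder */
  return (uint32_t) md;                  /* MDL holds the quotient */
}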
4ms/metamodule-plugin-sdk
1,382
plugin-libc/libgcc/config/h8300/crtn.S
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.
   This file was adapted from glibc sources.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* See an explanation about .init and .fini in crti.S.  */

#ifdef __H8300H__
#ifdef __NORMAL_MODE__
        .h8300hn
#else
        .h8300h
#endif
#endif

#ifdef __H8300S__
#ifdef __NORMAL_MODE__
        .h8300sn
#else
        .h8300s
#endif
#endif

#ifdef __H8300SX__
#ifdef __NORMAL_MODE__
        .h8300sxn
#else
        .h8300sx
#endif
#endif

        .section .init, "ax", @progbits
        rts

        .section .fini, "ax", @progbits
        rts
4ms/metamodule-plugin-sdk
1,846
plugin-libc/libgcc/config/h8300/crti.S
/* Copyright (C) 2001-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* The code in sections .init and .fini is supposed to be a single
   regular function.  The function in .init is called directly from
   start in crt0.S.  The function in .fini is atexit()ed in crt0.S
   too.

   crti.S contributes the prologue of a function to these sections,
   and crtn.S supplies the epilogue.  STARTFILE_SPEC should list
   crti.o before any other object files that might add code to .init
   or .fini sections, and ENDFILE_SPEC should list crtn.o after any
   such object files.  */

#ifdef __H8300H__
#ifdef __NORMAL_MODE__
        .h8300hn
#else
        .h8300h
#endif
#endif

#ifdef __H8300S__
#ifdef __NORMAL_MODE__
        .h8300sn
#else
        .h8300s
#endif
#endif

#ifdef __H8300SX__
#ifdef __NORMAL_MODE__
        .h8300sxn
#else
        .h8300sx
#endif
#endif

        .section .init, "ax", @progbits
        .global __init
__init:

        .section .fini, "ax", @progbits
        .global __fini
__fini:
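In other words, once the linker has concatenated crti.o, the user objects, and crtn.o, each of .init and .fini must read as one well-formed function. A small sketch of how crt0 is described to use them; the names match the file, but the actual call sites live in crt0.S:

extern void __init (void);
extern void __fini (void);
extern int atexit (void (*) (void));

/* Sketch of crt0's use of the concatenated sections: __init is
   called directly at startup, __fini is registered with atexit.  */
static void
crt0_setup_sketch (void)
{
  __init ();        /* body: everything the link placed in .init */
  atexit (__fini);  /* body: everything the link placed in .fini */
}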
4ms/metamodule-plugin-sdk
14,936
plugin-libc/libgcc/config/h8300/lib1funcs.S
;; libgcc routines for the Renesas H8/300 CPU. ;; Contributed by Steve Chamberlain <sac@cygnus.com> ;; Optimizations by Toshiyasu Morita <toshiyasu.morita@renesas.com> /* Copyright (C) 1994-2022 Free Software Foundation, Inc. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* Assembler register definitions. */ #define A0 r0 #define A0L r0l #define A0H r0h #define A1 r1 #define A1L r1l #define A1H r1h #define A2 r2 #define A2L r2l #define A2H r2h #define A3 r3 #define A3L r3l #define A3H r3h #define S0 r4 #define S0L r4l #define S0H r4h #define S1 r5 #define S1L r5l #define S1H r5h #define S2 r6 #define S2L r6l #define S2H r6h #ifdef __H8300__ #define PUSHP push #define POPP pop #define A0P r0 #define A1P r1 #define A2P r2 #define A3P r3 #define S0P r4 #define S1P r5 #define S2P r6 #endif #if defined (__H8300H__) || defined (__H8300S__) || defined (__H8300SX__) #define PUSHP push.l #define POPP pop.l #define A0P er0 #define A1P er1 #define A2P er2 #define A3P er3 #define S0P er4 #define S1P er5 #define S2P er6 #define A0E e0 #define A1E e1 #define A2E e2 #define A3E e3 #endif #define CONCAT(A,B) A##B #define LABEL0(U,X) CONCAT(U,__##X) #define LABEL0_DEF(U,X) CONCAT(U,__##X##:) #define LABEL_DEF(X) LABEL0_DEF(__USER_LABEL_PREFIX__,X) #define LABEL(X) LABEL0(__USER_LABEL_PREFIX__,X) #ifdef __H8300H__ #ifdef __NORMAL_MODE__ .h8300hn #else .h8300h #endif #endif #ifdef __H8300S__ #ifdef __NORMAL_MODE__ .h8300sn #else .h8300s #endif #endif #ifdef __H8300SX__ #ifdef __NORMAL_MODE__ .h8300sxn #else .h8300sx #endif #endif #ifdef L_cmpsi2 #ifdef __H8300__ .section .text .align 2 .global LABEL(cmpsi2) LABEL_DEF(cmpsi2) cmp.w A0,A2 bne .L2 cmp.w A1,A3 bne .L4 mov.w #1,A0 rts .L2: bgt .L5 .L3: mov.w #2,A0 rts .L4: bls .L3 .L5: sub.w A0,A0 rts .end #endif #endif /* L_cmpsi2 */ #ifdef L_ucmpsi2 #ifdef __H8300__ .section .text .align 2 .global LABEL(ucmpsi2) LABEL_DEF(ucmpsi2) cmp.w A0,A2 bne .L2 cmp.w A1,A3 bne .L4 mov.w #1,A0 rts .L2: bhi .L5 .L3: mov.w #2,A0 rts .L4: bls .L3 .L5: sub.w A0,A0 rts .end #endif #endif /* L_ucmpsi2 */ #ifdef L_divhi3 ;; HImode divides for the H8/300. ;; We bunch all of this into one object file since there are several ;; "supporting routines". 
; general purpose normalize routine ; ; divisor in A0 ; dividend in A1 ; turns both into +ve numbers, and leaves what the answer sign ; should be in A2L #ifdef __H8300__ .section .text .align 2 divnorm: or A0H,A0H ; is divisor > 0 stc ccr,A2L bge _lab1 not A0H ; no - then make it +ve not A0L adds #1,A0 _lab1: or A1H,A1H ; look at dividend bge _lab2 not A1H ; it is -ve, make it positive not A1L adds #1,A1 xor #0x8,A2L; and toggle sign of result _lab2: rts ;; Basically the same, except that the sign of the divisor determines ;; the sign. modnorm: or A0H,A0H ; is divisor > 0 stc ccr,A2L bge _lab7 not A0H ; no - then make it +ve not A0L adds #1,A0 _lab7: or A1H,A1H ; look at dividend bge _lab8 not A1H ; it is -ve, make it positive not A1L adds #1,A1 _lab8: rts ; A0=A0/A1 signed .global LABEL(divhi3) LABEL_DEF(divhi3) bsr divnorm bsr LABEL(udivhi3) negans: btst #3,A2L ; should answer be negative ? beq _lab4 not A0H ; yes, so make it so not A0L adds #1,A0 _lab4: rts ; A0=A0%A1 signed .global LABEL(modhi3) LABEL_DEF(modhi3) bsr modnorm bsr LABEL(udivhi3) mov A3,A0 bra negans ; A0=A0%A1 unsigned .global LABEL(umodhi3) LABEL_DEF(umodhi3) bsr LABEL(udivhi3) mov A3,A0 rts ; A0=A0/A1 unsigned ; A3=A0%A1 unsigned ; A2H trashed ; D high 8 bits of denom ; d low 8 bits of denom ; N high 8 bits of num ; n low 8 bits of num ; M high 8 bits of mod ; m low 8 bits of mod ; Q high 8 bits of quot ; q low 8 bits of quot ; P preserve ; The H8/300 only has a 16/8 bit divide, so we look at the incoming and ; see how to partition up the expression. .global LABEL(udivhi3) LABEL_DEF(udivhi3) ; A0 A1 A2 A3 ; Nn Dd P sub.w A3,A3 ; Nn Dd xP 00 or A1H,A1H bne divlongway or A0H,A0H beq _lab6 ; we know that D == 0 and N is != 0 mov.b A0H,A3L ; Nn Dd xP 0N divxu A1L,A3 ; MQ mov.b A3L,A0H ; Q ; dealt with N, do n _lab6: mov.b A0L,A3L ; n divxu A1L,A3 ; mq mov.b A3L,A0L ; Qq mov.b A3H,A3L ; m mov.b #0x0,A3H ; Qq 0m rts ; D != 0 - which means the denominator is ; loop around to get the result. divlongway: mov.b A0H,A3L ; Nn Dd xP 0N mov.b #0x0,A0H ; high byte of answer has to be zero mov.b #0x8,A2H ; 8 div8: add.b A0L,A0L ; n*=2 rotxl A3L ; Make remainder bigger rotxl A3H sub.w A1,A3 ; Q-=N bhs setbit ; set a bit ? add.w A1,A3 ; no : too far , Q+=N dec A2H bne div8 ; next bit rts setbit: inc A0L ; do insert bit dec A2H bne div8 ; next bit rts #endif /* __H8300__ */ #endif /* L_divhi3 */ #ifdef L_divsi3 ;; 4 byte integer divides for the H8/300. ;; ;; We have one routine which does all the work and lots of ;; little ones which prepare the args and massage the sign. ;; We bunch all of this into one object file since there are several ;; "supporting routines". .section .text .align 2 ; Put abs SIs into r0/r1 and r2/r3, and leave a 1 in r6l with sign of rest. ; This function is here to keep branch displacements small. #ifdef __H8300__ divnorm: mov.b A0H,A0H ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge postive ; negate arg not A0H not A1H not A0L not A1L add #1,A1L addx #0,A1H addx #0,A0L addx #0,A0H postive: mov.b A2H,A2H ; is the denominator -ve bge postive2 not A2L not A2H not A3L not A3H add.b #1,A3L addx #0,A3H addx #0,A2L addx #0,A2H xor.b #0x08,S2L ; toggle the result sign postive2: rts ;; Basically the same, except that the sign of the divisor determines ;; the sign. 
modnorm: mov.b A0H,A0H ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge mpostive ; negate arg not A0H not A1H not A0L not A1L add #1,A1L addx #0,A1H addx #0,A0L addx #0,A0H mpostive: mov.b A2H,A2H ; is the denominator -ve bge mpostive2 not A2L not A2H not A3L not A3H add.b #1,A3L addx #0,A3H addx #0,A2L addx #0,A2H mpostive2: rts #else /* __H8300H__ */ divnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge postive neg.l A0P ; negate arg postive: mov.l A1P,A1P ; is the denominator -ve bge postive2 neg.l A1P ; negate arg xor.b #0x08,S2L ; toggle the result sign postive2: rts ;; Basically the same, except that the sign of the divisor determines ;; the sign. modnorm: mov.l A0P,A0P ; is the numerator -ve stc ccr,S2L ; keep the sign in bit 3 of S2L bge mpostive neg.l A0P ; negate arg mpostive: mov.l A1P,A1P ; is the denominator -ve bge mpostive2 neg.l A1P ; negate arg mpostive2: rts #endif ; numerator in A0/A1 ; denominator in A2/A3 .global LABEL(modsi3) LABEL_DEF(modsi3) #ifdef __H8300__ PUSHP S2P PUSHP S0P PUSHP S1P bsr modnorm bsr divmodsi4 mov S0,A0 mov S1,A1 bra exitdiv #else PUSHP S2P bsr modnorm bsr LABEL(divsi3) mov.l er3,er0 bra exitdiv #endif ;; H8/300H and H8S version of ___udivsi3 is defined later in ;; the file. #ifdef __H8300__ .global LABEL(udivsi3) LABEL_DEF(udivsi3) PUSHP S2P PUSHP S0P PUSHP S1P bsr divmodsi4 bra reti #endif .global LABEL(umodsi3) LABEL_DEF(umodsi3) #ifdef __H8300__ PUSHP S2P PUSHP S0P PUSHP S1P bsr divmodsi4 mov S0,A0 mov S1,A1 bra reti #else bsr LABEL(udivsi3) mov.l er3,er0 rts #endif .global LABEL(divsi3) LABEL_DEF(divsi3) #ifdef __H8300__ PUSHP S2P PUSHP S0P PUSHP S1P jsr divnorm jsr divmodsi4 #else PUSHP S2P jsr divnorm bsr LABEL(udivsi3) #endif ; examine what the sign should be exitdiv: btst #3,S2L beq reti ; should be -ve #ifdef __H8300__ not A0H not A1H not A0L not A1L add #1,A1L addx #0,A1H addx #0,A0L addx #0,A0H #else /* __H8300H__ */ neg.l A0P #endif reti: #ifdef __H8300__ POPP S1P POPP S0P #endif POPP S2P rts ; takes A0/A1 numerator (A0P for H8/300H) ; A2/A3 denominator (A1P for H8/300H) ; returns A0/A1 quotient (A0P for H8/300H) ; S0/S1 remainder (S0P for H8/300H) ; trashes S2H #ifdef __H8300__ divmodsi4: sub.w S0,S0 ; zero play area mov.w S0,S1 mov.b A2H,S2H or A2L,S2H or A3H,S2H bne DenHighNonZero mov.b A0H,A0H bne NumByte0Zero mov.b A0L,A0L bne NumByte1Zero mov.b A1H,A1H bne NumByte2Zero bra NumByte3Zero NumByte0Zero: mov.b A0H,S1L divxu A3L,S1 mov.b S1L,A0H NumByte1Zero: mov.b A0L,S1L divxu A3L,S1 mov.b S1L,A0L NumByte2Zero: mov.b A1H,S1L divxu A3L,S1 mov.b S1L,A1H NumByte3Zero: mov.b A1L,S1L divxu A3L,S1 mov.b S1L,A1L mov.b S1H,S1L mov.b #0x0,S1H rts ; have to do the divide by shift and test DenHighNonZero: mov.b A0H,S1L mov.b A0L,A0H mov.b A1H,A0L mov.b A1L,A1H mov.b #0,A1L mov.b #24,S2H ; only do 24 iterations nextbit: add.w A1,A1 ; double the answer guess rotxl A0L rotxl A0H rotxl S1L ; double remainder rotxl S1H rotxl S0L rotxl S0H sub.w A3,S1 ; does it all fit subx A2L,S0L subx A2H,S0H bhs setone add.w A3,S1 ; no, restore mistake addx A2L,S0L addx A2H,S0H dec S2H bne nextbit rts setone: inc A1L dec S2H bne nextbit rts #else /* __H8300H__ */ ;; This function also computes the remainder and stores it in er3. .global LABEL(udivsi3) LABEL_DEF(udivsi3) mov.w A1E,A1E ; denominator top word 0? 
bne DenHighNonZero ; do it the easy way, see page 107 in manual mov.w A0E,A2 extu.l A2P divxu.w A1,A2P mov.w A2E,A0E divxu.w A1,A0P mov.w A0E,A3 mov.w A2,A0E extu.l A3P rts ; er0 = er0 / er1 ; er3 = er0 % er1 ; trashes er1 er2 ; expects er1 >= 2^16 DenHighNonZero: mov.l er0,er3 mov.l er1,er2 #ifdef __H8300H__ divmod_L21: shlr.l er0 shlr.l er2 ; make divisor < 2^16 mov.w e2,e2 bne divmod_L21 #else shlr.l #2,er2 ; make divisor < 2^16 mov.w e2,e2 beq divmod_L22A divmod_L21: shlr.l #2,er0 divmod_L22: shlr.l #2,er2 ; make divisor < 2^16 mov.w e2,e2 bne divmod_L21 divmod_L22A: rotxl.w r2 bcs divmod_L23 shlr.l er0 bra divmod_L24 divmod_L23: rotxr.w r2 shlr.l #2,er0 divmod_L24: #endif ;; At this point, ;; er0 contains shifted dividend ;; er1 contains divisor ;; er2 contains shifted divisor ;; er3 contains dividend, later remainder divxu.w r2,er0 ; r0 now contains the approximate quotient (AQ) extu.l er0 beq divmod_L25 subs #1,er0 ; er0 = AQ - 1 mov.w e1,r2 mulxu.w r0,er2 ; er2 = upper (AQ - 1) * divisor sub.w r2,e3 ; dividend - 65536 * er2 mov.w r1,r2 mulxu.w r0,er2 ; compute er3 = remainder (tentative) sub.l er2,er3 ; er3 = dividend - (AQ - 1) * divisor divmod_L25: cmp.l er1,er3 ; is divisor < remainder? blo divmod_L26 adds #1,er0 sub.l er1,er3 ; correct the remainder divmod_L26: rts #endif #endif /* L_divsi3 */ #ifdef L_mulhi3 ;; HImode multiply. ; The H8/300 only has an 8*8->16 multiply. ; The answer is the same as: ; ; product = (srca.l * srcb.l) + ((srca.h * srcb.l) + (srcb.h * srca.l)) * 256 ; (we can ignore A1.h * A0.h cause that will all off the top) ; A0 in ; A1 in ; A0 answer #ifdef __H8300__ .section .text .align 2 .global LABEL(mulhi3) LABEL_DEF(mulhi3) mov.b A1L,A2L ; A2l gets srcb.l mulxu A0L,A2 ; A2 gets first sub product mov.b A0H,A3L ; prepare for mulxu A1L,A3 ; second sub product add.b A3L,A2H ; sum first two terms mov.b A1H,A3L ; third sub product mulxu A0L,A3 add.b A3L,A2H ; almost there mov.w A2,A0 ; that is rts #endif #endif /* L_mulhi3 */ #ifdef L_mulsi3 ;; SImode multiply. ;; ;; I think that shift and add may be sufficient for this. Using the ;; supplied 8x8->16 would need 10 ops of 14 cycles each + overhead. This way ;; the inner loop uses maybe 20 cycles + overhead, but terminates ;; quickly on small args. 
;; ;; A0/A1 src_a ;; A2/A3 src_b ;; ;; while (a) ;; { ;; if (a & 1) ;; r += b; ;; a >>= 1; ;; b <<= 1; ;; } .section .text .align 2 #ifdef __H8300__ .global LABEL(mulsi3) LABEL_DEF(mulsi3) PUSHP S0P PUSHP S1P sub.w S0,S0 sub.w S1,S1 ; while (a) _top: mov.w A0,A0 bne _more mov.w A1,A1 beq _done _more: ; if (a & 1) bld #0,A1L bcc _nobit ; r += b add.w A3,S1 addx A2L,S0L addx A2H,S0H _nobit: ; a >>= 1 shlr A0H rotxr A0L rotxr A1H rotxr A1L ; b <<= 1 add.w A3,A3 addx A2L,A2L addx A2H,A2H bra _top _done: mov.w S0,A0 mov.w S1,A1 POPP S1P POPP S0P rts #else /* __H8300H__ */ ; ; mulsi3 for H8/300H - based on Renesas SH implementation ; ; by Toshiyasu Morita ; ; Old code: ; ; 16b * 16b = 372 states (worst case) ; 32b * 32b = 724 states (worst case) ; ; New code: ; ; 16b * 16b = 48 states ; 16b * 32b = 72 states ; 32b * 32b = 92 states ; .global LABEL(mulsi3) LABEL_DEF(mulsi3) mov.w r1,r2 ; ( 2 states) b * d mulxu r0,er2 ; (22 states) mov.w e0,r3 ; ( 2 states) a * d beq L_skip1 ; ( 4 states) mulxu r1,er3 ; (22 states) add.w r3,e2 ; ( 2 states) L_skip1: mov.w e1,r3 ; ( 2 states) c * b beq L_skip2 ; ( 4 states) mulxu r0,er3 ; (22 states) add.w r3,e2 ; ( 2 states) L_skip2: mov.l er2,er0 ; ( 2 states) rts ; (10 states) #endif #endif /* L_mulsi3 */ #ifdef L_fixunssfsi_asm /* For the h8300 we use asm to save some bytes, to allow more programs to fit into the tiny address space. For the H8/300H and H8S, the C version is good enough. */ #ifdef __H8300__ /* We still treat NANs different than libgcc2.c, but then, the behavior is undefined anyways. */ .global LABEL(fixunssfsi) LABEL_DEF(fixunssfsi) cmp.b #0x4f,r0h bge Large_num jmp @LABEL(fixsfsi) Large_num: bhi L_huge_num xor.b #0x80,A0L bmi L_shift8 L_huge_num: mov.w #65535,A0 mov.w A0,A1 rts L_shift8: mov.b A0L,A0H mov.b A1H,A0L mov.b A1L,A1H mov.b #0,A1L rts #endif #endif /* L_fixunssfsi_asm */
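The shift-and-add loop sketched in the mulsi3 comment, written out as runnable C. It terminates as soon as the multiplier runs out of set bits, which is why the comment notes it finishes quickly on small arguments; the function name is this sketch's, not libgcc's.

#include <stdint.h>

/* C version of the comment's pseudocode: while (a) { if (a & 1)
   r += b; a >>= 1; b <<= 1; }.  Product is taken mod 2^32.  */
static uint32_t
mulsi3_model (uint32_t a, uint32_t b)
{
  uint32_t r = 0;
  while (a)
    {
      if (a & 1)
        r += b;       /* low multiplier bit set: add shifted b */
      a >>= 1;        /* consume one multiplier bit */
      b <<= 1;        /* weight b for the next bit */
    }
  return r;
}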
4ms/metamodule-plugin-sdk
104,715
plugin-libc/libgcc/config/nds32/lib1asmsrc-mculib.S
/* mculib libgcc routines of Andes NDS32 cpu for GNU compiler Copyright (C) 2012-2022 Free Software Foundation, Inc. Contributed by Andes Technology Corporation. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ .section .mdebug.abi_nds32 .previous /* ------------------------------------------- */ /* FPBIT floating point operations for libgcc */ /* ------------------------------------------- */ #ifdef L_addsub_sf .text .align 2 .global __subsf3 .type __subsf3, @function __subsf3: push $lp pushm $r6, $r9 move $r2, #0x80000000 xor $r1, $r1, $r2 j .Lsfpadd .global __addsf3 .type __addsf3, @function __addsf3: push $lp pushm $r6, $r9 .Lsfpadd: srli $r5, $r0, #23 andi $r5, $r5, #0xff srli $r7, $r1, #23 andi $r7, $r7, #0xff move $r3, #0x80000000 slli $r4, $r0, #8 or $r4, $r4, $r3 slli $r6, $r1, #8 or $r6, $r6, $r3 addi $r9, $r5, #-1 slti $r15, $r9, #0xfe beqzs8 .LEspecA .LElab1: addi $r9, $r7, #-1 slti $r15, $r9, #0xfe beqzs8 .LEspecB .LElab2: sub $r8, $r5, $r7 sltsi $r15, $r8, #0 bnezs8 .Li1 sltsi $r15, $r8, #0x20 bnezs8 .Li2 move $r6, #2 j .Le1 .Li2: move $r2, $r6 srl $r6, $r6, $r8 sll $r9, $r6, $r8 beq $r9, $r2, .Le1 ori $r6, $r6, #2 j .Le1 .Li1: move $r5, $r7 subri $r8, $r8, #0 sltsi $r15, $r8, #0x20 bnezs8 .Li4 move $r4, #2 j .Le1 .Li4: move $r2, $r4 srl $r4, $r4, $r8 sll $r9, $r4, $r8 beq $r9, $r2, .Le1 ori $r4, $r4, #2 .Le1: and $r8, $r0, $r3 xor $r9, $r8, $r1 sltsi $r15, $r9, #0 bnezs8 .LEsub1 #ADD($r4, $r6) add $r4, $r4, $r6 slt $r15, $r4, $r6 beqzs8 .LEres andi $r9, $r4, #1 beqz $r9, .Li7 ori $r4, $r4, #2 .Li7: srli $r4, $r4, #1 addi $r5, $r5, #1 subri $r15, $r5, #0xff bnezs8 .LEres move $r4, #0 j .LEres .LEsub1: #SUB($r4, $r6) move $r15, $r4 sub $r4, $r4, $r6 slt $r15, $r15, $r4 beqzs8 .Li9 subri $r4, $r4, #0 xor $r8, $r8, $r3 j .Le9 .Li9: beqz $r4, .LEzer .Le9: #ifdef __NDS32_PERF_EXT__ clz $r2, $r4 #else pushm $r0, $r1 pushm $r3, $r5 move $r0, $r4 bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif sub $r5, $r5, $r2 sll $r4, $r4, $r2 .LEres: blez $r5, .LEund .LElab12: #ADD($r4, $0x80) move $r15, #0x80 add $r4, $r4, $r15 slt $r15, $r4, $r15 #ADDC($r5, $0x0) add $r5, $r5, $r15 srli $r9, $r4, #8 andi $r9, $r9, #1 sub $r4, $r4, $r9 slli $r4, $r4, #1 srli $r4, $r4, #9 slli $r9, $r5, #23 or $r4, $r4, $r9 or $r0, $r4, $r8 .LE999: popm $r6, $r9 pop $lp ret5 $lp .LEund: subri $r2, $r5, #1 slti $r15, $r2, #0x20 beqzs8 .LEzer move $r9, #0x80000000 or $r4, $r4, $r9 subri $r9, $r2, #0x20 sll $r5, $r4, $r9 srl $r4, $r4, $r2 beqz $r5, .Li10 ori $r4, $r4, #1 .Li10: move $r5, #0 addi $r9, $r4, #0x80 sltsi $r15, $r9, #0 beqzs8 .LElab12 move $r5, #1 j .LElab12 .LEspecA: bnez $r5, .Li12 add $r4, $r4, $r4 beqz $r4, .Li13 #ifdef __NDS32_PERF_EXT__ clz $r8, $r4 #else pushm 
$r0, $r5 move $r0, $r4 bal __clzsi2 move $r8, $r0 popm $r0, $r5 #endif sub $r5, $r5, $r8 sll $r4, $r4, $r8 j .LElab1 .Li13: subri $r15, $r7, #0xff beqzs8 .LEspecB move $r9, #0x80000000 bne $r1, $r9, .LEretB .Li12: add $r9, $r4, $r4 bnez $r9, .LEnan subri $r15, $r7, #0xff bnezs8 .LEretA xor $r9, $r0, $r1 sltsi $r15, $r9, #0 bnezs8 .LEnan j .LEretB .LEspecB: bnez $r7, .Li15 add $r6, $r6, $r6 beqz $r6, .LEretA #ifdef __NDS32_PERF_EXT__ clz $r8, $r6 #else pushm $r0, $r5 move $r0, $r6 bal __clzsi2 move $r8, $r0 popm $r0, $r5 #endif sub $r7, $r7, $r8 sll $r6, $r6, $r8 j .LElab2 .Li15: add $r9, $r6, $r6 bnez $r9, .LEnan .LEretB: move $r0, $r1 j .LE999 .LEretA: j .LE999 .LEzer: move $r0, #0 j .LE999 .LEnan: move $r0, #0xffc00000 j .LE999 .size __subsf3, .-__subsf3 .size __addsf3, .-__addsf3 #endif /* L_addsub_sf */ #ifdef L_sf_to_si .text .align 2 .global __fixsfsi .type __fixsfsi, @function __fixsfsi: push $lp slli $r1, $r0, #8 move $r3, #0x80000000 or $r1, $r1, $r3 srli $r3, $r0, #23 andi $r3, $r3, #0xff subri $r2, $r3, #0x9e blez $r2, .LJspec sltsi $r15, $r2, #0x20 bnezs8 .Li42 move $r0, #0 j .LJ999 .Li42: srl $r1, $r1, $r2 sltsi $r15, $r0, #0 beqzs8 .Li43 subri $r1, $r1, #0 .Li43: move $r0, $r1 .LJ999: pop $lp ret5 $lp .LJspec: move $r3, #0x7f800000 slt $r15, $r3, $r0 beqzs8 .Li44 move $r0, #0x80000000 j .LJ999 .Li44: move $r0, #0x7fffffff j .LJ999 .size __fixsfsi, .-__fixsfsi #endif /* L_sf_to_si */ #ifdef L_divsi3 .text .align 2 .globl __divsi3 .type __divsi3, @function __divsi3: ! --------------------------------------------------------------------- ! neg = 0; ! if (a < 0) ! { a = -a; ! neg = !neg; ! } ! --------------------------------------------------------------------- sltsi $r5, $r0, 0 ! $r5 <- neg = (a < 0) ? 1 : 0 subri $r4, $r0, 0 ! $r4 <- a = -a cmovn $r0, $r4, $r5 ! $r0 <- a = neg ? -a : a .L2: ! --------------------------------------------------------------------- ! if (b < 0) ! --------------------------------------------------------------------- bgez $r1, .L3 ! if b >= 0, skip ! --------------------------------------------------------------------- ! { b=-b; ! neg=!neg; ! } ! --------------------------------------------------------------------- subri $r1, $r1, 0 ! $r1 <- b = -b subri $r5, $r5, 1 ! $r5 <- neg = !neg .L3: ! --------------------------------------------------------------------- !!res = udivmodsi4 (a, b, 1); ! res = 0; ! if (den != 0) ! --------------------------------------------------------------------- movi $r2, 0 ! $r2 <- res = 0 beqz $r1, .L1 ! if den == 0, skip ! --------------------------------------------------------------------- ! bit = 1; ! --------------------------------------------------------------------- movi $r4, 1 ! $r4 <- bit = 1 #ifndef __OPTIMIZE_SIZE__ .L6: #endif ! --------------------------------------------------------------------- ! while (den < num && bit && !(den & (1L << 31))) ! --------------------------------------------------------------------- slt $ta, $r1, $r0 ! $ta <- den < num ? beqz $ta, .L5 ! if no, skip ! --------------------------------------------------------------------- ! { den << = 1; ! bit << = 1; ! } ! --------------------------------------------------------------------- #if defined (__OPTIMIZE_SIZE__) && !defined (__NDS32_ISA_V3M__) clz $r3, $r1 ! $r3 <- leading zero count for den clz $ta, $r0 ! $ta <- leading zero count for num sub $r3, $r3, $ta ! $r3 <- number of bits to shift sll $r1, $r1, $r3 ! $r1 <- den sll $r4, $r4, $r3 ! $r2 <- bit #else slli $r1, $r1, 1 ! $r1 <- den << = 1 slli $r4, $r4, 1 ! 
$r4 <- bit << = 1 b .L6 ! continue loop #endif .L5: ! --------------------------------------------------------------------- ! while (bit) ! { if (num >= den) ! --------------------------------------------------------------------- slt $ta, $r0, $r1 ! $ta <- num < den ? bnez $ta, .L9 ! if yes, skip ! --------------------------------------------------------------------- ! { num -= den; ! res |= bit; ! } ! --------------------------------------------------------------------- sub $r0, $r0, $r1 ! $r0 <- num -= den or $r2, $r2, $r4 ! $r2 <- res |= bit .L9: ! --------------------------------------------------------------------- ! bit >> = 1; ! den >> = 1; ! } !!if (modwanted) !! return num; !!return res; ! --------------------------------------------------------------------- srli $r4, $r4, 1 ! $r4 <- bit >> = 1 srli $r1, $r1, 1 ! $r1 <- den >> = 1 bnez $r4, .L5 ! if bit != 0, continue loop .L1: ! --------------------------------------------------------------------- ! if (neg) ! res = -res; ! return res; ! --------------------------------------------------------------------- subri $r0, $r2, 0 ! $r0 <- -res cmovz $r0, $r2, $r5 ! $r0 <- neg ? -res : res ! --------------------------------------------------------------------- ret .size __divsi3, .-__divsi3 #endif /* L_divsi3 */ #ifdef L_divdi3 !-------------------------------------- #ifdef __big_endian__ #define V1H $r0 #define V1L $r1 #define V2H $r2 #define V2L $r3 #else #define V1H $r1 #define V1L $r0 #define V2H $r3 #define V2L $r2 #endif !-------------------------------------- .text .align 2 .globl __divdi3 .type __divdi3, @function __divdi3: ! prologue #ifdef __NDS32_ISA_V3M__ push25 $r10, 0 #else smw.adm $r6, [$sp], $r10, 2 #endif ! end of prologue move $r8, V1L move $r9, V1H move $r6, V2L move $r7, V2H movi $r10, 0 bgez V1H, .L80 bal __negdi2 move $r8, V1L move $r9, V1H movi $r10, -1 .L80: bgez $r7, .L81 move V1L, $r6 move V1H, $r7 bal __negdi2 move $r6, V1L move $r7, V1H nor $r10, $r10, $r10 .L81: move V2L, $r6 move V2H, $r7 move V1L, $r8 move V1H, $r9 movi $r4, 0 bal __udivmoddi4 beqz $r10, .L82 bal __negdi2 .L82: ! epilogue #ifdef __NDS32_ISA_V3M__ pop25 $r10, 0 #else lmw.bim $r6, [$sp], $r10, 2 ret #endif .size __divdi3, .-__divdi3 #endif /* L_divdi3 */ #ifdef L_modsi3 .text .align 2 .globl __modsi3 .type __modsi3, @function __modsi3: ! --------------------------------------------------------------------- ! neg=0; ! if (a<0) ! { a=-a; ! neg=1; ! } ! --------------------------------------------------------------------- sltsi $r5, $r0, 0 ! $r5 <- neg < 0 ? 1 : 0 subri $r4, $r0, 0 ! $r4 <- -a cmovn $r0, $r4, $r5 ! $r0 <- |a| ! --------------------------------------------------------------------- ! if (b < 0) #ifndef __NDS32_PERF_EXT__ ! --------------------------------------------------------------------- bgez $r1, .L3 ! if b >= 0, skip ! --------------------------------------------------------------------- ! b = -b; ! --------------------------------------------------------------------- subri $r1, $r1, 0 ! $r1 <- |b| .L3: ! --------------------------------------------------------------------- !!res = udivmodsi4 (a, b, 1); ! if (den != 0) ! --------------------------------------------------------------------- #else /* __NDS32_PERF_EXT__ */ ! b = -b; !!res = udivmodsi4 (a, b, 1); ! if (den != 0) ! --------------------------------------------------------------------- abs $r1, $r1 ! $r1 <- |b| #endif /* __NDS32_PERF_EXT__ */ beqz $r1, .L1 ! if den == 0, skip ! --------------------------------------------------------------------- ! { bit = 1; ! 
res = 0; ! --------------------------------------------------------------------- movi $r4, 1 ! $r4 <- bit = 1 #ifndef __OPTIMIZE_SIZE__ .L6: #endif ! --------------------------------------------------------------------- ! while (den < num&&bit && !(den & (1L << 31))) ! --------------------------------------------------------------------- slt $ta, $r1, $r0 ! $ta <- den < num ? beqz $ta, .L5 ! if no, skip ! --------------------------------------------------------------------- ! { den << = 1; ! bit << = 1; ! } ! --------------------------------------------------------------------- #if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__) clz $r3, $r1 ! $r3 <- leading zero count for den clz $ta, $r0 ! $ta <- leading zero count for num sub $r3, $r3, $ta ! $r3 <- number of bits to shift sll $r1, $r1, $r3 ! $r1 <- den sll $r4, $r4, $r3 ! $r2 <- bit #else slli $r1, $r1, 1 ! $r1 <- den << = 1 slli $r4, $r4, 1 ! $r4 <- bit << = 1 b .L6 ! continue loop #endif .L5: ! --------------------------------------------------------------------- ! while (bit) ! { if (num >= den) ! { num -= den; ! res |= bit; ! } ! bit >> = 1; ! den >> = 1; ! } ! } !!if (modwanted) !! return num; !!return res; ! --------------------------------------------------------------------- sub $r2, $r0, $r1 ! $r2 <- num - den slt $ta, $r0, $r1 ! $ta <- num < den ? srli $r4, $r4, 1 ! $r4 <- bit >> = 1 cmovz $r0, $r2, $ta ! $r0 <- num = (num < den) ? num : num - den srli $r1, $r1, 1 ! $r1 <- den >> = 1 bnez $r4, .L5 ! if bit != 0, continue loop .L1: ! --------------------------------------------------------------------- ! if (neg) ! res = -res; ! return res; ! --------------------------------------------------------------------- subri $r3, $r0, 0 ! $r3 <- -res cmovn $r0, $r3, $r5 ! $r0 <- neg ? -res : res ! --------------------------------------------------------------------- ret .size __modsi3, .-__modsi3 #endif /* L_modsi3 */ #ifdef L_moddi3 !-------------------------------------- #ifdef __big_endian__ #define V1H $r0 #define V1L $r1 #define V2H $r2 #define V2L $r3 #else #define V1H $r1 #define V1L $r0 #define V2H $r3 #define V2L $r2 #endif !-------------------------------------- .text .align 2 .globl __moddi3 .type __moddi3, @function __moddi3: ! ===================================================================== ! stack allocation: ! sp+32 +-----------------------+ ! | $lp | ! sp+28 +-----------------------+ ! | $r6 - $r10 | ! sp+8 +-----------------------+ ! | | ! sp+4 +-----------------------+ ! | | ! sp +-----------------------+ ! ===================================================================== ! prologue #ifdef __NDS32_ISA_V3M__ push25 $r10, 8 #else smw.adm $r6, [$sp], $r10, 2 addi $sp, $sp, -8 #endif ! end of prologue !------------------------------------------ ! __moddi3 (DWtype u, DWtype v) ! { ! word_type c = 0; ! DWunion uu = {.ll = u}; ! DWunion vv = {.ll = v}; ! DWtype w; ! if (uu.s.high < 0) ! c = ~c, ! uu.ll = -uu.ll; !--------------------------------------------- move $r8, V1L move $r9, V1H move $r6, V2L move $r7, V2H movi $r10, 0 ! r10 = c = 0 bgez V1H, .L80 ! if u > 0 , go L80 bal __negdi2 move $r8, V1L move $r9, V1H movi $r10, -1 ! r10 = c = ~c !------------------------------------------------ ! if (vv.s.high < 0) ! vv.ll = -vv.ll; !---------------------------------------------- .L80: bgez $r7, .L81 ! if v > 0 , go L81 move V1L, $r6 move V1H, $r7 bal __negdi2 move $r6, V1L move $r7, V1H !------------------------------------------ ! (void) __udivmoddi4 (uu.ll, vv.ll, &w); ! if (c) ! w = -w; ! 
return w; !----------------------------------------- .L81: move V2L, $r6 move V2H, $r7 move V1L, $r8 move V1H, $r9 addi $r4, $sp, 0 bal __udivmoddi4 lwi $r0, [$sp+(0)] ! le: sp + 0 is low, be: sp + 0 is high lwi $r1, [$sp+(4)] ! le: sp + 4 is low, be: sp + 4 is high beqz $r10, .L82 bal __negdi2 .L82: ! epilogue #ifdef __NDS32_ISA_V3M__ pop25 $r10, 8 #else addi $sp, $sp, 8 lmw.bim $r6, [$sp], $r10, 2 ret #endif .size __moddi3, .-__moddi3 #endif /* L_moddi3 */ #ifdef L_mulsi3 .text .align 2 .globl __mulsi3 .type __mulsi3, @function __mulsi3: ! --------------------------------------------------------------------- ! r = 0; ! while (a) ! $r0: r ! $r1: b ! $r2: a ! --------------------------------------------------------------------- beqz $r0, .L7 ! if a == 0, done move $r2, $r0 ! $r2 <- a movi $r0, 0 ! $r0 <- r <- 0 .L8: ! --------------------------------------------------------------------- ! { if (a & 1) ! r += b; ! a >> = 1; ! b << = 1; ! } ! $r0: r ! $r1: b ! $r2: a ! $r3: scratch ! $r4: scratch ! --------------------------------------------------------------------- andi $r3, $r2, 1 ! $r3 <- a & 1 add $r4, $r0, $r1 ! $r4 <- r += b cmovn $r0, $r4, $r3 ! $r0 <- r srli $r2, $r2, 1 ! $r2 <- a >> = 1 slli $r1, $r1, 1 ! $r1 <- b << = 1 bnez $r2, .L8 ! if a != 0, continue loop .L7: ! --------------------------------------------------------------------- ! $r0: return code ! --------------------------------------------------------------------- ret .size __mulsi3, .-__mulsi3 #endif /* L_mulsi3 */ #ifdef L_udivsi3 .text .align 2 .globl __udivsi3 .type __udivsi3, @function __udivsi3: ! --------------------------------------------------------------------- !!res=udivmodsi4(a,b,0); ! res=0; ! if (den!=0) ! --------------------------------------------------------------------- movi $r2, 0 ! $r2 <- res=0 beqz $r1, .L1 ! if den==0, skip ! --------------------------------------------------------------------- ! { bit=1; ! --------------------------------------------------------------------- movi $r4, 1 ! $r4 <- bit=1 #ifndef __OPTIMIZE_SIZE__ .L6: #endif ! --------------------------------------------------------------------- ! while (den<num ! --------------------------------------------------------------------- slt $ta, $r1, $r0 ! $ta <- den<num? beqz $ta, .L5 ! if no, skip ! --------------------------------------------------------------------- ! &&bit&&!(den&(1L<<31))) ! --------------------------------------------------------------------- bltz $r1, .L5 ! if den<0, skip ! --------------------------------------------------------------------- ! { den<<=1; ! bit<<=1; ! } ! --------------------------------------------------------------------- #if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__) clz $r3, $r1 ! $r3 <- leading zero count for den clz $ta, $r0 ! $ta <- leading zero count for num sub $r3, $r3, $ta ! $r3 <- number of bits to shift sll $r1, $r1, $r3 ! $r1 <- den sll $r2, $r2, $r3 ! $r2 <- bit #else slli $r1, $r1, 1 ! $r1 <- den<<=1 slli $r4, $r4, 1 ! $r4 <- bit<<=1 b .L6 ! continue loop #endif .L5: ! --------------------------------------------------------------------- ! while (bit) ! { if (num>=den) ! --------------------------------------------------------------------- slt $ta, $r0, $r1 ! $ta <- num<den? bnez $ta, .L9 ! if yes, skip ! --------------------------------------------------------------------- ! { num-=den; ! res|=bit; ! } ! --------------------------------------------------------------------- sub $r0, $r0, $r1 ! $r0 <- num-=den or $r2, $r2, $r4 ! $r2 <- res|=bit .L9: ! 
--------------------------------------------------------------------- ! bit>>=1; ! den>>=1; ! } ! } !!if (modwanted) !! return num; !!return res; ! --------------------------------------------------------------------- srli $r4, $r4, 1 ! $r4 <- bit>>=1 srli $r1, $r1, 1 ! $r1 <- den>>=1 bnez $r4, .L5 ! if bit!=0, continue loop .L1: ! --------------------------------------------------------------------- ! return res; ! --------------------------------------------------------------------- move $r0, $r2 ! $r0 <- return value ! --------------------------------------------------------------------- ! --------------------------------------------------------------------- ret .size __udivsi3, .-__udivsi3 #endif /* L_udivsi3 */ #ifdef L_udivdi3 !-------------------------------------- #ifdef __big_endian__ #define V1H $r0 #define V1L $r1 #define V2H $r2 #define V2L $r3 #else #define V1H $r1 #define V1L $r0 #define V2H $r3 #define V2L $r2 #endif !-------------------------------------- .text .align 2 .globl __udivdi3 .type __udivdi3, @function __udivdi3: ! prologue #ifdef __NDS32_ISA_V3M__ push25 $r8, 0 #else smw.adm $r6, [$sp], $r8, 2 #endif ! end of prologue movi $r4, 0 bal __udivmoddi4 ! epilogue #ifdef __NDS32_ISA_V3M__ pop25 $r8, 0 #else lmw.bim $r6, [$sp], $r8, 2 ret #endif .size __udivdi3, .-__udivdi3 #endif /* L_udivdi3 */ #ifdef L_udivmoddi4 .text .align 2 .globl fudiv_qrnnd .type fudiv_qrnnd, @function #ifdef __big_endian__ #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define W6H $r4 #define W6L $r5 #define OFFSET_L 4 #define OFFSET_H 0 #else #define P1H $r1 #define P1L $r0 #define P2H $r3 #define P2L $r2 #define W6H $r5 #define W6L $r4 #define OFFSET_L 0 #define OFFSET_H 4 #endif fudiv_qrnnd: !------------------------------------------------------ ! function: fudiv_qrnnd(quotient, remainder, high_numerator, low_numerator, denominator) ! divides a UDWtype, composed by the UWtype integers,HIGH_NUMERATOR (from $r4) ! and LOW_NUMERATOR(from $r5) by DENOMINATOR(from $r6), and places the quotient ! in $r7 and the remainder in $r8. !------------------------------------------------------ ! in reg:$r4(n1), $r5(n0), $r6(d0) ! __d1 = ((USItype) (d) >> ((4 * 8) / 2)); ! __d0 = ((USItype) (d) & (((USItype) 1 << ((4 * 8) / 2)) - 1)); ! __r1 = (n1) % __d1; ! __q1 = (n1) / __d1; ! __m = (USItype) __q1 * __d0; ! __r1 = __r1 * ((USItype) 1 << ((4 * 8) / 2)) | ((USItype) (n0) >> ((4 * 8) / 2)); ! if (__r1 < __m) ! { !------------------------------------------------------ smw.adm $r0, [$sp], $r4, 2 ! store $lp, when use BASELINE_V1,and must store $r0-$r3 srli $r7, $r6, 16 ! $r7 = d1 =__ll_highpart (d) movi $ta, 65535 and $r8, $r6, $ta ! $r8 = d0 = __ll_lowpart (d) divr $r9, $r10, $r4, $r7 ! $r9 = q1, $r10 = r1 and $r4, $r5, $ta ! $r4 = __ll_lowpart (n0) slli $r10, $r10, 16 ! $r10 = r1 << 16 srli $ta, $r5, 16 ! $ta = __ll_highpart (n0) or $r10, $r10, $ta ! $r10 <- $r0|$r3=__r1 mul $r5, $r9, $r8 ! $r5 = m = __q1*__d0 slt $ta, $r10, $r5 ! $ta <- __r1<__m beqz $ta, .L2 !if yes,skip !------------------------------------------------------ ! __q1--, __r1 += (d); ! if (__r1 >= (d)) ! { !------------------------------------------------------ add $r10, $r10, $r6 !$r10 <- __r1+d=__r1 addi $r9, $r9, -1 !$r9 <- __q1--=__q1 slt $ta, $r10, $r6 !$ta <- __r1<d bnez $ta, .L2 !if yes,skip !------------------------------------------------------ ! if (__r1 < __m) ! 
{ !------------------------------------------------------ slt $ta, $r10, $r5 !$ta <- __r1<__m beqz $ta, .L2 !if yes,skip !------------------------------------------------------ ! __q1--, __r1 += (d); ! } ! } ! } !------------------------------------------------------ addi $r9, $r9, -1 !$r9 <- __q1--=__q1 add $r10, $r10, $r6 !$r2 <- __r1+d=__r1 .L2: !------------------------------------------------------ ! __r1 -= __m; ! __r0 = __r1 % __d1; ! __q0 = __r1 / __d1; ! __m = (USItype) __q0 * __d0; ! __r0 = __r0 * ((USItype) 1 << ((4 * 8) / 2)) \ ! | ((USItype) (n0) & (((USItype) 1 << ((4 * 8) / 2)) - 1)); ! if (__r0 < __m) ! { !------------------------------------------------------ sub $r10, $r10, $r5 !$r10 <- __r1-__m=__r1 divr $r7, $r10, $r10, $r7 !$r7 <- r1/__d1=__q0,$r10 <- r1%__d1=__r0 slli $r10, $r10, 16 !$r10 <- __r0<<16 mul $r5, $r8, $r7 !$r5 <- __q0*__d0=__m or $r10, $r4, $r10 !$r3 <- $r0|__ll_lowpart (n0) =__r0 slt $ta, $r10, $r5 !$ta <- __r0<__m beqz $ta, .L5 !if yes,skip !------------------------------------------------------ ! __q0--, __r0 += (d); ! if (__r0 >= (d)) ! { !------------------------------------------------------ add $r10, $r10, $r6 !$r10 <- __r0+d=__r0 addi $r7, $r7, -1 !$r7 <- __q0--=__q0 slt $ta, $r10, $r6 !$ta <- __r0<d bnez $ta, .L5 !if yes,skip !------------------------------------------------------ ! if (__r0 < __m) ! { !------------------------------------------------------ slt $ta, $r10, $r5 !$ta <- __r0<__m beqz $ta, .L5 !if yes,skip !------------------------------------------------------ ! __q0--, __r0 += (d); ! } ! } ! } !------------------------------------------------------ add $r10, $r10, $r6 !$r3 <- __r0+d=__r0 addi $r7, $r7, -1 !$r2 <- __q0--=__q0 .L5: !------------------------------------------------------ ! __r0 -= __m; ! *q = (USItype) __q1 * ((USItype) 1 << ((4 * 8) / 2)) | __q0; ! *r = __r0; !} !------------------------------------------------------ sub $r8, $r10, $r5 !$r8 = r = r0 = __r0-__m slli $r9, $r9, 16 !$r9 <- __q1<<16 or $r7, $r9, $r7 !$r7 = q = $r9|__q0 lmw.bim $r0, [$sp], $r4, 2 ret .size fudiv_qrnnd, .-fudiv_qrnnd .align 2 .globl __udivmoddi4 .type __udivmoddi4, @function __udivmoddi4: ! ===================================================================== ! stack allocation: ! sp+40 +------------------+ ! | q1 | ! sp+36 +------------------+ ! | q0 | ! sp+32 +------------------+ ! | bm | ! sp+28 +------------------+ ! | $lp | ! sp+24 +------------------+ ! | $fp | ! sp+20 +------------------+ ! | $r6 - $r10 | ! sp +------------------+ ! ===================================================================== addi $sp, $sp, -40 smw.bi $r6, [$sp], $r10, 10 !------------------------------------------------------ ! d0 = dd.s.low; ! d1 = dd.s.high; ! n0 = nn.s.low; ! n1 = nn.s.high; ! if (d1 == 0) ! { !------------------------------------------------------ move $fp, $r4 !$fp <- rp bnez P2H, .L9 !if yes,skip !------------------------------------------------------ ! if (d0 > n1) ! { !------------------------------------------------------ slt $ta, P1H, P2L !$ta <- n1<d0 beqz $ta, .L10 !if yes,skip #ifndef __NDS32_PERF_EXT__ smw.adm $r0, [$sp], $r5, 0 move $r0, P2L bal __clzsi2 move $r7, $r0 lmw.bim $r0, [$sp], $r5, 0 #else clz $r7, P2L #endif swi $r7, [$sp+(28)] beqz $r7, .L18 !if yes,skip !------------------------------------------------------ ! d0 = d0 << bm; ! n1 = (n1 << bm) | (n0 >> ((4 * 8) - bm)); ! n0 = n0 << bm; ! 
} !------------------------------------------------------ subri $r5, $r7, 32 !$r5 <- 32-bm srl $r5, P1L, $r5 !$r5 <- n0>>$r5 sll $r6, P1H, $r7 !$r6 <- n1<<bm or P1H, $r6, $r5 !P2h <- $r5|$r6=n1 sll P1L, P1L, $r7 !P1H <- n0<<bm=n0 sll P2L, P2L, $r7 !P2L <- d0<<bm=d0 .L18: !------------------------------------------------------ ! fudiv_qrnnd (&q0, &n0, n1, n0, d0); ! q1 = 0; ! } #if (d0 > n1) !------------------------------------------------------ move $r4,P1H ! give fudiv_qrnnd args move $r5,P1L ! move $r6,P2L ! bal fudiv_qrnnd !calcaulte q0 n0 movi $r6, 0 !P1L <- 0 swi $r7,[$sp+32] !q0 swi $r6,[$sp+36] !q1 move P1L,$r8 !n0 b .L19 .L10: !------------------------------------------------------ ! else #if (d0 > n1) ! { ! if(d0 == 0) !------------------------------------------------------ bnez P2L, .L20 !if yes,skip !------------------------------------------------------ ! d0 = 1 / d0; !------------------------------------------------------ movi $r4, 1 !P1L <- 1 divr P2L, $r4, $r4, P2L !$r9=1/d0,P1L=1%d0 .L20: #ifndef __NDS32_PERF_EXT__ smw.adm $r0, [$sp], $r5, 0 move $r0, P2L bal __clzsi2 move $r7, $r0 lmw.bim $r0, [$sp], $r5, 0 #else clz $r7, P2L #endif swi $r7,[$sp+(28)] ! store bm beqz $r7, .L28 ! if yes,skip !------------------------------------------------------ ! b = (4 * 8) - bm; ! d0 = d0 << bm; ! n2 = n1 >> b; ! n1 = (n1 << bm) | (n0 >> b); ! n0 = n0 << bm; ! fudiv_qrnnd (&q1, &n1, n2, n1, d0); ! } !------------------------------------------------------ subri $r10, $r7, 32 !$r10 <- 32-bm=b srl $r4, P1L, $r10 !$r4 <- n0>>b sll $r5, P1H, $r7 !$r5 <- n1<<bm or $r5, $r5, $r4 !$r5 <- $r5|$r4=n1 !for fun sll P2L, P2L, $r7 !P2L <- d0<<bm=d0 !for fun sll P1L, P1L, $r7 !P1L <- n0<<bm=n0 srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 !for fun move $r6,P2L !for fun bal fudiv_qrnnd !caculate q1, n1 swi $r7,[$sp+(36)] ! q1 store move P1H,$r8 ! n1 store move $r4,$r8 ! prepare for next fudiv_qrnnd() move $r5,P1L move $r6,P2L b .L29 .L28: !------------------------------------------------------ ! else // bm != 0 ! { ! n1 -= d0; ! q1 = 1; ! !------------------------------------------------------ sub P1H, P1H, P2L !P1L <- n1-d0=n1 movi $ta, 1 ! swi $ta, [$sp+(36)] !1 -> [$sp+(36)] move $r4,P1H ! give fudiv_qrnnd args move $r5,P1L move $r6,P2L .L29: !------------------------------------------------------ ! fudiv_qrnnd (&q0, &n0, n1, n0, d0); !------------------------------------------------------ bal fudiv_qrnnd !calcuate q0, n0 swi $r7,[$sp+(32)] !q0 store move P1L,$r8 !n0 .L19: !------------------------------------------------------ ! if (rp != 0) ! { !------------------------------------------------------ beqz $fp, .L31 !if yes,skip !------------------------------------------------------ ! rr.s.low = n0 >> bm; ! rr.s.high = 0; ! *rp = rr.ll; ! } !------------------------------------------------------ movi $r5, 0 !$r5 <- 0 lwi $r7,[$sp+(28)] !load bm srl $r4, P1L, $r7 !$r4 <- n0>>bm swi $r4, [$fp+OFFSET_L] !r0 !$r4 -> [$sp+(48)] swi $r5, [$fp+OFFSET_H] !r1 !0 -> [$sp+(52)] b .L31 .L9: !------------------------------------------------------ ! else # d1 == 0 ! { ! if(d1 > n1) ! { !------------------------------------------------------ slt $ta, P1H, P2H !$ta <- n1<d1 beqz $ta, .L32 !if yes,skip !------------------------------------------------------ ! q0 = 0; ! q1 = 0; ! if (rp != 0) ! 
{ !------------------------------------------------------ movi $r5, 0 !$r5 <- 0 swi $r5, [$sp+(32)] !q0 !0 -> [$sp+(40)]=q1 swi $r5, [$sp+(36)] !q1 !0 -> [$sp+(32)]=q0 beqz $fp, .L31 !if yes,skip !------------------------------------------------------ ! rr.s.low = n0; ! rr.s.high = n1; ! *rp = rr.ll; ! } !------------------------------------------------------ swi P1L, [$fp+OFFSET_L] !P1L -> [rp] swi P1H, [$fp+OFFSET_H] !P1H -> [rp+4] b .L31 .L32: #ifndef __NDS32_PERF_EXT__ smw.adm $r0, [$sp], $r5, 0 move $r0, P2H bal __clzsi2 move $r7, $r0 lmw.bim $r0, [$sp], $r5, 0 #else clz $r7,P2H #endif swi $r7,[$sp+(28)] !$r7=bm store beqz $r7, .L42 !if yes,skip !------------------------------------------------------ ! USItype m1, m0; ! b = (4 * 8) - bm; ! d1 = (d0 >> b) | (d1 << bm); ! d0 = d0 << bm; ! n2 = n1 >> b; ! n1 = (n0 >> b) | (n1 << bm); ! n0 = n0 << bm; ! fudiv_qrnnd (&q0, &n1, n2, n1, d1); !------------------------------------------------------ subri $r10, $r7, 32 !$r10 <- 32-bm=b srl $r5, P2L, $r10 !$r5 <- d0>>b sll $r6, P2H, $r7 !$r6 <- d1<<bm or $r6, $r5, $r6 !$r6 <- $r5|$r6=d1 !! func move P2H, $r6 !P2H <- d1 srl $r4, P1H, $r10 !$r4 <- n1>>b=n2 !!! func srl $r8, P1L, $r10 !$r8 <- n0>>b !!$r8 sll $r9, P1H, $r7 !$r9 <- n1<<bm or $r5, $r8, $r9 !$r5 <- $r8|$r9=n1 !func sll P2L, P2L, $r7 !P2L <- d0<<bm=d0 sll P1L, P1L, $r7 !P1L <- n0<<bm=n0 bal fudiv_qrnnd ! cal q0,n1 swi $r7,[$sp+(32)] move P1H,$r8 ! fudiv_qrnnd (&q0, &n1, n2, n1, d1); move $r6, $r7 ! from func !---------------------------------------------------- ! #umul_ppmm (m1, m0, q0, d0); ! do ! { USItype __x0, __x1, __x2, __x3; ! USItype __ul, __vl, __uh, __vh; ! __ul = ((USItype) (q0) & (((USItype) 1 << ((4 * 8) / 2)) - 1)); ! __uh = ((USItype) (q0) >> ((4 * 8) / 2)); ! __vl = ((USItype) (d0) & (((USItype) 1 << ((4 * 8) / 2)) - 1)); ! __vh = ((USItype) (d0) >> ((4 * 8) / 2)); ! __x0 = (USItype) __ul * __vl; ! __x1 = (USItype) __ul * __vh; ! __x2 = (USItype) __uh * __vl; ! __x3 = (USItype) __uh * __vh; ! __x1 += ((USItype) (__x0) >> ((4 * 8) / 2)); ! __x1 += __x2; ! if (__x1 < __x2) ! __x3 += ((USItype) 1 << ((4 * 8) / 2)); ! (m1) = __x3 + ((USItype) (__x1) >> ((4 * 8) / 2)); ! (m0) = (USItype)(q0*d0); ! } ! if (m1 > n1) !--------------------------------------------------- #ifdef __NDS32_ISA_V3M__ !mulr64 $r4, P2L, $r6 smw.adm $r0, [$sp], $r3, 0 move P1L, P2L move P2L, $r6 movi P1H, 0 movi P2H, 0 bal __muldi3 movd44 $r4, $r0 lmw.bim $r0, [$sp], $r3, 0 move $r8, W6H move $r5, W6L #else mulr64 $r4, P2L, $r6 move $r8, W6H move $r5, W6L #endif slt $ta, P1H, $r8 !$ta <- n1<m1 bnez $ta, .L46 !if yes,skip !------------------------------------------------------ ! if(m1 == n1) !------------------------------------------------------ bne $r8, P1H, .L45 !if yes,skip !------------------------------------------------------ ! if(m0 > n0) !------------------------------------------------------ slt $ta, P1L, $r5 !$ta <- n0<m0 beqz $ta, .L45 !if yes,skip .L46: !------------------------------------------------------ ! { ! q0--; ! # sub_ddmmss (m1, m0, m1, m0, d1, d0); ! do ! { USItype __x; ! __x = (m0) - (d0); ! (m1) = (m1) - (d1) - (__x > (m0)); ! (m0) = __x; ! } ! } !------------------------------------------------------ sub $r4, $r5, P2L !$r4 <- m0-d0=__x addi $r6, $r6, -1 !$r6 <- q0--=q0 sub $r8, $r8, P2H !$r8 <- m1-d1 swi $r6, [$sp+(32)] ! q0 !$r6->[$sp+(32)] slt $ta, $r5, $r4 !$ta <- m0<__x sub $r8, $r8, $ta !$r8 <- P1H-P1L=m1 move $r5, $r4 !$r5 <- __x=m0 .L45: !------------------------------------------------------ ! q1 = 0; ! if (rp != 0) ! 
{ !------------------------------------------------------ movi $r4, 0 !$r4 <- 0 swi $r4, [$sp+(36)] !0 -> [$sp+(40)]=q1 beqz $fp, .L31 !if yes,skip !------------------------------------------------------ ! # sub_ddmmss (n1, n0, n1, n0, m1, m0); ! do ! { USItype __x; ! __x = (n0) - (m0); ! (n1) = (n1) - (m1) - (__x > (n0)); ! (n0) = __x; ! } ! rr.s.low = (n1 << b) | (n0 >> bm); ! rr.s.high = n1 >> bm; ! *rp = rr.ll; !------------------------------------------------------ sub $r4, P1H, $r8 !$r4 <- n1-m1 sub $r6, P1L, $r5 !$r6 <- n0-m0=__x=n0 slt $ta, P1L, $r6 !$ta <- n0<__x sub P1H, $r4, $ta !P1H <- $r4-$ta=n1 move P1L, $r6 lwi $r7,[$sp+(28)] ! load bm subri $r10,$r7,32 sll $r4, P1H, $r10 !$r4 <- n1<<b srl $r5, P1L, $r7 !$r5 <- __x>>bm or $r6, $r5, $r4 !$r6 <- $r5|$r4=rr.s.low srl $r8, P1H, $r7 !$r8 <- n1>>bm =rr.s.high swi $r6, [$fp+OFFSET_L] ! swi $r8, [$fp+OFFSET_H] ! b .L31 .L42: !------------------------------------------------------ ! else ! { ! if(n1 > d1) !------------------------------------------------------ slt $ta, P2H, P1H !$ta <- P2H<P1H bnez $ta, .L52 !if yes,skip !------------------------------------------------------ ! if (n0 >= d0) !------------------------------------------------------ slt $ta, P1L, P2L !$ta <- P1L<P2L bnez $ta, .L51 !if yes,skip !------------------------------------------------------ ! q0 = 1; ! do ! { USItype __x; ! __x = (n0) - (d0); ! (n1) = (n1) - (d1) - (__x > (n0)); ! (n0) = __x; ! } !------------------------------------------------------ .L52: sub $r4, P1H, P2H !$r4 <- P1H-P2H sub $r6, P1L, P2L !$r6 <- no-d0=__x=n0 slt $ta, P1L, $r6 !$ta <- no<__x sub P1H, $r4, $ta !P1H <- $r4-$ta=n1 move P1L, $r6 !n0 movi $r5, 1 ! swi $r5, [$sp+(32)] !1 -> [$sp+(32)]=q0 b .L54 .L51: !------------------------------------------------------ ! q0 = 0; !------------------------------------------------------ movi $r5,0 swi $r5, [$sp+(32)] !$r5=0 -> [$sp+(32)] .L54: !------------------------------------------------------ ! q1 = 0; ! if (rp != 0) ! { !------------------------------------------------------ movi $r5, 0 ! swi $r5, [$sp+(36)] !0 -> [$sp+(36)] beqz $fp, .L31 !------------------------------------------------------ ! rr.s.low = n0; ! rr.s.high = n1; ! *rp = rr.ll; ! } !------------------------------------------------------ swi P1L, [$fp+OFFSET_L] !remainder swi P1H, [$fp+OFFSET_H] ! .L31: !------------------------------------------------------ ! const DWunion ww = {{.low = q0, .high = q1}}; ! return ww.ll; !} !------------------------------------------------------ lwi P1L, [$sp+(32)] !quotient lwi P1H, [$sp+(36)] lmw.bim $r6, [$sp], $r10, 10 addi $sp, $sp, 12 ret .size __udivmoddi4, .-__udivmoddi4 #endif /* L_udivmoddi4 */ #ifdef L_umodsi3 ! ===================================================================== .text .align 2 .globl __umodsi3 .type __umodsi3, @function __umodsi3: ! --------------------------------------------------------------------- !!res=udivmodsi4(a,b,1); ! if (den==0) ! return num; ! --------------------------------------------------------------------- beqz $r1, .L1 ! if den==0, skip ! --------------------------------------------------------------------- ! bit=1; ! res=0; ! --------------------------------------------------------------------- movi $r4, 1 ! $r4 <- bit=1 #ifndef __OPTIMIZE_SIZE__ .L6: #endif ! --------------------------------------------------------------------- ! while (den<num ! --------------------------------------------------------------------- slt $ta, $r1, $r0 ! $ta <- den<num? beqz $ta, .L5 ! if no, skip ! 
---------------------------------------------------------------------
	! &&bit&&!(den&(1L<<31)))
	! ---------------------------------------------------------------------
	bltz	$r1, .L5		! if den<0, skip
	! ---------------------------------------------------------------------
	!   { den<<=1;
	!     bit<<=1;
	!   }
	! ---------------------------------------------------------------------
#if defined (__OPTIMIZE_SIZE__) && ! defined (__NDS32_ISA_V3M__)
	clz	$r3, $r1		! $r3 <- leading zero count for den
	clz	$ta, $r0		! $ta <- leading zero count for num
	sub	$r3, $r3, $ta		! $r3 <- number of bits to shift
	sll	$r1, $r1, $r3		! $r1 <- den
	sll	$r4, $r4, $r3		! $r4 <- bit
#else
	slli	$r1, $r1, 1		! $r1 <- den<<=1
	slli	$r4, $r4, 1		! $r4 <- bit<<=1
	b	.L6			! continue loop
#endif
.L5:
	! ---------------------------------------------------------------------
	!   while (bit)
	!   { if (num>=den)
	!       { num-=den;
	!         res|=bit;
	!       }
	!     bit>>=1;
	!     den>>=1;
	!   }
	!!if (modwanted)
	!!  return num;
	!!return res;
	! ---------------------------------------------------------------------
	sub	$r2, $r0, $r1		! $r2 <- num-den
	slt	$ta, $r0, $r1		! $ta <- num<den?
	srli	$r4, $r4, 1		! $r4 <- bit>>=1
	cmovz	$r0, $r2, $ta		! $r0 <- num=(num<den)?num:num-den
	srli	$r1, $r1, 1		! $r1 <- den>>=1
	bnez	$r4, .L5		! if bit!=0, continue loop
.L1:
	! ---------------------------------------------------------------------
	! return num;
	! ---------------------------------------------------------------------
	ret
	.size	__umodsi3, .-__umodsi3
#endif /* L_umodsi3 */



#ifdef L_umoddi3

!--------------------------------------
#ifdef __big_endian__
  #define V1H $r0
  #define V1L $r1
  #define V2H $r2
  #define V2L $r3
#else
  #define V1H $r1
  #define V1L $r0
  #define V2H $r3
  #define V2L $r2
#endif
!--------------------------------------
	.text
	.align	2
	.globl	__umoddi3
	.type	__umoddi3, @function
__umoddi3:
	! prologue
	addi	$sp, $sp, -12
	swi	$lp, [$sp+(0)]
	! end of prologue
	addi	$r4, $sp, 4
	bal	__udivmoddi4
	lwi	$r0, [$sp+(4)]		! le: sp + 4 is low,  be: sp + 4 is high
	lwi	$r1, [$sp+(8)]		! le: sp + 8 is high, be: sp + 8 is low
.L82:
	! epilogue
	lwi	$lp, [$sp+(0)]
	addi	$sp, $sp, 12
	ret
	.size	__umoddi3, .-__umoddi3
#endif /* L_umoddi3 */



#ifdef L_muldi3

#ifdef __big_endian__
  #define P1H $r0
  #define P1L $r1
  #define P2H $r2
  #define P2L $r3
  #define V2H $r4
  #define V2L $r5
#else
  #define P1H $r1
  #define P1L $r0
  #define P2H $r3
  #define P2L $r2
  #define V2H $r5
  #define V2L $r4
#endif
! ====================================================================
	.text
	.align	2
	.globl	__muldi3
	.type	__muldi3, @function
__muldi3:
	! parameter passing for libgcc functions normally involves 2 doubles
	!---------------------------------------
#ifdef __NDS32_ISA_V3M__
	! There is no mulr64 instruction in Andes ISA V3M.
	! So we must provide a sequence of calculations to complete the job.
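	! A sketch of what the sequence below computes, in C-like pseudocode
	! (u1:u0 and v1:v0 name the 32-bit halves of the two operands, and
	! lo16()/hi16() are informal helpers for the 16-bit halves, not code
	! in this file; compare the umul_ppmm comment in __udivmoddi4):
	!   x0 = lo16(u0) * lo16(v0);  x1 = lo16(u0) * hi16(v0);
	!   x2 = hi16(u0) * lo16(v0);  x3 = hi16(u0) * hi16(v0);
	!   x1 += x0 >> 16;  x1 += x2;
	!   if (x1 < x2) x3 += 1 << 16;            /* carry out of x1 */
	!   low32  = (x1 << 16) + (x0 & 0xffff);   /* low half of u0*v0 */
	!   high32 = x3 + (x1 >> 16) + u1*v0 + u0*v1;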
smw.adm $r6, [$sp], $r9, 0x0 zeh33 $r4, P1L srli $r7, P1L, 16 zeh33 $r5, P2L mul $r6, $r5, $r4 mul33 $r5, $r7 srli $r8, P2L, 16 mov55 $r9, $r5 maddr32 $r9, $r8, $r4 srli $r4, $r6, 16 add $r4, $r9, $r4 slt45 $r4, $r5 slli $r5, $r15, 16 maddr32 $r5, $r8, $r7 mul P2L, P1H, P2L srli $r7, $r4, 16 maddr32 P2L, P2H, P1L add333 P1H, $r5, $r7 slli $r4, $r4, 16 zeh33 $r6, $r6 add333 P1L, $r4, $r6 add333 P1H, P2L, P1H lmw.bim $r6, [$sp], $r9, 0x0 ret #else /* not __NDS32_ISA_V3M__ */ mul $ta, P1L, P2H mulr64 $r4, P1L, P2L maddr32 $ta, P1H, P2L move P1L, V2L add P1H, $ta, V2H ret #endif /* not __NDS32_ISA_V3M__ */ .size __muldi3, .-__muldi3 #endif /* L_muldi3 */ #ifdef L_addsub_df #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #define P3L $r4 #define P3H $r5 #define O1L $r7 #define O1H $r8 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define P3H $r4 #define P3L $r5 #define O1H $r7 #define O1L $r8 #endif .text .align 2 .global __subdf3 .type __subdf3, @function __subdf3: push $lp pushm $r6, $r10 move $r4, #0x80000000 xor P2H, P2H, $r4 j .Lsdpadd .global __adddf3 .type __adddf3, @function __adddf3: push $lp pushm $r6, $r10 .Lsdpadd: slli $r6, P1H, #1 srli $r6, $r6, #21 slli P3H, P1H, #11 srli $r10, P1L, #21 or P3H, P3H, $r10 slli P3L, P1L, #11 move O1L, #0x80000000 or P3H, P3H, O1L slli $r9, P2H, #1 srli $r9, $r9, #21 slli O1H, P2H, #11 srli $r10, P2L, #21 or O1H, O1H, $r10 or O1H, O1H, O1L slli O1L, P2L, #11 addi $r10, $r6, #-1 slti $r15, $r10, #0x7fe beqzs8 .LEspecA .LElab1: addi $r10, $r9, #-1 slti $r15, $r10, #0x7fe beqzs8 .LEspecB .LElab2: #NORMd($r4, P2L, P1L) bnez P3H, .LL1 bnez P3L, .LL2 move $r6, #0 j .LL3 .LL2: move P3H, P3L move P3L, #0 move P2L, #32 sub $r6, $r6, P2L .LL1: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r2, $r5 #else pushm $r0, $r1 pushm $r3, $r5 move $r0, $r5 bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r3, $r4 #else pushm $r0, $r2 pushm $r4, $r5 move $r0, $r4 bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif #endif /* __big_endian__ */ beqz P2L, .LL3 sub $r6, $r6, P2L subri P1L, P2L, #32 srl P1L, P3L, P1L sll P3L, P3L, P2L sll P3H, P3H, P2L or P3H, P3H, P1L .LL3: #NORMd End #NORMd($r7, P2L, P1L) bnez O1H, .LL4 bnez O1L, .LL5 move $r9, #0 j .LL6 .LL5: move O1H, O1L move O1L, #0 move P2L, #32 sub $r9, $r9, P2L .LL4: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r2, O1H #else pushm $r0, $r1 pushm $r3, $r5 move $r0, O1H bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r3, O1H #else pushm $r0, $r2 pushm $r4, $r5 move $r0, O1H bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif #endif /* __big_endian__ */ beqz P2L, .LL6 sub $r9, $r9, P2L subri P1L, P2L, #32 srl P1L, O1L, P1L sll O1L, O1L, P2L sll O1H, O1H, P2L or O1H, O1H, P1L .LL6: #NORMd End move $r10, #0x80000000 and P1H, P1H, $r10 beq $r6, $r9, .LEadd3 slts $r15, $r9, $r6 beqzs8 .Li1 sub $r9, $r6, $r9 move P2L, #0 .LL7: move $r10, #0x20 slt $r15, $r9, $r10 bnezs8 .LL8 or P2L, P2L, O1L move O1L, O1H move O1H, #0 addi $r9, $r9, #0xffffffe0 bnez O1L, .LL7 .LL8: beqz $r9, .LEadd3 move P1L, O1H move $r10, O1L srl O1L, O1L, $r9 srl O1H, O1H, $r9 subri $r9, $r9, #0x20 sll P1L, P1L, $r9 or O1L, O1L, P1L sll $r10, $r10, $r9 or P2L, P2L, $r10 beqz P2L, .LEadd3 ori O1L, O1L, #1 j .LEadd3 .Li1: move $r15, $r6 move $r6, $r9 sub $r9, $r9, $r15 move P2L, #0 .LL10: move $r10, #0x20 slt $r15, $r9, 
$r10 bnezs8 .LL11 or P2L, P2L, P3L move P3L, P3H move P3H, #0 addi $r9, $r9, #0xffffffe0 bnez P3L, .LL10 .LL11: beqz $r9, .LEadd3 move P1L, P3H move $r10, P3L srl P3L, P3L, $r9 srl P3H, P3H, $r9 subri $r9, $r9, #0x20 sll P1L, P1L, $r9 or P3L, P3L, P1L sll $r10, $r10, $r9 or P2L, P2L, $r10 beqz P2L, .LEadd3 ori P3L, P3L, #1 .LEadd3: xor $r10, P1H, P2H sltsi $r15, $r10, #0 bnezs8 .LEsub1 #ADD(P3L, O1L) add P3L, P3L, O1L slt $r15, P3L, O1L #ADDCC(P3H, O1H) beqzs8 .LL13 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .LL14 addi P3H, P3H, #0x1 j .LL15 .LL14: move $r15, #1 add P3H, P3H, $r15 slt $r15, P3H, $r15 j .LL15 .LL13: add P3H, P3H, O1H slt $r15, P3H, O1H .LL15: beqzs8 .LEres andi $r10, P3L, #1 beqz $r10, .Li3 ori P3L, P3L, #2 .Li3: srli P3L, P3L, #1 slli $r10, P3H, #31 or P3L, P3L, $r10 srli P3H, P3H, #1 move $r10, #0x80000000 or P3H, P3H, $r10 addi $r6, $r6, #1 subri $r15, $r6, #0x7ff bnezs8 .LEres move $r10, #0x7ff00000 or P1H, P1H, $r10 move P1L, #0 j .LEretA .LEsub1: #SUB(P3L, O1L) move $r15, P3L sub P3L, P3L, O1L slt $r15, $r15, P3L #SUBCC(P3H, O1H) beqzs8 .LL16 move $r15, P3H sub P3H, P3H, O1H slt $r15, $r15, P3H beqzs8 .LL17 subi333 P3H, P3H, #1 j .LL18 .LL17: move $r15, P3H subi333 P3H, P3H, #1 slt $r15, $r15, P3H j .LL18 .LL16: move $r15, P3H sub P3H, P3H, O1H slt $r15, $r15, P3H .LL18: beqzs8 .Li5 move $r10, #0x80000000 xor P1H, P1H, $r10 subri P3H, P3H, #0 beqz P3L, .LL19 subri P3L, P3L, #0 subi45 P3H, #1 .LL19: .Li5: #NORMd($r4, $r9, P1L) bnez P3H, .LL20 bnez P3L, .LL21 move $r6, #0 j .LL22 .LL21: move P3H, P3L move P3L, #0 move $r9, #32 sub $r6, $r6, $r9 .LL20: #ifdef __NDS32_PERF_EXT__ clz $r9, P3H #else pushm $r0, $r5 move $r0, P3H bal __clzsi2 move $r9, $r0 popm $r0, $r5 #endif beqz $r9, .LL22 sub $r6, $r6, $r9 subri P1L, $r9, #32 srl P1L, P3L, P1L sll P3L, P3L, $r9 sll P3H, P3H, $r9 or P3H, P3H, P1L .LL22: #NORMd End or $r10, P3H, P3L bnez $r10, .LEres move P1H, #0 .LEres: blez $r6, .LEund .LElab8: #ADD(P3L, $0x400) move $r15, #0x400 add P3L, P3L, $r15 slt $r15, P3L, $r15 #ADDCC(P3H, $0x0) beqzs8 .LL25 add P3H, P3H, $r15 slt $r15, P3H, $r15 .LL25: #ADDC($r6, $0x0) add $r6, $r6, $r15 srli $r10, P3L, #11 andi $r10, $r10, #1 sub P3L, P3L, $r10 srli P1L, P3L, #11 slli $r10, P3H, #21 or P1L, P1L, $r10 slli $r10, P3H, #1 srli $r10, $r10, #12 or P1H, P1H, $r10 slli $r10, $r6, #20 or P1H, P1H, $r10 .LEretA: .LE999: popm $r6, $r10 pop $lp ret5 $lp .LEspecA: #ADD(P3L, P3L) move $r15, P3L add P3L, P3L, P3L slt $r15, P3L, $r15 #ADDC(P3H, P3H) add P3H, P3H, P3H add P3H, P3H, $r15 bnez $r6, .Li7 or $r10, P3H, P3L beqz $r10, .Li8 j .LElab1 .Li8: subri $r15, $r9, #0x7ff beqzs8 .LEspecB add P3L, P2H, P2H or $r10, P3L, P2L bnez $r10, .LEretB sltsi $r15, P2H, #0 bnezs8 .LEretA .LEretB: move P1L, P2L move P1H, P2H j .LE999 .Li7: or $r10, P3H, P3L bnez $r10, .LEnan subri $r15, $r9, #0x7ff bnezs8 .LEretA xor $r10, P1H, P2H sltsi $r15, $r10, #0 bnezs8 .LEnan j .LEretB .LEspecB: #ADD(O1L, O1L) move $r15, O1L add O1L, O1L, O1L slt $r15, O1L, $r15 #ADDC(O1H, O1H) add O1H, O1H, O1H add O1H, O1H, $r15 bnez $r9, .Li11 or $r10, O1H, O1L beqz $r10, .LEretA j .LElab2 .Li11: or $r10, O1H, O1L beqz $r10, .LEretB .LEnan: move P1H, #0xfff80000 move P1L, #0 j .LEretA .LEund: subri $r9, $r6, #1 move P2L, #0 .LL26: move $r10, #0x20 slt $r15, $r9, $r10 bnezs8 .LL27 or P2L, P2L, P3L move P3L, P3H move P3H, #0 addi $r9, $r9, #0xffffffe0 bnez P3L, .LL26 .LL27: beqz $r9, .LL28 move P1L, P3H move $r10, P3L srl P3L, P3L, $r9 srl P3H, P3H, $r9 subri $r9, $r9, #0x20 sll P1L, P1L, $r9 or P3L, P3L, P1L sll $r10, $r10, $r9 
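	! ($r10 holds the significand bits shifted out just above; OR-ing
	! them into P2L below keeps them as a sticky bit, so the rounding
	! at .LElab8 still sees that the denormalized result is inexact.)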
or P2L, P2L, $r10 beqz P2L, .LL28 ori P3L, P3L, #1 .LL28: move $r6, #0 j .LElab8 .size __subdf3, .-__subdf3 .size __adddf3, .-__adddf3 #endif /* L_addsub_df */ #ifdef L_mul_sf #if !defined (__big_endian__) #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #endif .text .align 2 .global __mulsf3 .type __mulsf3, @function __mulsf3: push $lp pushm $r6, $r10 srli $r3, $r0, #23 andi $r3, $r3, #0xff srli $r5, $r1, #23 andi $r5, $r5, #0xff move $r6, #0x80000000 slli $r2, $r0, #8 or $r2, $r2, $r6 slli $r4, $r1, #8 or $r4, $r4, $r6 xor $r8, $r0, $r1 and $r6, $r6, $r8 addi $r8, $r3, #-1 slti $r15, $r8, #0xfe beqzs8 .LFspecA .LFlab1: addi $r8, $r5, #-1 slti $r15, $r8, #0xfe beqzs8 .LFspecB .LFlab2: move $r10, $r3 /* This is a 64-bit multiple. ($r2, $r7) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r2, $r2, $r4 #else pushm $r0, $r1 pushm $r4, $r5 move P1L, $r2 movi P1H, #0 move P2L, $r4 movi P2H, #0 bal __muldi3 movd44 $r2, $r0 popm $r4, $r5 popm $r0, $r1 #endif #ifndef __big_endian__ move $r7, $r2 move $r2, $r3 #else move $r7, $r3 #endif move $r3, $r10 beqz $r7, .Li17 ori $r2, $r2, #1 .Li17: sltsi $r15, $r2, #0 bnezs8 .Li18 slli $r2, $r2, #1 addi $r3, $r3, #-1 .Li18: addi $r8, $r5, #0xffffff82 add $r3, $r3, $r8 addi $r8, $r3, #-1 slti $r15, $r8, #0xfe beqzs8 .LFoveund .LFlab8: #ADD($r2, $0x80) move $r15, #0x80 add $r2, $r2, $r15 slt $r15, $r2, $r15 #ADDC($r3, $0x0) add $r3, $r3, $r15 srli $r8, $r2, #8 andi $r8, $r8, #1 sub $r2, $r2, $r8 slli $r2, $r2, #1 srli $r2, $r2, #9 slli $r8, $r3, #23 or $r2, $r2, $r8 or $r0, $r2, $r6 .LF999: popm $r6, $r10 pop $lp ret5 $lp .LFspecA: bnez $r3, .Li19 add $r2, $r2, $r2 beqz $r2, .Li20 #ifdef __NDS32_PERF_EXT__ clz $r7, $r2 #else pushm $r0, $r5 move $r0, $r2 bal __clzsi2 move $r7, $r0 popm $r0, $r5 #endif sub $r3, $r3, $r7 sll $r2, $r2, $r7 j .LFlab1 .Li20: subri $r15, $r5, #0xff beqzs8 .LFnan j .LFzer .Li19: add $r8, $r2, $r2 bnez $r8, .LFnan bnez $r5, .Li21 add $r8, $r4, $r4 beqz $r8, .LFnan .Li21: subri $r15, $r5, #0xff bnezs8 .LFinf .LFspecB: bnez $r5, .Li22 add $r4, $r4, $r4 beqz $r4, .LFzer #ifdef __NDS32_PERF_EXT__ clz $r7, $r4 #else pushm $r0, $r5 move $r0, $r4 bal __clzsi2 move $r7, $r0 popm $r0, $r5 #endif sub $r5, $r5, $r7 sll $r4, $r4, $r7 j .LFlab2 .LFzer: move $r0, $r6 j .LF999 .Li22: add $r8, $r4, $r4 bnez $r8, .LFnan .LFinf: move $r8, #0x7f800000 or $r0, $r6, $r8 j .LF999 .LFnan: move $r0, #0xffc00000 j .LF999 .LFoveund: bgtz $r3, .LFinf subri $r7, $r3, #1 slti $r15, $r7, #0x20 beqzs8 .LFzer subri $r8, $r7, #0x20 sll $r3, $r2, $r8 srl $r2, $r2, $r7 beqz $r3, .Li25 ori $r2, $r2, #2 .Li25: move $r3, #0 addi $r8, $r2, #0x80 sltsi $r15, $r8, #0 beqzs8 .LFlab8 move $r3, #1 j .LFlab8 .size __mulsf3, .-__mulsf3 #endif /* L_mul_sf */ #ifdef L_mul_df #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #define P3L $r4 #define P3H $r5 #define O1L $r7 #define O1H $r8 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define P3H $r4 #define P3L $r5 #define O1H $r7 #define O1L $r8 #endif .text .align 2 .global __muldf3 .type __muldf3, @function __muldf3: push $lp pushm $r6, $r10 slli $r6, P1H, #1 srli $r6, $r6, #21 slli P3H, P1H, #11 srli $r10, P1L, #21 or P3H, P3H, $r10 slli P3L, P1L, #11 move O1L, #0x80000000 or P3H, P3H, O1L slli $r9, P2H, #1 srli $r9, $r9, #21 slli O1H, P2H, #11 srli $r10, P2L, #21 or O1H, O1H, $r10 or O1H, O1H, O1L xor P1H, P1H, P2H and P1H, P1H, O1L slli O1L, P2L, #11 addi $r10, $r6, #-1 slti 
$r15, $r10, #0x7fe beqzs8 .LFspecA .LFlab1: addi $r10, $r9, #-1 slti $r15, $r10, #0x7fe beqzs8 .LFspecB .LFlab2: addi $r10, $r9, #0xfffffc02 add $r6, $r6, $r10 move $r10, $r8 /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r9, $r3) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r8, $r5, $r8 #else pushm $r0, $r5 move $r0, $r5 movi $r1, #0 move $r2, $r8 movi $r3, #0 bal __muldi3 movd44 $r8, $r0 popm $r0, $r5 #endif move $r3, $r8 #else /* __big_endian__ */ /* For big endain: ($r9, $r2) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r8, $r4, $r7 #else pushm $r0, $r5 move $r1, $r4 movi $r0, #0 move $r3, $r7 movi $r2, #0 bal __muldi3 movd44 $r8, $r0 popm $r0, $r5 #endif move $r2, $r9 move $r9, $r8 #endif /* __big_endian__ */ move $r8, $r10 move $r10, P1H /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r0, $r2) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r4, $r8 #else pushm $r2, $r5 move $r0, $r4 movi $r1, #0 move $r2, $r8 movi $r3, #0 bal __muldi3 popm $r2, $r5 #endif move $r2, $r0 move $r0, $r1 #else /* __big_endian__ */ /* For big endain: ($r1, $r3) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r5, $r7 #else pushm $r2, $r5 move $r1, $r5 movi $r0, #0 move $r3, $r7 movi $r2, #0 bal __muldi3 popm $r2, $r5 #endif move $r3, $r1 move $r1, $r0 #endif /* __big_endian__ */ move P1H, $r10 #ADD(P2H, P1L) add P2H, P2H, P1L slt $r15, P2H, P1L #ADDC($r9, $0x0) add $r9, $r9, $r15 move $r10, P1H /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r0, $r8) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r5, $r7 #else pushm $r2, $r5 move $r0, $r5 movi $r1, #0 move $r2, $r7 movi $r3, #0 bal __muldi3 popm $r2, $r5 #endif move $r8, $r0 move $r0, $r1 #else /* __big_endian__ */ /* For big endian: ($r1, $r7) is (high, low). */ #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r4, $r8 #else pushm $r2, $r5 move $r1, $r4 movi $r0, #0 move $r3, $r8 movi $r2, #0 bal __muldi3 popm $r2, $r5 #endif move $r7, $r1 move $r1, $r0 #endif /* __big_endian__ */ move P1H, $r10 #ADD(P2L, O1H) add P2L, P2L, O1H slt $r15, P2L, O1H #ADDCC(P2H, P1L) beqzs8 .LL29 add P2H, P2H, P1L slt $r15, P2H, P1L beqzs8 .LL30 addi P2H, P2H, #0x1 j .LL31 .LL30: move $r15, #1 add P2H, P2H, $r15 slt $r15, P2H, $r15 j .LL31 .LL29: add P2H, P2H, P1L slt $r15, P2H, P1L .LL31: #ADDC($r9, $0x0) add $r9, $r9, $r15 /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r8, $r0) is (high, low). */ move $r10, $r9 #ifndef __NDS32_ISA_V3M__ mulr64 $r8, $r4, $r7 #else pushm $r0, $r5 move $r0, $r4 movi $r1, #0 move $r2, $r7 movi $r3, #0 bal __muldi3 movd44 $r8, $r0 popm $r0, $r5 #endif move $r0, $r8 move $r8, $r9 move $r9, $r10 #else /* __big_endian__ */ /* For big endian: ($r7, $r1) is (high, low). 
*/ move $r10, $r6 #ifndef __NDS32_ISA_V3M__ mulr64 $r6, $r5, $r8 #else pushm $r0, $r5 move $r1, $r5 movi $r0, #0 move $r3, $r8 movi $r2, #0 bal __muldi3 movd44 $r6, $r0 popm $r0, $r5 #endif move $r1, $r7 move $r7, $r6 move $r6, $r10 #endif /* __big_endian__ */ #ADD(P2L, O1H) add P2L, P2L, O1H slt $r15, P2L, O1H #ADDCC(P2H, $0x0) beqzs8 .LL34 add P2H, P2H, $r15 slt $r15, P2H, $r15 .LL34: #ADDC($r9, $0x0) add $r9, $r9, $r15 or $r10, P1L, P2L beqz $r10, .Li13 ori P2H, P2H, #1 .Li13: move P3H, $r9 move P3L, P2H sltsi $r15, P3H, #0 bnezs8 .Li14 move $r15, P3L add P3L, P3L, P3L slt $r15, P3L, $r15 add P3H, P3H, P3H add P3H, P3H, $r15 addi $r6, $r6, #-1 .Li14: addi $r10, $r6, #-1 slti $r15, $r10, #0x7fe beqzs8 .LFoveund #ADD(P3L, $0x400) move $r15, #0x400 add P3L, P3L, $r15 slt $r15, P3L, $r15 #ADDCC(P3H, $0x0) beqzs8 .LL37 add P3H, P3H, $r15 slt $r15, P3H, $r15 .LL37: #ADDC($r6, $0x0) add $r6, $r6, $r15 .LFlab8: srli $r10, P3L, #11 andi $r10, $r10, #1 sub P3L, P3L, $r10 srli P1L, P3L, #11 slli $r10, P3H, #21 or P1L, P1L, $r10 slli $r10, P3H, #1 srli $r10, $r10, #12 or P1H, P1H, $r10 slli $r10, $r6, #20 or P1H, P1H, $r10 .LFret: .LF999: popm $r6, $r10 pop $lp ret5 $lp .LFspecA: #ADD(P3L, P3L) move $r15, P3L add P3L, P3L, P3L slt $r15, P3L, $r15 #ADDC(P3H, P3H) add P3H, P3H, P3H add P3H, P3H, $r15 bnez $r6, .Li15 or $r10, P3H, P3L beqz $r10, .Li16 #NORMd($r4, P1L, P2H) bnez P3H, .LL38 bnez P3L, .LL39 move $r6, #0 j .LL40 .LL39: move P3H, P3L move P3L, #0 move P1L, #32 sub $r6, $r6, P1L .LL38: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r0, P3H #else pushm $r1, P3H move $r0, P3H bal __clzsi2 popm $r1, $r5 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r1, $r4 #else push $r0 pushm $r2, $r5 move $r0, $r4 bal __clzsi2 move $r1, $r0 popm $r2, $r5 pop $r0 #endif #endif /* __big_endian__ */ beqz P1L, .LL40 sub $r6, $r6, P1L subri P2H, P1L, #32 srl P2H, P3L, P2H sll P3L, P3L, P1L sll P3H, P3H, P1L or P3H, P3H, P2H .LL40: #NORMd End j .LFlab1 .Li16: subri $r15, $r9, #0x7ff beqzs8 .LFnan j .LFret .Li15: or $r10, P3H, P3L bnez $r10, .LFnan bnez $r9, .Li17 slli $r10, O1H, #1 or $r10, $r10, O1L beqz $r10, .LFnan .Li17: subri $r15, $r9, #0x7ff bnezs8 .LFinf .LFspecB: #ADD(O1L, O1L) move $r15, O1L add O1L, O1L, O1L slt $r15, O1L, $r15 #ADDC(O1H, O1H) add O1H, O1H, O1H add O1H, O1H, $r15 bnez $r9, .Li18 or $r10, O1H, O1L beqz $r10, .Li19 #NORMd($r7, P2L, P1L) bnez O1H, .LL41 bnez O1L, .LL42 move $r9, #0 j .LL43 .LL42: move O1H, O1L move O1L, #0 move P2L, #32 sub $r9, $r9, P2L .LL41: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r2, $r8 #else pushm $r0, $r1 pushm $r3, $r5 move $r0, $r8 bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r3, $r7 #else pushm $r0, $r2 pushm $r4, $r5 move $r0, $r7 bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif #endif /* __big_endian__ */ beqz P2L, .LL43 sub $r9, $r9, P2L subri P1L, P2L, #32 srl P1L, O1L, P1L sll O1L, O1L, P2L sll O1H, O1H, P2L or O1H, O1H, P1L .LL43: #NORMd End j .LFlab2 .Li19: move P1L, #0 j .LFret .Li18: or $r10, O1H, O1L bnez $r10, .LFnan .LFinf: move $r10, #0x7ff00000 or P1H, P1H, $r10 move P1L, #0 j .LFret .LFnan: move P1H, #0xfff80000 move P1L, #0 j .LFret .LFoveund: bgtz $r6, .LFinf subri P1L, $r6, #1 move P2L, #0 .LL44: move $r10, #0x20 slt $r15, P1L, $r10 bnezs8 .LL45 or P2L, P2L, P3L move P3L, P3H move P3H, #0 addi P1L, P1L, #0xffffffe0 bnez P3L, .LL44 .LL45: beqz P1L, .LL46 move P2H, P3H move $r10, P3L srl P3L, P3L, P1L srl P3H, P3H, P1L 
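	! (P2H/$r10 still hold the pre-shift significand words; the subri
	! below forms 32-P1L so the shifted-out bits can be re-extracted
	! and OR-ed into the sticky accumulator P2L before the denormal
	! result is rounded at .LL46.)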
subri P1L, P1L, #0x20 sll P2H, P2H, P1L or P3L, P3L, P2H sll $r10, $r10, P1L or P2L, P2L, $r10 beqz P2L, .LL46 ori P3L, P3L, #1 .LL46: #ADD(P3L, $0x400) move $r15, #0x400 add P3L, P3L, $r15 slt $r15, P3L, $r15 #ADDC(P3H, $0x0) add P3H, P3H, $r15 srli $r6, P3H, #31 j .LFlab8 .size __muldf3, .-__muldf3 #endif /* L_mul_df */ #ifdef L_div_sf .text .align 2 .global __divsf3 .type __divsf3, @function __divsf3: push $lp pushm $r6, $r10 move $r7, #0x80000000 srli $r4, $r0, #23 andi $r4, $r4, #0xff srli $r6, $r1, #23 andi $r6, $r6, #0xff slli $r3, $r0, #8 or $r3, $r3, $r7 slli $r5, $r1, #8 or $r5, $r5, $r7 xor $r10, $r0, $r1 and $r7, $r7, $r10 addi $r10, $r4, #-1 slti $r15, $r10, #0xfe beqzs8 .LGspecA .LGlab1: addi $r10, $r6, #-1 slti $r15, $r10, #0xfe beqzs8 .LGspecB .LGlab2: slt $r15, $r3, $r5 bnezs8 .Li27 srli $r3, $r3, #1 addi $r4, $r4, #1 .Li27: srli $r8, $r5, #14 divr $r0, $r2, $r3, $r8 andi $r9, $r5, #0x3fff mul $r1, $r9, $r0 slli $r2, $r2, #14 #SUB($r2, $r1) move $r15, $r2 sub $r2, $r2, $r1 slt $r15, $r15, $r2 beqzs8 .Li28 addi $r0, $r0, #-1 #ADD($r2, $r5) add $r2, $r2, $r5 slt $r15, $r2, $r5 .Li28: divr $r3, $r2, $r2, $r8 mul $r1, $r9, $r3 slli $r2, $r2, #14 #SUB($r2, $r1) move $r15, $r2 sub $r2, $r2, $r1 slt $r15, $r15, $r2 beqzs8 .Li29 addi $r3, $r3, #-1 #ADD($r2, $r5) add $r2, $r2, $r5 slt $r15, $r2, $r5 .Li29: slli $r10, $r0, #14 add $r3, $r3, $r10 slli $r3, $r3, #4 beqz $r2, .Li30 ori $r3, $r3, #1 .Li30: subri $r10, $r6, #0x7e add $r4, $r4, $r10 addi $r10, $r4, #-1 slti $r15, $r10, #0xfe beqzs8 .LGoveund .LGlab8: #ADD($r3, $0x80) move $r15, #0x80 add $r3, $r3, $r15 slt $r15, $r3, $r15 #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r10, $r3, #8 andi $r10, $r10, #1 sub $r3, $r3, $r10 slli $r3, $r3, #1 srli $r3, $r3, #9 slli $r10, $r4, #23 or $r3, $r3, $r10 or $r0, $r3, $r7 .LG999: popm $r6, $r10 pop $lp ret5 $lp .LGspecA: bnez $r4, .Li31 add $r3, $r3, $r3 beqz $r3, .Li31 #ifdef __NDS32_PERF_EXT__ clz $r8, $r3 #else pushm $r0, $r5 move $r0, $r3 bal __clzsi2 move $r8, $r0 popm $r0, $r5 #endif sub $r4, $r4, $r8 sll $r3, $r3, $r8 j .LGlab1 .Li31: bne $r6, $r4, .Li33 add $r10, $r5, $r5 beqz $r10, .LGnan .Li33: subri $r15, $r6, #0xff beqzs8 .LGspecB beqz $r4, .LGzer add $r10, $r3, $r3 bnez $r10, .LGnan j .LGinf .LGspecB: bnez $r6, .Li34 add $r5, $r5, $r5 beqz $r5, .LGinf #ifdef __NDS32_PERF_EXT__ clz $r8, $r5 #else pushm $r0, $r5 move $r0, $r5 bal __clzsi2 move $r8, $r0 popm $r0, $r5 #endif sub $r6, $r6, $r8 sll $r5, $r5, $r8 j .LGlab2 .Li34: add $r10, $r5, $r5 bnez $r10, .LGnan .LGzer: move $r0, $r7 j .LG999 .LGoveund: bgtz $r4, .LGinf subri $r8, $r4, #1 slti $r15, $r8, #0x20 beqzs8 .LGzer subri $r10, $r8, #0x20 sll $r4, $r3, $r10 srl $r3, $r3, $r8 beqz $r4, .Li37 ori $r3, $r3, #2 .Li37: move $r4, #0 addi $r10, $r3, #0x80 sltsi $r15, $r10, #0 beqzs8 .LGlab8 move $r4, #1 j .LGlab8 .LGinf: move $r10, #0x7f800000 or $r0, $r7, $r10 j .LG999 .LGnan: move $r0, #0xffc00000 j .LG999 .size __divsf3, .-__divsf3 #endif /* L_div_sf */ #ifdef L_div_df #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #define P3L $r4 #define P3H $r5 #define O1L $r7 #define O1H $r8 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define P3H $r4 #define P3L $r5 #define O1H $r7 #define O1L $r8 #endif .text .align 2 .global __divdf3 .type __divdf3, @function __divdf3: push $lp pushm $r6, $r10 slli $r6, P1H, #1 srli $r6, $r6, #21 slli P3H, P1H, #11 srli $r10, P1L, #21 or P3H, P3H, $r10 slli P3L, P1L, #11 move O1L, #0x80000000 or P3H, P3H, O1L slli $r9, P2H, #1 srli $r9, 
$r9, #21 slli O1H, P2H, #11 srli $r10, P2L, #21 or O1H, O1H, $r10 or O1H, O1H, O1L xor P1H, P1H, P2H and P1H, P1H, O1L slli O1L, P2L, #11 addi $r10, $r6, #-1 slti $r15, $r10, #0x7fe beqzs8 .LGspecA .LGlab1: addi $r10, $r9, #-1 slti $r15, $r10, #0x7fe beqzs8 .LGspecB .LGlab2: sub $r6, $r6, $r9 addi $r6, $r6, #0x3ff srli P3L, P3L, #1 slli $r10, P3H, #31 or P3L, P3L, $r10 srli P3H, P3H, #1 srli $r9, O1H, #16 divr P2H, P3H, P3H, $r9 move $r10, #0xffff and P2L, O1H, $r10 mul P1L, P2L, P2H slli P3H, P3H, #16 srli $r10, P3L, #16 or P3H, P3H, $r10 #SUB(P3H, P1L) move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .Li20 .Lb21: addi P2H, P2H, #-1 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .Lb21 .Li20: divr $r9, P3H, P3H, $r9 mul P1L, P2L, $r9 slli P3H, P3H, #16 move $r15, #0xffff and $r10, P3L, $r15 or P3H, P3H, $r10 #SUB(P3H, P1L) move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .Li22 .Lb23: addi $r9, $r9, #-1 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .Lb23 .Li22: slli P2H, P2H, #16 add P2H, P2H, $r9 /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r0, $r9) is (high, low). */ move $r10, $r1 #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r3, $r7 #else pushm $r2, $r5 move $r0, $r3 movi $r1, #0 move $r2, $r7 movi $r3, #0 bal __muldi3 popm $r2, $r5 #endif move $r9, $r0 move $r0, $r1 move $r1, $r10 #else /* __big_endian__ */ /* For big endian: ($r1, $r9) is (high, low). */ move $r10, $r0 #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r2, $r8 #else pushm $r2, $r5 move $r1, $r2 movi $r0, #0 move $r3, $r8 movi $r2, #0 bal __muldi3 popm $r2, $r5 #endif move $r9, $r1 move $r1, $r0 move $r0, $r10 #endif /* __big_endian__ */ move P3L, #0 #SUB(P3L, $r9) move $r15, P3L sub P3L, P3L, $r9 slt $r15, $r15, P3L #SUBCC(P3H, P1L) beqzs8 .LL47 move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .LL48 subi333 P3H, P3H, #1 j .LL49 .LL48: move $r15, P3H subi333 P3H, P3H, #1 slt $r15, $r15, P3H j .LL49 .LL47: move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H .LL49: beqzs8 .Li24 .LGlab3: addi P2H, P2H, #-1 #ADD(P3L, O1L) add P3L, P3L, O1L slt $r15, P3L, O1L #ADDCC(P3H, O1H) beqzs8 .LL50 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .LL51 addi P3H, P3H, #0x1 j .LL52 .LL51: move $r15, #1 add P3H, P3H, $r15 slt $r15, P3H, $r15 j .LL52 .LL50: add P3H, P3H, O1H slt $r15, P3H, O1H .LL52: beqzs8 .LGlab3 .Li24: bne P3H, O1H, .Li25 move P1L, O1L move P3H, P3L move $r9, #0 move P2L, $r9 j .Le25 .Li25: srli P2L, O1H, #16 divr $r9, P3H, P3H, P2L move $r10, #0xffff and $r10, O1H, $r10 mul P1L, $r10, $r9 slli P3H, P3H, #16 srli $r15, P3L, #16 or P3H, P3H, $r15 #SUB(P3H, P1L) move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .Li26 .Lb27: addi $r9, $r9, #-1 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .Lb27 .Li26: divr P2L, P3H, P3H, P2L mul P1L, $r10, P2L slli P3H, P3H, #16 move $r10, #0xffff and $r10, P3L, $r10 or P3H, P3H, $r10 #SUB(P3H, P1L) move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .Li28 .Lb29: addi P2L, P2L, #-1 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .Lb29 .Li28: slli $r9, $r9, #16 add $r9, $r9, P2L /* This is a 64-bit multiple. */ #ifndef __big_endian__ /* For little endian: ($r0, $r2) is (high, low). */ move $r10, $r1 #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r9, $r7 #else pushm $r2, $r5 move $r0, $r9 movi $r1, #0 move $r2, $r7 movi $r3, #0 bal __muldi3 popm $r2, $r5 #endif move $r2, $r0 move $r0, $r1 move $r1, $r10 #else /* __big_endian__ */ /* For big endian: ($r1, $r3) is (high, low). 
*/ move $r10, $r0 #ifndef __NDS32_ISA_V3M__ mulr64 $r0, $r9, $r8 #else pushm $r2, $r5 move $r0, $r9 movi $r1, #0 move $r2, $r7 movi $r3, #0 bal __muldi3 popm $r2, $r5 #endif move $r3, $r1 move $r1, $r0 move $r0, $r10 #endif /* __big_endian__ */ .Le25: move P3L, #0 #SUB(P3L, P2L) move $r15, P3L sub P3L, P3L, P2L slt $r15, $r15, P3L #SUBCC(P3H, P1L) beqzs8 .LL53 move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H beqzs8 .LL54 subi333 P3H, P3H, #1 j .LL55 .LL54: move $r15, P3H subi333 P3H, P3H, #1 slt $r15, $r15, P3H j .LL55 .LL53: move $r15, P3H sub P3H, P3H, P1L slt $r15, $r15, P3H .LL55: beqzs8 .Li30 .LGlab4: addi $r9, $r9, #-1 #ADD(P3L, O1L) add P3L, P3L, O1L slt $r15, P3L, O1L #ADDCC(P3H, O1H) beqzs8 .LL56 add P3H, P3H, O1H slt $r15, P3H, O1H beqzs8 .LL57 addi P3H, P3H, #0x1 j .LL58 .LL57: move $r15, #1 add P3H, P3H, $r15 slt $r15, P3H, $r15 j .LL58 .LL56: add P3H, P3H, O1H slt $r15, P3H, O1H .LL58: beqzs8 .LGlab4 .Li30: sltsi $r15, P2H, #0 bnezs8 .Li31 #ADD($r9, $r9) move $r15, $r9 add $r9, $r9, $r9 slt $r15, $r9, $r15 #ADDC(P2H, P2H) add P2H, P2H, P2H add P2H, P2H, $r15 addi $r6, $r6, #-1 .Li31: or $r10, P3H, P3L beqz $r10, .Li32 ori $r9, $r9, #1 .Li32: move P3H, P2H move P3L, $r9 addi $r10, $r6, #-1 slti $r15, $r10, #0x7fe beqzs8 .LGoveund #ADD(P3L, $0x400) move $r15, #0x400 add P3L, P3L, $r15 slt $r15, P3L, $r15 #ADDCC(P3H, $0x0) beqzs8 .LL61 add P3H, P3H, $r15 slt $r15, P3H, $r15 .LL61: #ADDC($r6, $0x0) add $r6, $r6, $r15 .LGlab8: srli $r10, P3L, #11 andi $r10, $r10, #1 sub P3L, P3L, $r10 srli P1L, P3L, #11 slli $r10, P3H, #21 or P1L, P1L, $r10 slli $r10, P3H, #1 srli $r10, $r10, #12 or P1H, P1H, $r10 slli $r10, $r6, #20 or P1H, P1H, $r10 .LGret: .LG999: popm $r6, $r10 pop $lp ret5 $lp .LGoveund: bgtz $r6, .LGinf subri P2H, $r6, #1 move P1L, #0 .LL62: move $r10, #0x20 slt $r15, P2H, $r10 bnezs8 .LL63 or P1L, P1L, P3L move P3L, P3H move P3H, #0 addi P2H, P2H, #0xffffffe0 bnez P3L, .LL62 .LL63: beqz P2H, .LL64 move P2L, P3H move $r10, P3L srl P3L, P3L, P2H srl P3H, P3H, P2H subri P2H, P2H, #0x20 sll P2L, P2L, P2H or P3L, P3L, P2L sll $r10, $r10, P2H or P1L, P1L, $r10 beqz P1L, .LL64 ori P3L, P3L, #1 .LL64: #ADD(P3L, $0x400) move $r15, #0x400 add P3L, P3L, $r15 slt $r15, P3L, $r15 #ADDC(P3H, $0x0) add P3H, P3H, $r15 srli $r6, P3H, #31 j .LGlab8 .LGspecA: #ADD(P3L, P3L) move $r15, P3L add P3L, P3L, P3L slt $r15, P3L, $r15 #ADDC(P3H, P3H) add P3H, P3H, P3H add P3H, P3H, $r15 bnez $r6, .Li33 or $r10, P3H, P3L beqz $r10, .Li33 #NORMd($r4, P2H, P2L) bnez P3H, .LL65 bnez P3L, .LL66 move $r6, #0 j .LL67 .LL66: move P3H, P3L move P3L, #0 move P2H, #32 sub $r6, $r6, P2H .LL65: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r3, $r5 #else pushm $r0, $r2 pushm $r4, $r5 move $r0, $r5 bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r2, $r4 #else pushm $r0, $r1 pushm $r3, $r5 move $r0, $r4 bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif #endif /* __big_endian_ */ beqz P2H, .LL67 sub $r6, $r6, P2H subri P2L, P2H, #32 srl P2L, P3L, P2L sll P3L, P3L, P2H sll P3H, P3H, P2H or P3H, P3H, P2L .LL67: #NORMd End j .LGlab1 .Li33: bne $r6, $r9, .Li35 slli $r10, O1H, #1 or $r10, $r10, O1L beqz $r10, .LGnan .Li35: subri $r15, $r9, #0x7ff beqzs8 .LGspecB beqz $r6, .LGret or $r10, P3H, P3L bnez $r10, .LGnan .LGinf: move $r10, #0x7ff00000 or P1H, P1H, $r10 move P1L, #0 j .LGret .LGspecB: #ADD(O1L, O1L) move $r15, O1L add O1L, O1L, O1L slt $r15, O1L, $r15 #ADDC(O1H, O1H) add O1H, O1H, O1H add O1H, O1H, $r15 bnez $r9, .Li36 
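	! Divisor with biased exponent 0: if its fraction is zero as well,
	! the divisor is +/-0 and the quotient becomes an infinity at
	! .LGinf; otherwise the NORMd sequence below normalizes the
	! denormal fraction and division resumes at .LGlab2.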
or $r10, O1H, O1L beqz $r10, .LGinf #NORMd($r7, P2H, P2L) bnez O1H, .LL68 bnez O1L, .LL69 move $r9, #0 j .LL70 .LL69: move O1H, O1L move O1L, #0 move P2H, #32 sub $r9, $r9, P2H .LL68: #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r3, $r8 #else pushm $r0, $r2 pushm $r4, $r5 move $r0, $r8 bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r2, $r7 #else pushm $r0, $r1 pushm $r3, $r5 move $r0, $r7 bal __clzsi2 move $r2, $r0 popm $r3, $r5 popm $r0, $r1 #endif #endif /* __big_endian__ */ beqz P2H, .LL70 sub $r9, $r9, P2H subri P2L, P2H, #32 srl P2L, O1L, P2L sll O1L, O1L, P2H sll O1H, O1H, P2H or O1H, O1H, P2L .LL70: #NORMd End j .LGlab2 .Li36: or $r10, O1H, O1L beqz $r10, .Li38 .LGnan: move P1H, #0xfff80000 .Li38: move P1L, #0 j .LGret .size __divdf3, .-__divdf3 #endif /* L_div_df */ #ifdef L_negate_sf .text .align 2 .global __negsf2 .type __negsf2, @function __negsf2: push $lp move $r1, #0x80000000 xor $r0, $r0, $r1 .LN999: pop $lp ret5 $lp .size __negsf2, .-__negsf2 #endif /* L_negate_sf */ #ifdef L_negate_df #ifndef __big_endian__ #define P1H $r1 #else #define P1H $r0 #endif .text .align 2 .global __negdf2 .type __negdf2, @function __negdf2: push $lp move $r2, #0x80000000 xor P1H, P1H, $r2 .LP999: pop $lp ret5 $lp .size __negdf2, .-__negdf2 #endif /* L_negate_df */ #ifdef L_sf_to_df #ifndef __big_endian__ #define O1L $r1 #define O1H $r2 #else #define O1H $r1 #define O1L $r2 #endif .text .align 2 .global __extendsfdf2 .type __extendsfdf2, @function __extendsfdf2: push $lp srli $r3, $r0, #23 andi $r3, $r3, #0xff move $r5, #0x80000000 and O1H, $r0, $r5 addi $r5, $r3, #-1 slti $r15, $r5, #0xfe beqzs8 .LJspec .LJlab1: addi $r3, $r3, #0x380 slli $r5, $r0, #9 srli $r5, $r5, #12 or O1H, O1H, $r5 slli O1L, $r0, #29 .LJret: slli $r5, $r3, #20 or O1H, O1H, $r5 move $r0, $r1 move $r1, $r2 .LJ999: pop $lp ret5 $lp .LJspec: move O1L, #0 add $r0, $r0, $r0 beqz $r0, .LJret bnez $r3, .Li42 .Lb43: addi $r3, $r3, #-1 add $r0, $r0, $r0 move $r5, #0x800000 slt $r15, $r0, $r5 bnezs8 .Lb43 j .LJlab1 .Li42: move $r3, #0x7ff move $r5, #0xff000000 slt $r15, $r5, $r0 beqzs8 .LJret move O1H, #0xfff80000 j .LJret .size __extendsfdf2, .-__extendsfdf2 #endif /* L_sf_to_df */ #ifdef L_df_to_sf #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #endif .text .align 2 .global __truncdfsf2 .type __truncdfsf2, @function __truncdfsf2: push $lp pushm $r6, $r8 slli P2H, P1H, #11 srli $r7, P1L, #21 or P2H, P2H, $r7 slli P2L, P1L, #11 move $r7, #0x80000000 or P2H, P2H, $r7 and $r5, P1H, $r7 slli $r4, P1H, #1 srli $r4, $r4, #21 addi $r4, $r4, #0xfffffc80 addi $r7, $r4, #-1 slti $r15, $r7, #0xfe beqzs8 .LKspec .LKlab1: beqz P2L, .Li45 ori P2H, P2H, #1 .Li45: #ADD(P2H, $0x80) move $r15, #0x80 add P2H, P2H, $r15 slt $r15, P2H, $r15 #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r7, P2H, #8 andi $r7, $r7, #1 sub P2H, P2H, $r7 slli P2H, P2H, #1 srli P2H, P2H, #9 slli $r7, $r4, #23 or P2H, P2H, $r7 or $r0, P2H, $r5 .LK999: popm $r6, $r8 pop $lp ret5 $lp .LKspec: subri $r15, $r4, #0x47f bnezs8 .Li46 slli $r7, P2H, #1 or $r7, $r7, P2L beqz $r7, .Li46 move $r0, #0xffc00000 j .LK999 .Li46: sltsi $r15, $r4, #0xff bnezs8 .Li48 move $r7, #0x7f800000 or $r0, $r5, $r7 j .LK999 .Li48: subri $r6, $r4, #1 move $r7, #0x20 slt $r15, $r6, $r7 bnezs8 .Li49 move $r0, $r5 j .LK999 .Li49: subri $r8, $r6, #0x20 sll $r7, P2H, $r8 or P2L, P2L, $r7 srl P2H, P2H, $r6 move $r4, #0 move 
$r7, #0x80000000 or P2H, P2H, $r7 j .LKlab1 .size __truncdfsf2, .-__truncdfsf2 #endif /* L_df_to_sf */ #ifdef L_df_to_si #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #else #define P1H $r0 #define P1L $r1 #endif .global __fixdfsi .type __fixdfsi, @function __fixdfsi: push $lp pushm $r6, $r6 slli $r3, P1H, #11 srli $r6, P1L, #21 or $r3, $r3, $r6 move $r6, #0x80000000 or $r3, $r3, $r6 slli $r6, P1H, #1 srli $r6, $r6, #21 subri $r2, $r6, #0x41e blez $r2, .LLnaninf move $r6, #0x20 slt $r15, $r2, $r6 bnezs8 .LL72 move $r3, #0 .LL72: srl $r3, $r3, $r2 sltsi $r15, P1H, #0 beqzs8 .Li50 subri $r3, $r3, #0 .Li50: move $r0, $r3 .LL999: popm $r6, $r6 pop $lp ret5 $lp .LLnaninf: beqz P1L, .Li51 ori P1H, P1H, #1 .Li51: move $r6, #0x7ff00000 slt $r15, $r6, P1H beqzs8 .Li52 move $r0, #0x80000000 j .LL999 .Li52: move $r0, #0x7fffffff j .LL999 .size __fixdfsi, .-__fixdfsi #endif /* L_df_to_si */ #ifdef L_fixsfdi #ifndef __big_endian__ #define O1L $r1 #define O1H $r2 #else #define O1H $r1 #define O1L $r2 #endif .text .align 2 .global __fixsfdi .type __fixsfdi, @function __fixsfdi: push $lp srli $r3, $r0, #23 andi $r3, $r3, #0xff slli O1H, $r0, #8 move $r5, #0x80000000 or O1H, O1H, $r5 move O1L, #0 sltsi $r15, $r3, #0xbe beqzs8 .LCinfnan subri $r3, $r3, #0xbe .LL8: move $r5, #0x20 slt $r15, $r3, $r5 bnezs8 .LL9 move O1L, O1H move O1H, #0 addi $r3, $r3, #0xffffffe0 bnez O1L, .LL8 .LL9: beqz $r3, .LL10 move $r4, O1H srl O1L, O1L, $r3 srl O1H, O1H, $r3 subri $r3, $r3, #0x20 sll $r4, $r4, $r3 or O1L, O1L, $r4 .LL10: sltsi $r15, $r0, #0 beqzs8 .LCret subri O1H, O1H, #0 beqz O1L, .LL11 subri O1L, O1L, #0 subi45 O1H, #1 .LL11: .LCret: move $r0, $r1 move $r1, $r2 .LC999: pop $lp ret5 $lp .LCinfnan: sltsi $r15, $r0, #0 bnezs8 .LCret3 subri $r15, $r3, #0xff bnezs8 .Li7 slli $r5, O1H, #1 beqz $r5, .Li7 .LCret3: move O1H, #0x80000000 j .LCret .Li7: move O1H, #0x7fffffff move O1L, #-1 j .LCret .size __fixsfdi, .-__fixsfdi #endif /* L_fixsfdi */ #ifdef L_fixdfdi #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define O1L $r3 #define O1H $r4 #else #define P1H $r0 #define P1L $r1 #define O1H $r3 #define O1L $r4 #endif .text .align 2 .global __fixdfdi .type __fixdfdi, @function __fixdfdi: push $lp pushm $r6, $r6 slli $r5, P1H, #1 srli $r5, $r5, #21 slli O1H, P1H, #11 srli $r6, P1L, #21 or O1H, O1H, $r6 slli O1L, P1L, #11 move $r6, #0x80000000 or O1H, O1H, $r6 slti $r15, $r5, #0x43e beqzs8 .LCnaninf subri $r2, $r5, #0x43e .LL14: move $r6, #0x20 slt $r15, $r2, $r6 bnezs8 .LL15 move O1L, O1H move O1H, #0 addi $r2, $r2, #0xffffffe0 bnez O1L, .LL14 .LL15: beqz $r2, .LL16 move P1L, O1H srl O1L, O1L, $r2 srl O1H, O1H, $r2 subri $r2, $r2, #0x20 sll P1L, P1L, $r2 or O1L, O1L, P1L .LL16: sltsi $r15, P1H, #0 beqzs8 .LCret subri O1H, O1H, #0 beqz O1L, .LL17 subri O1L, O1L, #0 subi45 O1H, #1 .LL17: .LCret: move P1L, O1L move P1H, O1H .LC999: popm $r6, $r6 pop $lp ret5 $lp .LCnaninf: sltsi $r15, P1H, #0 bnezs8 .LCret3 subri $r15, $r5, #0x7ff bnezs8 .Li5 slli $r6, O1H, #1 or $r6, $r6, O1L beqz $r6, .Li5 .LCret3: move O1H, #0x80000000 move O1L, #0 j .LCret .Li5: move O1H, #0x7fffffff move O1L, #-1 j .LCret .size __fixdfdi, .-__fixdfdi #endif /* L_fixdfdi */ #ifdef L_fixunssfsi .global __fixunssfsi .type __fixunssfsi, @function __fixunssfsi: push $lp slli $r1, $r0, #8 move $r3, #0x80000000 or $r1, $r1, $r3 srli $r3, $r0, #23 andi $r3, $r3, #0xff subri $r2, $r3, #0x9e sltsi $r15, $r2, #0 bnezs8 .LLspec sltsi $r15, $r2, #0x20 bnezs8 .Li45 move $r0, #0 j .LL999 .Li45: srl $r1, $r1, $r2 sltsi $r15, $r0, #0 beqzs8 .Li46 subri $r1, 
$r1, #0 .Li46: move $r0, $r1 .LL999: pop $lp ret5 $lp .LLspec: move $r3, #0x7f800000 slt $r15, $r3, $r0 beqzs8 .Li47 move $r0, #0x80000000 j .LL999 .Li47: move $r0, #-1 j .LL999 .size __fixunssfsi, .-__fixunssfsi #endif /* L_fixunssfsi */ #ifdef L_fixunsdfsi #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #else #define P1H $r0 #define P1L $r1 #endif .text .align 2 .global __fixunsdfsi .type __fixunsdfsi, @function __fixunsdfsi: push $lp pushm $r6, $r6 slli $r3, P1H, #11 srli $r6, P1L, #21 or $r3, $r3, $r6 move $r6, #0x80000000 or $r3, $r3, $r6 slli $r6, P1H, #1 srli $r6, $r6, #21 subri $r2, $r6, #0x41e sltsi $r15, $r2, #0 bnezs8 .LNnaninf move $r6, #0x20 slt $r15, $r2, $r6 bnezs8 .LL73 move $r3, #0 .LL73: srl $r3, $r3, $r2 sltsi $r15, P1H, #0 beqzs8 .Li53 subri $r3, $r3, #0 .Li53: move $r0, $r3 .LN999: popm $r6, $r6 pop $lp ret5 $lp .LNnaninf: beqz P1L, .Li54 ori P1H, P1H, #1 .Li54: move $r6, #0x7ff00000 slt $r15, $r6, P1H beqzs8 .Li55 move $r0, #0x80000000 j .LN999 .Li55: move $r0, #-1 j .LN999 .size __fixunsdfsi, .-__fixunsdfsi #endif /* L_fixunsdfsi */ #ifdef L_fixunssfdi #ifndef __big_endian__ #define O1L $r1 #define O1H $r2 #else #define O1H $r1 #define O1L $r2 #endif .text .align 2 .global __fixunssfdi .type __fixunssfdi, @function __fixunssfdi: push $lp srli $r3, $r0, #23 andi $r3, $r3, #0xff slli O1H, $r0, #8 move $r5, #0x80000000 or O1H, O1H, $r5 move O1L, #0 sltsi $r15, $r3, #0xbe beqzs8 .LDinfnan subri $r3, $r3, #0xbe .LL12: move $r5, #0x20 slt $r15, $r3, $r5 bnezs8 .LL13 move O1L, O1H move O1H, #0 addi $r3, $r3, #0xffffffe0 bnez O1L, .LL12 .LL13: beqz $r3, .LL14 move $r4, O1H srl O1L, O1L, $r3 srl O1H, O1H, $r3 subri $r3, $r3, #0x20 sll $r4, $r4, $r3 or O1L, O1L, $r4 .LL14: sltsi $r15, $r0, #0 beqzs8 .LDret subri O1H, O1H, #0 beqz O1L, .LL15 subri O1L, O1L, #0 subi45 O1H, #1 .LL15: .LDret: move $r0, $r1 move $r1, $r2 .LD999: pop $lp ret5 $lp .LDinfnan: move O1H, #0x80000000 move O1L, #0 j .LDret .size __fixunssfdi, .-__fixunssfdi #endif /* L_fixunssfdi */ #ifdef L_fixunsdfdi #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define O1L $r3 #define O1H $r4 #else #define P1H $r0 #define P1L $r1 #define O1H $r3 #define O1L $r4 #endif .text .align 2 .global __fixunsdfdi .type __fixunsdfdi, @function __fixunsdfdi: push $lp pushm $r6, $r6 slli $r5, P1H, #1 srli $r5, $r5, #21 slli O1H, P1H, #11 srli $r6, P1L, #21 or O1H, O1H, $r6 slli O1L, P1L, #11 move $r6, #0x80000000 or O1H, O1H, $r6 slti $r15, $r5, #0x43e beqzs8 .LDnaninf subri $r2, $r5, #0x43e .LL18: move $r6, #0x20 slt $r15, $r2, $r6 bnezs8 .LL19 move O1L, O1H move O1H, #0 addi $r2, $r2, #0xffffffe0 bnez O1L, .LL18 .LL19: beqz $r2, .LL20 move P1L, O1H srl O1L, O1L, $r2 srl O1H, O1H, $r2 subri $r2, $r2, #0x20 sll P1L, P1L, $r2 or O1L, O1L, P1L .LL20: sltsi $r15, P1H, #0 beqzs8 .LDret subri O1H, O1H, #0 beqz O1L, .LL21 subri O1L, O1L, #0 subi45 O1H, #1 .LL21: .LDret: move P1L, O1L move P1H, O1H .LD999: popm $r6, $r6 pop $lp ret5 $lp .LDnaninf: move O1H, #0x80000000 move O1L, #0 j .LDret .size __fixunsdfdi, .-__fixunsdfdi #endif /* L_fixunsdfdi */ #ifdef L_si_to_sf .text .align 2 .global __floatsisf .type __floatsisf, @function __floatsisf: push $lp move $r4, #0x80000000 and $r2, $r0, $r4 beqz $r0, .Li39 sltsi $r15, $r0, #0 beqzs8 .Li40 subri $r0, $r0, #0 .Li40: move $r1, #0x9e #ifdef __NDS32_PERF_EXT__ clz $r3, $r0 #else pushm $r0, $r2 pushm $r4, $r5 bal __clzsi2 move $r3, $r0 popm $r4, $r5 popm $r0, $r2 #endif sub $r1, $r1, $r3 sll $r0, $r0, $r3 #ADD($r0, $0x80) move $r15, #0x80 add $r0, $r0, $r15 slt $r15, $r0, $r15 
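	! (the slt above captured the carry out of the 0x80 rounding add;
	! the #ADDC step below folds that carry into the exponent in $r1,
	! covering the case where rounding overflows the significand.)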
#ADDC($r1, $0x0) add $r1, $r1, $r15 srai $r4, $r0, #8 andi $r4, $r4, #1 sub $r0, $r0, $r4 slli $r0, $r0, #1 srli $r0, $r0, #9 slli $r4, $r1, #23 or $r0, $r0, $r4 .Li39: or $r0, $r0, $r2 .LH999: pop $lp ret5 $lp .size __floatsisf, .-__floatsisf #endif /* L_si_to_sf */ #ifdef L_si_to_df #ifndef __big_endian__ #define O1L $r1 #define O1H $r2 #define O2L $r4 #define O2H $r5 #else #define O1H $r1 #define O1L $r2 #define O2H $r4 #define O2L $r5 #endif .text .align 2 .global __floatsidf .type __floatsidf, @function __floatsidf: push $lp pushm $r6, $r6 move O1L, #0 move O2H, O1L move $r3, O1L move O1H, $r0 beqz O1H, .Li39 sltsi $r15, O1H, #0 beqzs8 .Li40 move O2H, #0x80000000 subri O1H, O1H, #0 beqz O1L, .LL71 subri O1L, O1L, #0 subi45 O1H, #1 .LL71: .Li40: move $r3, #0x41e #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r4, $r2 #else pushm $r0, $r3 push $r5 move $r0, $r2 bal __clzsi2 move $r4, $r0 pop $r5 popm $r0, $r3 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r5, $r1 #else pushm $r0, $r4 move $r0, $r1 bal __clzsi2 move $r5, $r0 popm $r0, $r4 #endif #endif /* __big_endian__ */ sub $r3, $r3, O2L sll O1H, O1H, O2L .Li39: srli O2L, O1L, #11 slli $r6, O1H, #21 or O2L, O2L, $r6 slli $r6, O1H, #1 srli $r6, $r6, #12 or O2H, O2H, $r6 slli $r6, $r3, #20 or O2H, O2H, $r6 move $r0, $r4 move $r1, $r5 .LH999: popm $r6, $r6 pop $lp ret5 $lp .size __floatsidf, .-__floatsidf #endif /* L_si_to_df */ #ifdef L_floatdisf #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #endif .text .align 2 .global __floatdisf .type __floatdisf, @function __floatdisf: push $lp pushm $r6, $r7 move $r7, #0x80000000 and $r5, P1H, $r7 move P2H, P1H move P2L, P1L or $r7, P1H, P1L beqz $r7, .Li1 sltsi $r15, P1H, #0 beqzs8 .Li2 subri P2H, P2H, #0 beqz P2L, .LL1 subri P2L, P2L, #0 subi45 P2H, #1 .LL1: .Li2: move $r4, #0xbe #NORMd($r2, $r6, P1L) bnez P2H, .LL2 bnez P2L, .LL3 move $r4, #0 j .LL4 .LL3: move P2H, P2L move P2L, #0 move $r6, #32 sub $r4, $r4, $r6 .LL2: #ifdef __NDS32_PERF_EXT__ clz $r6, P2H #else pushm $r0, $r5 move $r0, P2H bal __clzsi2 move $r6, $r0 popm $r0, $r5 #endif beqz $r6, .LL4 sub $r4, $r4, $r6 subri P1L, $r6, #32 srl P1L, P2L, P1L sll P2L, P2L, $r6 sll P2H, P2H, $r6 or P2H, P2H, P1L .LL4: #NORMd End beqz P2L, .Li3 ori P2H, P2H, #1 .Li3: #ADD(P2H, $0x80) move $r15, #0x80 add P2H, P2H, $r15 slt $r15, P2H, $r15 #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r7, P2H, #8 andi $r7, $r7, #1 sub P2H, P2H, $r7 slli P2H, P2H, #1 srli P2H, P2H, #9 slli $r7, $r4, #23 or P2H, P2H, $r7 .Li1: or $r0, P2H, $r5 .LA999: popm $r6, $r7 pop $lp ret5 $lp .size __floatdisf, .-__floatdisf #endif /* L_floatdisf */ #ifdef L_floatdidf #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #define O1L $r5 #define O1H $r6 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define O1H $r5 #define O1L $r6 #endif .text .align 2 .global __floatdidf .type __floatdidf, @function __floatdidf: push $lp pushm $r6, $r8 move $r4, #0 move $r7, $r4 move P2H, P1H move P2L, P1L or $r8, P1H, P1L beqz $r8, .Li1 move $r4, #0x43e sltsi $r15, P1H, #0 beqzs8 .Li2 move $r7, #0x80000000 subri P2H, P2H, #0 beqz P2L, .LL1 subri P2L, P2L, #0 subi45 P2H, #1 .LL1: .Li2: #NORMd($r2, O1H, O1L) bnez P2H, .LL2 bnez P2L, .LL3 move $r4, #0 j .LL4 .LL3: move P2H, P2L move P2L, #0 move O1H, #32 sub $r4, $r4, O1H .LL2: #ifdef __NDS32_PERF_EXT__ clz O1H, P2H #else /* not __NDS32_PERF_EXT__ */ /* Replace 
clz with function call. clz O1H, P2H EL: clz $r6, $r3 EB: clz $r5, $r2 */ #ifndef __big_endian__ pushm $r0, $r5 move $r0, $r3 bal __clzsi2 move $r6, $r0 popm $r0, $r5 #else pushm $r0, $r4 move $r0, $r2 bal __clzsi2 move $r5, $r0 popm $r0, $r4 #endif #endif /* not __NDS32_PERF_EXT__ */ beqz O1H, .LL4 sub $r4, $r4, O1H subri O1L, O1H, #32 srl O1L, P2L, O1L sll P2L, P2L, O1H sll P2H, P2H, O1H or P2H, P2H, O1L .LL4: #NORMd End #ADD(P2L, $0x400) move $r15, #0x400 add P2L, P2L, $r15 slt $r15, P2L, $r15 #ADDCC(P2H, $0x0) beqzs8 .LL7 add P2H, P2H, $r15 slt $r15, P2H, $r15 .LL7: #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r8, P2L, #11 andi $r8, $r8, #1 sub P2L, P2L, $r8 .Li1: srli O1L, P2L, #11 slli $r8, P2H, #21 or O1L, O1L, $r8 slli O1H, P2H, #1 srli O1H, O1H, #12 slli $r8, $r4, #20 or O1H, O1H, $r8 or O1H, O1H, $r7 move P1L, O1L move P1H, O1H .LA999: popm $r6, $r8 pop $lp ret5 $lp .size __floatdidf, .-__floatdidf #endif /* L_floatdidf */ #ifdef L_floatunsisf .text .align 2 .global __floatunsisf .type __floatunsisf, @function __floatunsisf: push $lp beqz $r0, .Li41 move $r2, #0x9e #ifdef __NDS32_PERF_EXT__ clz $r1, $r0 #else push $r0 pushm $r2, $r5 bal __clzsi2 move $r1, $r0 popm $r2, $r5 pop $r0 #endif sub $r2, $r2, $r1 sll $r0, $r0, $r1 #ADD($r0, $0x80) move $r15, #0x80 add $r0, $r0, $r15 slt $r15, $r0, $r15 #ADDC($r2, $0x0) add $r2, $r2, $r15 srli $r3, $r0, #8 andi $r3, $r3, #1 sub $r0, $r0, $r3 slli $r0, $r0, #1 srli $r0, $r0, #9 slli $r3, $r2, #23 or $r0, $r0, $r3 .Li41: .LI999: pop $lp ret5 $lp .size __floatunsisf, .-__floatunsisf #endif /* L_floatunsisf */ #ifdef L_floatunsidf #ifndef __big_endian__ #define O1L $r1 #define O1H $r2 #define O2L $r4 #define O2H $r5 #else #define O1H $r1 #define O1L $r2 #define O2H $r4 #define O2L $r5 #endif .text .align 2 .global __floatunsidf .type __floatunsidf, @function __floatunsidf: push $lp pushm $r6, $r6 move O1L, #0 move $r3, O1L move O1H, $r0 beqz O1H, .Li41 move $r3, #0x41e #ifndef __big_endian__ #ifdef __NDS32_PERF_EXT__ clz $r5, $r2 #else pushm $r0, $r4 move $r0, $r2 bal __clzsi2 move $r5, $r0 popm $r0, $r4 #endif #else /* __big_endian__ */ #ifdef __NDS32_PERF_EXT__ clz $r4, $r1 #else pushm $r0, $r3 push $r5 move $r0, $r1 bal __clzsi2 move $r4, $r0 pop $r5 popm $r0, $r3 #endif #endif /* __big_endian__ */ sub $r3, $r3, O2H sll O1H, O1H, O2H .Li41: srli O2L, O1L, #11 slli $r6, O1H, #21 or O2L, O2L, $r6 slli O2H, O1H, #1 srli O2H, O2H, #12 slli $r6, $r3, #20 or O2H, O2H, $r6 move $r0, $r4 move $r1, $r5 .LI999: popm $r6, $r6 pop $lp ret5 $lp .size __floatunsidf, .-__floatunsidf #endif /* L_floatunsidf */ #ifdef L_floatundisf #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #endif .text .align 2 .global __floatundisf .type __floatundisf, @function __floatundisf: push $lp pushm $r6, $r6 move P2H, P1H move P2L, P1L or $r6, P1H, P1L beqz $r6, .Li4 move $r4, #0xbe #NORMd($r2, $r5, P1L) bnez P2H, .LL5 bnez P2L, .LL6 move $r4, #0 j .LL7 .LL6: move P2H, P2L move P2L, #0 move $r5, #32 sub $r4, $r4, $r5 .LL5: #ifdef __NDS32_PERF_EXT__ clz $r5, P2H #else pushm $r0, $r4 move $r0, P2H bal __clzsi2 move $r5, $r0 popm $r0, $r4 #endif beqz $r5, .LL7 sub $r4, $r4, $r5 subri P1L, $r5, #32 srl P1L, P2L, P1L sll P2L, P2L, $r5 sll P2H, P2H, $r5 or P2H, P2H, P1L .LL7: #NORMd End beqz P2L, .Li5 ori P2H, P2H, #1 .Li5: #ADD(P2H, $0x80) move $r15, #0x80 add P2H, P2H, $r15 slt $r15, P2H, $r15 #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r6, P2H, #8 andi $r6, $r6, #1 sub P2H, 
P2H, $r6 slli P2H, P2H, #1 srli P2H, P2H, #9 slli $r6, $r4, #23 or P2H, P2H, $r6 .Li4: move $r0, P2H .LB999: popm $r6, $r6 pop $lp ret5 $lp .size __floatundisf, .-__floatundisf #endif /* L_floatundisf */ #ifdef L_floatundidf #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #define O1L $r5 #define O1H $r6 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #define O1H $r5 #define O1L $r6 #endif .text .align 2 .global __floatundidf .type __floatundidf, @function __floatundidf: push $lp pushm $r6, $r7 move $r4, #0 move P2H, P1H move P2L, P1L or $r7, P1H, P1L beqz $r7, .Li3 move $r4, #0x43e #NORMd($r2, O1H, O1L) bnez P2H, .LL8 bnez P2L, .LL9 move $r4, #0 j .LL10 .LL9: move P2H, P2L move P2L, #0 move O1H, #32 sub $r4, $r4, O1H .LL8: #ifdef __NDS32_PERF_EXT__ clz O1H, P2H #else /* not __NDS32_PERF_EXT__ */ /* Replace clz with function call. clz O1H, P2H EL: clz $r6, $r3 EB: clz $r5, $r2 */ #ifndef __big_endian__ pushm $r0, $r5 move $r0, $r3 bal __clzsi2 move $r6, $r0 popm $r0, $r5 #else pushm $r0, $r4 move $r0, $r2 bal __clzsi2 move $r5, $r0 popm $r0, $r4 #endif #endif /* not __NDS32_PERF_EXT__ */ beqz O1H, .LL10 sub $r4, $r4, O1H subri O1L, O1H, #32 srl O1L, P2L, O1L sll P2L, P2L, O1H sll P2H, P2H, O1H or P2H, P2H, O1L .LL10: #NORMd End #ADD(P2L, $0x400) move $r15, #0x400 add P2L, P2L, $r15 slt $r15, P2L, $r15 #ADDCC(P2H, $0x0) beqzs8 .LL13 add P2H, P2H, $r15 slt $r15, P2H, $r15 .LL13: #ADDC($r4, $0x0) add $r4, $r4, $r15 srli $r7, P2L, #11 andi $r7, $r7, #1 sub P2L, P2L, $r7 .Li3: srli O1L, P2L, #11 slli $r7, P2H, #21 or O1L, O1L, $r7 slli O1H, P2H, #1 srli O1H, O1H, #12 slli $r7, $r4, #20 or O1H, O1H, $r7 move P1L, O1L move P1H, O1H .LB999: popm $r6, $r7 pop $lp ret5 $lp .size __floatundidf, .-__floatundidf #endif /* L_floatundidf */ #ifdef L_compare_sf .text .align 2 .global __cmpsf2 .type __cmpsf2, @function __cmpsf2: .global __eqsf2 .type __eqsf2, @function __eqsf2: .global __ltsf2 .type __ltsf2, @function __ltsf2: .global __lesf2 .type __lesf2, @function __lesf2: .global __nesf2 .type __nesf2, @function __nesf2: move $r4, #1 j .LA .global __gesf2 .type __gesf2, @function __gesf2: .global __gtsf2 .type __gtsf2, @function __gtsf2: move $r4, #-1 .LA: push $lp slli $r2, $r0, #1 slli $r3, $r1, #1 or $r5, $r2, $r3 beqz $r5, .LMequ move $r5, #0xff000000 slt $r15, $r5, $r2 bnezs8 .LMnan slt $r15, $r5, $r3 bnezs8 .LMnan srli $r2, $r2, #1 sltsi $r15, $r0, #0 beqzs8 .Li48 subri $r2, $r2, #0 .Li48: srli $r3, $r3, #1 sltsi $r15, $r1, #0 beqzs8 .Li49 subri $r3, $r3, #0 .Li49: slts $r15, $r2, $r3 beqzs8 .Li50 move $r0, #-1 j .LM999 .Li50: slts $r15, $r3, $r2 beqzs8 .LMequ move $r0, #1 j .LM999 .LMequ: move $r0, #0 .LM999: pop $lp ret5 $lp .LMnan: move $r0, $r4 j .LM999 .size __cmpsf2, .-__cmpsf2 .size __eqsf2, .-__eqsf2 .size __ltsf2, .-__ltsf2 .size __lesf2, .-__lesf2 .size __nesf2, .-__nesf2 .size __gesf2, .-__gesf2 .size __gtsf2, .-__gtsf2 #endif /* L_compare_sf */ #ifdef L_compare_df #ifdef __big_endian__ #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #else #define P1H $r1 #define P1L $r0 #define P2H $r3 #define P2L $r2 #endif .align 2 .globl __gtdf2 .globl __gedf2 .globl __ltdf2 .globl __ledf2 .globl __eqdf2 .globl __nedf2 .globl __cmpdf2 .type __gtdf2, @function .type __gedf2, @function .type __ltdf2, @function .type __ledf2, @function .type __eqdf2, @function .type __nedf2, @function .type __cmpdf2, @function __gtdf2: __gedf2: movi $r4, -1 b .L1 __ltdf2: __ledf2: __cmpdf2: __nedf2: __eqdf2: movi $r4, 1 .L1: #if defined 
(__NDS32_ISA_V3M__) push25 $r10, 0 #else smw.adm $r6, [$sp], $r9, 0 #endif sethi $r5, 0x7ff00 and $r6, P1H, $r5 ! r6=aExp and $r7, P2H, $r5 ! r7=bExp slli $r8, P1H, 12 ! r8=aSig0 slli $r9, P2H, 12 ! r9=bSig0 beq $r6, $r5, .L11 ! aExp==0x7ff beq $r7, $r5, .L12 ! bExp==0x7ff .L2: slli $ta, P1H, 1 ! ta=ahigh<<1 or $ta, P1L, $ta ! xor $r5, P1H, P2H ! r5=ahigh^bhigh beqz $ta, .L3 ! if(ahigh<<1)==0,go .L3 !------------------------------- ! (ahigh<<1)!=0 || (bhigh<<1)!=0 !------------------------------- .L4: beqz $r5, .L5 ! ahigh==bhigh, go .L5 !-------------------- ! a != b !-------------------- .L6: bltz $r5, .L7 ! if(aSign!=bSign), go .L7 !-------------------- ! aSign==bSign !-------------------- slt $ta, $r6, $r7 ! ta=(aExp<bExp) bne $r6, $r7, .L8 ! if(aExp!=bExp),go .L8 slt $ta, $r8, $r9 ! ta=(aSig0<bSig0) bne $r8, $r9, .L8 ! if(aSig0!=bSig0),go .L8 slt $ta, P1L, P2L ! ta=(aSig1<bSig1) .L8: beqz $ta, .L10 ! if(|a|>|b|), go .L10 nor $r0, P2H, P2H ! if(|a|<|b|),return (~yh) .L14: #if defined (__NDS32_ISA_V3M__) pop25 $r10, 0 #else lmw.bim $r6, [$sp], $r9, 0 ret #endif .L10: ori $r0, P2H, 1 ! return (yh|1) b .L14 !-------------------- ! (ahigh<<1)=0 !-------------------- .L3: slli $ta, P2H, 1 ! ta=bhigh<<1 or $ta, P2L, $ta ! bnez $ta, .L4 ! ta=(bhigh<<1)!=0,go .L4 .L5: xor $ta, P1L, P2L ! ta=alow^blow bnez $ta, .L6 ! alow!=blow,go .L6 movi $r0, 0 ! a==b, return 0 b .L14 !-------------------- ! aExp=0x7ff; !-------------------- .L11: or P1L, P1L, $r8 ! x1=(aSig0|aSig1) bnez P1L, .L13 ! if(a=nan), go.L13 xor $ta, $r7, $r5 ! ta=(bExp^0x7ff) bnez $ta, .L2 ! if(bExp!=0x7ff), go .L2 !-------------------- ! bExp=0x7ff; !-------------------- .L12: or $ta, P2L, $r9 ! ta=(bSig0|bSig1) beqz $ta, .L2 ! if(b!=nan), go .L2 .L13: move $r0, $r4 b .L14 !-------------------- ! aSign!=bSign !-------------------- .L7: ori $r0, P1H, 1 ! if(aSign!=bSign), return (ahigh|1) b .L14 .size __gtdf2, .-__gtdf2 .size __gedf2, .-__gedf2 .size __ltdf2, .-__ltdf2 .size __ledf2, .-__ledf2 .size __eqdf2, .-__eqdf2 .size __nedf2, .-__nedf2 .size __cmpdf2, .-__cmpdf2 #endif /* L_compare_df */ #ifdef L_unord_sf .text .align 2 .global __unordsf2 .type __unordsf2, @function __unordsf2: push $lp slli $r2, $r0, #1 move $r3, #0xff000000 slt $r15, $r3, $r2 beqzs8 .Li52 move $r0, #1 j .LP999 .Li52: slli $r2, $r1, #1 move $r3, #0xff000000 slt $r15, $r3, $r2 beqzs8 .Li53 move $r0, #1 j .LP999 .Li53: move $r0, #0 .LP999: pop $lp ret5 $lp .size __unordsf2, .-__unordsf2 #endif /* L_unord_sf */ #ifdef L_unord_df #ifndef __big_endian__ #define P1L $r0 #define P1H $r1 #define P2L $r2 #define P2H $r3 #else #define P1H $r0 #define P1L $r1 #define P2H $r2 #define P2L $r3 #endif .text .align 2 .global __unorddf2 .type __unorddf2, @function __unorddf2: push $lp slli $r4, P1H, #1 beqz P1L, .Li66 addi $r4, $r4, #1 .Li66: move $r5, #0xffe00000 slt $r15, $r5, $r4 beqzs8 .Li67 move $r0, #1 j .LR999 .Li67: slli $r4, P2H, #1 beqz P2L, .Li68 addi $r4, $r4, #1 .Li68: move $r5, #0xffe00000 slt $r15, $r5, $r4 beqzs8 .Li69 move $r0, #1 j .LR999 .Li69: move $r0, #0 .LR999: pop $lp ret5 $lp .size __unorddf2, .-__unorddf2 #endif /* L_unord_df */ /* ------------------------------------------- */ /* DPBIT floating point operations for libgcc */ /* ------------------------------------------- */
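The int-to-float routines above all follow one skeleton: normalize the magnitude with `clz`, derive the biased exponent from a fixed starting constant (0x9E = 127 + 31 for SImode to SFmode, 0xBE = 127 + 63 for DImode to SFmode, 0x41E and 0x43E for the DFmode variants with bias 1023), then round away the low guard bits with the `#ADD`/`#ADDC` carry sequence, which implements round-to-nearest with ties to even. The C model below covers the `__floatunsisf` case; it is a sketch of the algorithm, not a transcription of the register-level code. The name `u32_to_float` is illustrative only, and `__builtin_clz` (GCC/Clang) stands in for the NDS32 `clz` instruction.

#include <stdint.h>
#include <string.h>

/* uint32_t -> IEEE-754 binary32, rounding to nearest, ties to even. */
float u32_to_float(uint32_t x)
{
    if (x == 0)
        return 0.0f;                /* the `beqz` early exit */

    int n = __builtin_clz(x);       /* normalization shift (`clz`) */
    uint32_t exp = 0x9E - n;        /* 0x9E = bias 127 + 31 */
    uint32_t m = x << n;            /* implicit 1 now sits in bit 31 */

    uint32_t frac = m >> 8;         /* 24 bits kept: implicit 1 + 23 fraction */
    uint32_t rem = m & 0xFF;        /* 8 guard bits to round away */
    if (rem > 0x80 || (rem == 0x80 && (frac & 1)))
        frac++;                     /* models the #ADD($r0, $0x80) / bit-8 fix-up */
    if (frac >> 24) {               /* rounding carried out of the top bit */
        frac >>= 1;
        exp++;                      /* models the #ADDC carry into the exponent */
    }

    uint32_t bits = (exp << 23) | (frac & 0x7FFFFF);
    float f;
    memcpy(&f, &bits, sizeof f);    /* type-pun without aliasing UB */
    return f;
}

`__floatsisf` prepends a sign fix-up (negate, convert, OR the saved sign bit back in), and the DImode entries run the same normalize-and-round steps over a 64-bit significand held in a register pair, which is what the `#NORMd` blocks do.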
4ms/metamodule-plugin-sdk
3,888
plugin-libc/libgcc/config/nds32/lib1asmsrc-newlib.S
/* newlib libgcc routines of Andes NDS32 cpu for GNU compiler
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by Andes Technology Corporation.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License
   and a copy of the GCC Runtime Library Exception along with this
   program; see the files COPYING3 and COPYING.RUNTIME respectively.
   If not, see <http://www.gnu.org/licenses/>.  */

	.section .mdebug.abi_nds32
	.previous

#ifdef L_divsi3

	.text
	.align 2
	.globl __divsi3
	.type __divsi3, @function
__divsi3:
	movi $r5, 0		! res = 0
	xor $r4, $r0, $r1	! neg
	bltz $r0, .L1
	bltz $r1, .L2
.L3:
	movi $r2, 1		! bit = 1
	slt $r3, $r1, $r0	! test if dividend is smaller than or equal to divisor
	beqz $r3, .L5
	bltz $r1, .L5
.L4:
	slli $r2, $r2, 1
	beqz $r2, .L6
	slli $r1, $r1, 1
	slt $r3, $r1, $r0
	beqz $r3, .L5
	bgez $r1, .L4
.L5:
	slt $r3, $r0, $r1
	bnez $r3, .L8
	sub $r0, $r0, $r1
	or $r5, $r5, $r2
.L8:
	srli $r1, $r1, 1
	srli $r2, $r2, 1
	bnez $r2, .L5
.L6:
	bgez $r4, .L7
	subri $r5, $r5, 0	! negate if $r4 < 0
.L7:
	move $r0, $r5
	ret
.L1:
	subri $r0, $r0, 0	! change neg to pos
	bgez $r1, .L3
.L2:
	subri $r1, $r1, 0	! change neg to pos
	j .L3
	.size __divsi3, .-__divsi3
#endif /* L_divsi3 */

#ifdef L_modsi3

	.text
	.align 2
	.globl __modsi3
	.type __modsi3, @function
__modsi3:
	movi $r5, 0		! res = 0
	move $r4, $r0		! neg
	bltz $r0, .L1
	bltz $r1, .L2
.L3:
	movi $r2, 1		! bit = 1
	slt $r3, $r1, $r0	! test if dividend is smaller than or equal to divisor
	beqz $r3, .L5
	bltz $r1, .L5
.L4:
	slli $r2, $r2, 1
	beqz $r2, .L6
	slli $r1, $r1, 1
	slt $r3, $r1, $r0
	beqz $r3, .L5
	bgez $r1, .L4
.L5:
	slt $r3, $r0, $r1
	bnez $r3, .L8
	sub $r0, $r0, $r1
	or $r5, $r5, $r2
.L8:
	srli $r1, $r1, 1
	srli $r2, $r2, 1
	bnez $r2, .L5
.L6:
	bgez $r4, .L7
	subri $r0, $r0, 0	! negate if $r4 < 0
.L7:
	ret
.L1:
	subri $r0, $r0, 0	! change neg to pos
	bgez $r1, .L3
.L2:
	subri $r1, $r1, 0	! change neg to pos
	j .L3
	.size __modsi3, .-__modsi3
#endif /* L_modsi3 */

#ifdef L_udivsi3

	.text
	.align 2
	.globl __udivsi3
	.type __udivsi3, @function
__udivsi3:
	movi $r5, 0		! res = 0
	movi $r2, 1		! bit = 1
	slt $r3, $r1, $r0	! test if dividend is smaller than or equal to divisor
	beqz $r3, .L5
	bltz $r1, .L5
.L4:
	slli $r2, $r2, 1
	beqz $r2, .L6
	slli $r1, $r1, 1
	slt $r3, $r1, $r0
	beqz $r3, .L5
	bgez $r1, .L4
.L5:
	slt $r3, $r0, $r1
	bnez $r3, .L8
	sub $r0, $r0, $r1
	or $r5, $r5, $r2
.L8:
	srli $r1, $r1, 1
	srli $r2, $r2, 1
	bnez $r2, .L5
.L6:
	move $r0, $r5
	ret
	.size __udivsi3, .-__udivsi3
#endif /* L_udivsi3 */

#ifdef L_umodsi3

	.text
	.align 2
	.globl __umodsi3
	.type __umodsi3, @function
__umodsi3:
	movi $r5, 0		! res = 0
	movi $r2, 1		! bit = 1
	slt $r3, $r1, $r0	! test if dividend is smaller than or equal to divisor
	beqz $r3, .L5
	bltz $r1, .L5
.L4:
	slli $r2, $r2, 1
	beqz $r2, .L6
	slli $r1, $r1, 1
	slt $r3, $r1, $r0
	beqz $r3, .L5
	bgez $r1, .L4
.L5:
	slt $r3, $r0, $r1
	bnez $r3, .L8
	sub $r0, $r0, $r1
	or $r5, $r5, $r2
.L8:
	srli $r1, $r1, 1
	srli $r2, $r2, 1
	bnez $r2, .L5
.L6:
	ret
	.size __umodsi3, .-__umodsi3
#endif /* L_umodsi3 */

/* ----------------------------------------------------------- */
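All four routines in this file drive the same restoring shift-and-subtract core: the signed entry points (__divsi3, __modsi3) record the result sign in $r4, reduce to positive operands, and negate on exit, while the loop between .L4 and .L6 does the actual work. The sketch below is a C model of that core under the register naming above ($r0 = dividend, $r1 = divisor, $r2 = bit, $r5 = result); `udiv32` is an illustrative name, not part of libgcc.

#include <stdint.h>

/* Restoring shift-and-subtract division, one quotient bit per step. */
uint32_t udiv32(uint32_t num, uint32_t den)
{
    uint32_t bit = 1, res = 0;

    /* Align: shift den up until it reaches num or its top bit is set
       (the .L4 loop); if bit shifts out entirely, den was 0.  */
    while (bit && den < num && !(den & 0x80000000u)) {
        den <<= 1;
        bit <<= 1;
    }
    /* Peel off quotient bits while shifting back down (.L5/.L8). */
    while (bit) {
        if (num >= den) {          /* the `slt`/`bnez` restoring test */
            num -= den;
            res |= bit;            /* `or $r5, $r5, $r2` */
        }
        den >>= 1;
        bit >>= 1;
    }
    return res;                    /* num now holds the remainder */
}

Like the assembly's `beqz $r2, .L6` guard, this yields a 0 quotient (and leaves the dividend untouched as the "remainder") for den == 0 rather than trapping; __umodsi3 is the same loop returning num instead of res.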
4ms/stm32mp1-baremetal
27,747
third-party/CMSIS/Device/ST/STM32MP1xx/Source/Templates/iar/startup_stm32mp15xx.s
;****************************************************************************** ;* File Name : startup_stm32mp15xx.s ;* Author : MCD Application Team ;* Description : STM32MP15xx devices vector table for EWARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == __iar_program_start, ;* - Set the vector table entries with the exceptions ISR ;* address ;* - Branches to main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM4 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;****************************************************************************** ;* @attention ;* ;* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics. ;* All rights reserved.</center></h2> ;* ;* This software component is licensed by ST under BSD 3-Clause license, ;* the "License"; You may not use this file except in compliance with the ;* License. You may obtain a copy of the License at: ;* opensource.org/licenses/BSD-3-Clause ;* ;******************************************************************************* //#define __DATA_IN_ExtSRAM /* When External SRAM is used */ #ifdef __DATA_IN_ExtSRAM __initial_spTop EQU 0x20000400 ; stack used for SystemInit & SystemInit_ExtMemCtl #endif /*__DATA_IN_ExtSRAM*/ MODULE ?cstartup ;; Forward declaration of sections. SECTION CSTACK:DATA:NOROOT(3) SECTION .intvec:CODE:NOROOT(2) EXTERN __iar_program_start EXTERN SystemInit PUBLIC __vector_table DATA __vector_table #ifdef __DATA_IN_ExtSRAM DCD __initial_spTop ; Use internal RAM for stack for calling SystemInit #else DCD sfe(CSTACK) #endif /*__DATA_IN_ExtSRAM*/ DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; NMI Handler DCD HardFault_Handler ; Hard Fault Handler DCD MemManage_Handler ; MPU Fault Handler DCD BusFault_Handler ; Bus Fault Handler DCD UsageFault_Handler ; Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; SVCall Handler DCD DebugMon_Handler ; Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; PendSV Handler DCD SysTick_Handler ; SysTick Handler ; External Interrupts DCD WWDG1_IRQHandler ; DCD PVD_AVD_IRQHandler ; DCD TAMP_IRQHandler ; DCD RTC_WKUP_ALARM_IRQHandler ; DCD 0 ; DCD RCC_IRQHandler ; DCD EXTI0_IRQHandler ; DCD EXTI1_IRQHandler ; DCD EXTI2_IRQHandler ; DCD EXTI3_IRQHandler ; DCD EXTI4_IRQHandler ; DCD DMA1_Stream0_IRQHandler ; DCD DMA1_Stream1_IRQHandler ; DCD DMA1_Stream2_IRQHandler ; DCD DMA1_Stream3_IRQHandler ; DCD DMA1_Stream4_IRQHandler ; DCD DMA1_Stream5_IRQHandler ; DCD DMA1_Stream6_IRQHandler ; DCD ADC1_IRQHandler ; DCD FDCAN1_IT0_IRQHandler ; DCD FDCAN2_IT0_IRQHandler ; DCD FDCAN1_IT1_IRQHandler ; DCD FDCAN2_IT1_IRQHandler ; DCD EXTI5_IRQHandler ; DCD TIM1_BRK_IRQHandler ; DCD TIM1_UP_IRQHandler ; DCD TIM1_TRG_COM_IRQHandler ; DCD TIM1_CC_IRQHandler ; DCD TIM2_IRQHandler ; DCD TIM3_IRQHandler ; DCD TIM4_IRQHandler ; DCD I2C1_EV_IRQHandler ; DCD I2C1_ER_IRQHandler ; DCD I2C2_EV_IRQHandler ; DCD I2C2_ER_IRQHandler ; DCD SPI1_IRQHandler ; DCD SPI2_IRQHandler ; DCD USART1_IRQHandler ; DCD USART2_IRQHandler ; DCD USART3_IRQHandler ; DCD EXTI10_IRQHandler ; DCD RTC_TIMESTAMP_IRQHandler ; DCD EXTI11_IRQHandler ; DCD TIM8_BRK_IRQHandler ; DCD TIM8_UP_IRQHandler ; DCD TIM8_TRG_COM_IRQHandler ; DCD TIM8_CC_IRQHandler ; DCD DMA1_Stream7_IRQHandler ; DCD FMC_IRQHandler ; DCD SDMMC1_IRQHandler ; DCD TIM5_IRQHandler ; DCD SPI3_IRQHandler ; DCD UART4_IRQHandler ; DCD UART5_IRQHandler ; DCD TIM6_IRQHandler ; DCD TIM7_IRQHandler ; DCD 
DMA2_Stream0_IRQHandler ; DCD DMA2_Stream1_IRQHandler ; DCD DMA2_Stream2_IRQHandler ; DCD DMA2_Stream3_IRQHandler ; DCD DMA2_Stream4_IRQHandler ; DCD ETH1_IRQHandler ; DCD ETH1_WKUP_IRQHandler ; DCD FDCAN_CAL_IRQHandler ; DCD EXTI6_IRQHandler ; DCD EXTI7_IRQHandler ; DCD EXTI8_IRQHandler ; DCD EXTI9_IRQHandler ; DCD DMA2_Stream5_IRQHandler ; DCD DMA2_Stream6_IRQHandler ; DCD DMA2_Stream7_IRQHandler ; DCD USART6_IRQHandler ; DCD I2C3_EV_IRQHandler ; DCD I2C3_ER_IRQHandler ; DCD USBH_OHCI_IRQHandler ; DCD USBH_EHCI_IRQHandler ; DCD EXTI12_IRQHandler ; DCD EXTI13_IRQHandler ; DCD DCMI_IRQHandler ; DCD CRYP1_IRQHandler ; DCD HASH1_IRQHandler ; DCD FPU_IRQHandler ; DCD UART7_IRQHandler ; DCD UART8_IRQHandler ; DCD SPI4_IRQHandler ; DCD SPI5_IRQHandler ; DCD SPI6_IRQHandler ; DCD SAI1_IRQHandler ; DCD LTDC_IRQHandler ; DCD LTDC_ER_IRQHandler ; DCD ADC2_IRQHandler ; DCD SAI2_IRQHandler ; DCD QUADSPI_IRQHandler ; DCD LPTIM1_IRQHandler ; DCD CEC_IRQHandler ; DCD I2C4_EV_IRQHandler ; DCD I2C4_ER_IRQHandler ; DCD SPDIF_RX_IRQHandler ; DCD OTG_IRQHandler ; DCD 0 ; DCD IPCC_RX0_IRQHandler ; DCD IPCC_TX0_IRQHandler ; DCD DMAMUX1_OVR_IRQHandler ; DCD IPCC_RX1_IRQHandler ; DCD IPCC_TX1_IRQHandler ; DCD CRYP2_IRQHandler ; DCD HASH2_IRQHandler ; DCD I2C5_EV_IRQHandler ; DCD I2C5_ER_IRQHandler ; DCD GPU_IRQHandler ; DCD DFSDM1_FLT0_IRQHandler ; DCD DFSDM1_FLT1_IRQHandler ; DCD DFSDM1_FLT2_IRQHandler ; DCD DFSDM1_FLT3_IRQHandler ; DCD SAI3_IRQHandler ; DCD DFSDM1_FLT4_IRQHandler ; DCD TIM15_IRQHandler ; DCD TIM16_IRQHandler ; DCD TIM17_IRQHandler ; DCD TIM12_IRQHandler ; DCD MDIOS_IRQHandler ; DCD EXTI14_IRQHandler ; DCD MDMA_IRQHandler ; DCD DSI_IRQHandler ; DCD SDMMC2_IRQHandler ; DCD HSEM_IT2_IRQHandler ; DCD DFSDM1_FLT5_IRQHandler ; DCD EXTI15_IRQHandler ; DCD nCTIIRQ1_IRQHandler ; DCD nCTIIRQ2_IRQHandler ; DCD TIM13_IRQHandler ; DCD TIM14_IRQHandler ; DCD DAC_IRQHandler ; DCD RNG1_IRQHandler ; DCD RNG2_IRQHandler ; DCD I2C6_EV_IRQHandler ; DCD I2C6_ER_IRQHandler ; DCD SDMMC3_IRQHandler ; DCD LPTIM2_IRQHandler ; DCD LPTIM3_IRQHandler ; DCD LPTIM4_IRQHandler ; DCD LPTIM5_IRQHandler ; DCD ETH1_LPI_IRQHandler ; DCD 0 ; DCD MPU_SEV_IRQHandler ; DCD RCC_WAKEUP_IRQHandler ; DCD SAI4_IRQHandler ; DCD DTS_IRQHandler ; DCD 0 ; DCD WAKEUP_PIN_IRQHandler ; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; ;; Default interrupt handlers. 
;; THUMB PUBWEAK Reset_Handler SECTION .text:CODE:NOROOT:REORDER(2) Reset_Handler LDR R0, =SystemInit BLX R0 LDR R0, =__iar_program_start BLX R0 PUBWEAK NMI_Handler SECTION .text:CODE:NOROOT:REORDER(1) NMI_Handler B NMI_Handler PUBWEAK HardFault_Handler SECTION .text:CODE:NOROOT:REORDER(1) HardFault_Handler B HardFault_Handler PUBWEAK MemManage_Handler SECTION .text:CODE:NOROOT:REORDER(1) MemManage_Handler B MemManage_Handler PUBWEAK BusFault_Handler SECTION .text:CODE:NOROOT:REORDER(1) BusFault_Handler B BusFault_Handler PUBWEAK UsageFault_Handler SECTION .text:CODE:NOROOT:REORDER(1) UsageFault_Handler B UsageFault_Handler PUBWEAK SVC_Handler SECTION .text:CODE:NOROOT:REORDER(1) SVC_Handler B SVC_Handler PUBWEAK DebugMon_Handler SECTION .text:CODE:NOROOT:REORDER(1) DebugMon_Handler B DebugMon_Handler PUBWEAK PendSV_Handler SECTION .text:CODE:NOROOT:REORDER(1) PendSV_Handler B PendSV_Handler PUBWEAK SysTick_Handler SECTION .text:CODE:NOROOT:REORDER(1) SysTick_Handler B SysTick_Handler PUBWEAK WWDG1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) WWDG1_IRQHandler B WWDG1_IRQHandler PUBWEAK PVD_AVD_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) PVD_AVD_IRQHandler B PVD_AVD_IRQHandler PUBWEAK TAMP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TAMP_IRQHandler B TAMP_IRQHandler PUBWEAK RTC_WKUP_ALARM_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RTC_WKUP_ALARM_IRQHandler B RTC_WKUP_ALARM_IRQHandler PUBWEAK RCC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RCC_IRQHandler B RCC_IRQHandler PUBWEAK EXTI0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI0_IRQHandler B EXTI0_IRQHandler PUBWEAK EXTI1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI1_IRQHandler B EXTI1_IRQHandler PUBWEAK EXTI2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI2_IRQHandler B EXTI2_IRQHandler PUBWEAK EXTI3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI3_IRQHandler B EXTI3_IRQHandler PUBWEAK EXTI4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI4_IRQHandler B EXTI4_IRQHandler PUBWEAK DMA1_Stream0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream0_IRQHandler B DMA1_Stream0_IRQHandler PUBWEAK DMA1_Stream1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream1_IRQHandler B DMA1_Stream1_IRQHandler PUBWEAK DMA1_Stream2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream2_IRQHandler B DMA1_Stream2_IRQHandler PUBWEAK DMA1_Stream3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream3_IRQHandler B DMA1_Stream3_IRQHandler PUBWEAK DMA1_Stream4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream4_IRQHandler B DMA1_Stream4_IRQHandler PUBWEAK DMA1_Stream5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream5_IRQHandler B DMA1_Stream5_IRQHandler PUBWEAK DMA1_Stream6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream6_IRQHandler B DMA1_Stream6_IRQHandler PUBWEAK ADC1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) ADC1_IRQHandler B ADC1_IRQHandler PUBWEAK FDCAN1_IT0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FDCAN1_IT0_IRQHandler B FDCAN1_IT0_IRQHandler PUBWEAK FDCAN2_IT0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FDCAN2_IT0_IRQHandler B FDCAN2_IT0_IRQHandler PUBWEAK FDCAN1_IT1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FDCAN1_IT1_IRQHandler B FDCAN1_IT1_IRQHandler PUBWEAK FDCAN2_IT1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FDCAN2_IT1_IRQHandler B FDCAN2_IT1_IRQHandler PUBWEAK EXTI5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI5_IRQHandler B EXTI5_IRQHandler PUBWEAK TIM1_BRK_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) 
TIM1_BRK_IRQHandler B TIM1_BRK_IRQHandler PUBWEAK TIM1_UP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM1_UP_IRQHandler B TIM1_UP_IRQHandler PUBWEAK TIM1_TRG_COM_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM1_TRG_COM_IRQHandler B TIM1_TRG_COM_IRQHandler PUBWEAK TIM1_CC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM1_CC_IRQHandler B TIM1_CC_IRQHandler PUBWEAK TIM2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM2_IRQHandler B TIM2_IRQHandler PUBWEAK TIM3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM3_IRQHandler B TIM3_IRQHandler PUBWEAK TIM4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM4_IRQHandler B TIM4_IRQHandler PUBWEAK I2C1_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C1_EV_IRQHandler B I2C1_EV_IRQHandler PUBWEAK I2C1_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C1_ER_IRQHandler B I2C1_ER_IRQHandler PUBWEAK I2C2_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C2_EV_IRQHandler B I2C2_EV_IRQHandler PUBWEAK I2C2_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C2_ER_IRQHandler B I2C2_ER_IRQHandler PUBWEAK SPI1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI1_IRQHandler B SPI1_IRQHandler PUBWEAK SPI2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI2_IRQHandler B SPI2_IRQHandler PUBWEAK USART1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USART1_IRQHandler B USART1_IRQHandler PUBWEAK USART2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USART2_IRQHandler B USART2_IRQHandler PUBWEAK USART3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USART3_IRQHandler B USART3_IRQHandler PUBWEAK EXTI10_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI10_IRQHandler B EXTI10_IRQHandler PUBWEAK RTC_TIMESTAMP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RTC_TIMESTAMP_IRQHandler B RTC_TIMESTAMP_IRQHandler PUBWEAK EXTI11_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI11_IRQHandler B EXTI11_IRQHandler PUBWEAK TIM8_BRK_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM8_BRK_IRQHandler B TIM8_BRK_IRQHandler PUBWEAK TIM8_UP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM8_UP_IRQHandler B TIM8_UP_IRQHandler PUBWEAK TIM8_TRG_COM_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM8_TRG_COM_IRQHandler B TIM8_TRG_COM_IRQHandler PUBWEAK TIM8_CC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM8_CC_IRQHandler B TIM8_CC_IRQHandler PUBWEAK DMA1_Stream7_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA1_Stream7_IRQHandler B DMA1_Stream7_IRQHandler PUBWEAK FMC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FMC_IRQHandler B FMC_IRQHandler PUBWEAK SDMMC1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SDMMC1_IRQHandler B SDMMC1_IRQHandler PUBWEAK TIM5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM5_IRQHandler B TIM5_IRQHandler PUBWEAK SPI3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI3_IRQHandler B SPI3_IRQHandler PUBWEAK UART4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) UART4_IRQHandler B UART4_IRQHandler PUBWEAK UART5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) UART5_IRQHandler B UART5_IRQHandler PUBWEAK TIM6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM6_IRQHandler B TIM6_IRQHandler PUBWEAK TIM7_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM7_IRQHandler B TIM7_IRQHandler PUBWEAK DMA2_Stream0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream0_IRQHandler B DMA2_Stream0_IRQHandler PUBWEAK DMA2_Stream1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream1_IRQHandler B DMA2_Stream1_IRQHandler PUBWEAK DMA2_Stream2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream2_IRQHandler B 
DMA2_Stream2_IRQHandler PUBWEAK DMA2_Stream3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream3_IRQHandler B DMA2_Stream3_IRQHandler PUBWEAK DMA2_Stream4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream4_IRQHandler B DMA2_Stream4_IRQHandler PUBWEAK ETH1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) ETH1_IRQHandler B ETH1_IRQHandler PUBWEAK ETH1_WKUP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) ETH1_WKUP_IRQHandler B ETH1_WKUP_IRQHandler PUBWEAK ETH1_LPI_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) ETH1_LPI_IRQHandler B ETH1_LPI_IRQHandler PUBWEAK FDCAN_CAL_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FDCAN_CAL_IRQHandler B FDCAN_CAL_IRQHandler PUBWEAK EXTI6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI6_IRQHandler B EXTI6_IRQHandler PUBWEAK EXTI7_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI7_IRQHandler B EXTI7_IRQHandler PUBWEAK EXTI8_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI8_IRQHandler B EXTI8_IRQHandler PUBWEAK EXTI9_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI9_IRQHandler B EXTI9_IRQHandler PUBWEAK DMA2_Stream5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream5_IRQHandler B DMA2_Stream5_IRQHandler PUBWEAK DMA2_Stream6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream6_IRQHandler B DMA2_Stream6_IRQHandler PUBWEAK DMA2_Stream7_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMA2_Stream7_IRQHandler B DMA2_Stream7_IRQHandler PUBWEAK USART6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USART6_IRQHandler B USART6_IRQHandler PUBWEAK I2C3_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C3_EV_IRQHandler B I2C3_EV_IRQHandler PUBWEAK I2C3_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C3_ER_IRQHandler B I2C3_ER_IRQHandler PUBWEAK USBH_OHCI_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USBH_OHCI_IRQHandler B USBH_OHCI_IRQHandler PUBWEAK USBH_EHCI_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) USBH_EHCI_IRQHandler B USBH_EHCI_IRQHandler PUBWEAK EXTI12_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI12_IRQHandler B EXTI12_IRQHandler PUBWEAK EXTI13_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI13_IRQHandler B EXTI13_IRQHandler PUBWEAK DCMI_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DCMI_IRQHandler B DCMI_IRQHandler PUBWEAK CRYP1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) CRYP1_IRQHandler B CRYP1_IRQHandler PUBWEAK HASH1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) HASH1_IRQHandler B HASH1_IRQHandler PUBWEAK FPU_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) FPU_IRQHandler B FPU_IRQHandler PUBWEAK UART7_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) UART7_IRQHandler B UART7_IRQHandler PUBWEAK UART8_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) UART8_IRQHandler B UART8_IRQHandler PUBWEAK SPI4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI4_IRQHandler B SPI4_IRQHandler PUBWEAK SPI5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI5_IRQHandler B SPI5_IRQHandler PUBWEAK SPI6_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPI6_IRQHandler B SPI6_IRQHandler PUBWEAK SAI1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SAI1_IRQHandler B SAI1_IRQHandler PUBWEAK LTDC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LTDC_IRQHandler B LTDC_IRQHandler PUBWEAK LTDC_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LTDC_ER_IRQHandler B LTDC_ER_IRQHandler PUBWEAK ADC2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) ADC2_IRQHandler B ADC2_IRQHandler PUBWEAK SAI2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SAI2_IRQHandler B SAI2_IRQHandler PUBWEAK QUADSPI_IRQHandler SECTION 
.text:CODE:NOROOT:REORDER(1) QUADSPI_IRQHandler B QUADSPI_IRQHandler PUBWEAK LPTIM1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LPTIM1_IRQHandler B LPTIM1_IRQHandler PUBWEAK CEC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) CEC_IRQHandler B CEC_IRQHandler PUBWEAK I2C4_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C4_EV_IRQHandler B I2C4_EV_IRQHandler PUBWEAK I2C4_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C4_ER_IRQHandler B I2C4_ER_IRQHandler PUBWEAK SPDIF_RX_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SPDIF_RX_IRQHandler B SPDIF_RX_IRQHandler PUBWEAK OTG_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) OTG_IRQHandler B OTG_IRQHandler PUBWEAK IPCC_RX0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) IPCC_RX0_IRQHandler B IPCC_RX0_IRQHandler PUBWEAK IPCC_TX0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) IPCC_TX0_IRQHandler B IPCC_TX0_IRQHandler PUBWEAK DMAMUX1_OVR_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DMAMUX1_OVR_IRQHandler B DMAMUX1_OVR_IRQHandler PUBWEAK IPCC_RX1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) IPCC_RX1_IRQHandler B IPCC_RX1_IRQHandler PUBWEAK IPCC_TX1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) IPCC_TX1_IRQHandler B IPCC_TX1_IRQHandler PUBWEAK CRYP2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) CRYP2_IRQHandler B CRYP2_IRQHandler PUBWEAK HASH2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) HASH2_IRQHandler B HASH2_IRQHandler PUBWEAK I2C5_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C5_EV_IRQHandler B I2C5_EV_IRQHandler PUBWEAK I2C5_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C5_ER_IRQHandler B I2C5_ER_IRQHandler PUBWEAK GPU_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) GPU_IRQHandler B GPU_IRQHandler PUBWEAK DFSDM1_FLT0_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT0_IRQHandler B DFSDM1_FLT0_IRQHandler PUBWEAK DFSDM1_FLT1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT1_IRQHandler B DFSDM1_FLT1_IRQHandler PUBWEAK DFSDM1_FLT2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT2_IRQHandler B DFSDM1_FLT2_IRQHandler PUBWEAK DFSDM1_FLT3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT3_IRQHandler B DFSDM1_FLT3_IRQHandler PUBWEAK SAI3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SAI3_IRQHandler B SAI3_IRQHandler PUBWEAK DFSDM1_FLT4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT4_IRQHandler B DFSDM1_FLT4_IRQHandler PUBWEAK TIM15_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM15_IRQHandler B TIM15_IRQHandler PUBWEAK TIM16_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM16_IRQHandler B TIM16_IRQHandler PUBWEAK TIM17_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM17_IRQHandler B TIM17_IRQHandler PUBWEAK TIM12_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM12_IRQHandler B TIM12_IRQHandler PUBWEAK MDIOS_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) MDIOS_IRQHandler B MDIOS_IRQHandler PUBWEAK EXTI14_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI14_IRQHandler B EXTI14_IRQHandler PUBWEAK MDMA_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) MDMA_IRQHandler B MDMA_IRQHandler PUBWEAK DSI_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DSI_IRQHandler B DSI_IRQHandler PUBWEAK SDMMC2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SDMMC2_IRQHandler B SDMMC2_IRQHandler PUBWEAK HSEM_IT2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) HSEM_IT2_IRQHandler B HSEM_IT2_IRQHandler PUBWEAK DFSDM1_FLT5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DFSDM1_FLT5_IRQHandler B DFSDM1_FLT5_IRQHandler PUBWEAK nCTIIRQ1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) 
nCTIIRQ1_IRQHandler B nCTIIRQ1_IRQHandler PUBWEAK nCTIIRQ2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) nCTIIRQ2_IRQHandler B nCTIIRQ2_IRQHandler PUBWEAK EXTI15_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) EXTI15_IRQHandler B EXTI15_IRQHandler PUBWEAK TIM13_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM13_IRQHandler B TIM13_IRQHandler PUBWEAK TIM14_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) TIM14_IRQHandler B TIM14_IRQHandler PUBWEAK DAC_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DAC_IRQHandler B DAC_IRQHandler PUBWEAK RNG1_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RNG1_IRQHandler B RNG1_IRQHandler PUBWEAK RNG2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RNG2_IRQHandler B RNG2_IRQHandler PUBWEAK I2C6_EV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C6_EV_IRQHandler B I2C6_EV_IRQHandler PUBWEAK I2C6_ER_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) I2C6_ER_IRQHandler B I2C6_ER_IRQHandler PUBWEAK SDMMC3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SDMMC3_IRQHandler B SDMMC3_IRQHandler PUBWEAK LPTIM2_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LPTIM2_IRQHandler B LPTIM2_IRQHandler PUBWEAK LPTIM3_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LPTIM3_IRQHandler B LPTIM3_IRQHandler PUBWEAK LPTIM4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LPTIM4_IRQHandler B LPTIM4_IRQHandler PUBWEAK LPTIM5_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) LPTIM5_IRQHandler B LPTIM5_IRQHandler PUBWEAK MPU_SEV_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) MPU_SEV_IRQHandler B MPU_SEV_IRQHandler PUBWEAK RCC_WAKEUP_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) RCC_WAKEUP_IRQHandler B RCC_WAKEUP_IRQHandler PUBWEAK SAI4_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) SAI4_IRQHandler B SAI4_IRQHandler PUBWEAK DTS_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) DTS_IRQHandler B DTS_IRQHandler PUBWEAK WAKEUP_PIN_IRQHandler SECTION .text:CODE:NOROOT:REORDER(1) WAKEUP_PIN_IRQHandler B WAKEUP_PIN_IRQHandler END /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
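Every handler in the table above is published with PUBWEAK and falls through to a branch-to-self, so an unserviced interrupt simply spins where a debugger can inspect it. Overriding one needs nothing more than a strong definition with the identical name in application code; the linker then drops the weak stub. A minimal C example, with TIM2 chosen arbitrarily and the body left as a device-specific placeholder:

/* The name must match the vector entry exactly; in C++ it would also
   need extern "C" linkage to avoid name mangling. */
void TIM2_IRQHandler(void)
{
    /* clear the peripheral's pending flag here, then service the event */
}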
4ms/stm32mp1-baremetal
34,186
third-party/CMSIS/Device/ST/STM32MP1xx/Source/Templates/gcc/startup_stm32mp15xx.s
/** ****************************************************************************** * @file startup_stm32mp15xx.s * @author MCD Application Team * @brief STM32MP15xx Devices vector table for GCC based toolchain. * This module performs: * - Set the initial SP * - Set the initial PC == Reset_Handler, * - Set the vector table entries with the exceptions ISR address * - Branches to main in the C library (which eventually * calls main()). * After Reset the Cortex-M processor is in Thread mode, * priority is Privileged, and the Stack is set to Main. ****************************************************************************** * @attention * * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics. * All rights reserved.</center></h2> * * This software component is licensed by ST under BSD 3-Clause license, * the "License"; You may not use this file except in compliance with the * License. You may obtain a copy of the License at: * opensource.org/licenses/BSD-3-Clause * ****************************************************************************** */ .syntax unified .cpu cortex-m4 .fpu softvfp .thumb .global g_pfnVectors .global Default_Handler /* start address for the initialization values of the .data section. defined in linker script */ .word _sidata /* start address for the .data section. defined in linker script */ .word _sdata /* end address for the .data section. defined in linker script */ .word _edata /* start address for the .bss section. defined in linker script */ .word _sbss /* end address for the .bss section. defined in linker script */ .word _ebss .section .startup_copro_fw.Reset_Handler,"ax" .weak Reset_Handler .type Reset_Handler, %function Reset_Handler: ldr sp, =_estack /* set stack pointer */ /* Loop to copy data from read only memory to RAM. The ranges * of copy from/to are specified by following symbols evaluated in * linker script. * _sidata: End of code section, i.e., begin of data sections to copy from. * _sdata/_edata: RAM address range that data should be * copied to. Both must be aligned to 4 bytes boundary. */ movs r1, #0 b LoopCopyDataInit CopyDataInit: ldr r3, =_sidata ldr r3, [r3, r1] str r3, [r0, r1] adds r1, r1, #4 LoopCopyDataInit: ldr r0, =_sdata ldr r3, =_edata adds r2, r0, r1 cmp r2, r3 bcc CopyDataInit ldr r2, =_sbss b LoopFillZerobss /* Zero fill the bss segment. */ FillZerobss: movs r3, #0 str r3, [r2], #4 LoopFillZerobss: ldr r3, = _ebss cmp r2, r3 bcc FillZerobss /* Call the clock system intitialization function.*/ bl SystemInit // ldr r0, =SystemInit // blx r0 /* Call static constructors */ bl __libc_init_array // ldr r0, =__libc_init_array // blx r0 /* Call the application's entry point.*/ bl main //ldr r0, =main //blx r0 LoopForever: b LoopForever .size Reset_Handler, .-Reset_Handler /** * @brief This is the code that gets called when the processor receives an * unexpected interrupt. This simply enters an infinite loop, preserving * the system state for examination by a debugger. * * @param None * @retval : None */ .section .text.Default_Handler,"ax",%progbits Default_Handler: Infinite_Loop: b Infinite_Loop .size Default_Handler, .-Default_Handler /****************************************************************************** * * The minimal vector table for a Cortex M4. Note that the proper constructs * must be placed on this to ensure that it ends up at physical address * 0x0000.0000. 
* ******************************************************************************/ .section .isr_vector,"a",%progbits .type g_pfnVectors, %object .size g_pfnVectors, .-g_pfnVectors g_pfnVectors: .word _estack // Top of Stack .word Reset_Handler // Reset Handler .word NMI_Handler // NMI Handler .word HardFault_Handler // Hard Fault Handler .word MemManage_Handler // MPU Fault Handler .word BusFault_Handler // Bus Fault Handler .word UsageFault_Handler // Usage Fault Handler .word 0 // Reserved .word 0 // Reserved .word 0 // Reserved .word 0 // Reserved .word SVC_Handler // SVCall Handler .word DebugMon_Handler // Debug Monitor Handler .word 0 // Reserved .word PendSV_Handler // PendSV Handler .word SysTick_Handler // SysTick Handler // External Interrupts .word WWDG1_IRQHandler // Window WatchDog 1 .word PVD_AVD_IRQHandler // PVD and AVD through EXTI Line detection .word TAMP_IRQHandler // Tamper and TimeStamps through the EXTI line .word RTC_WKUP_ALARM_IRQHandler // RTC Wakeup and Alarm through the EXTI line .word RESERVED4_IRQHandler // Reserved .word RCC_IRQHandler // RCC .word EXTI0_IRQHandler // EXTI Line0 .word EXTI1_IRQHandler // EXTI Line1 .word EXTI2_IRQHandler // EXTI Line2 .word EXTI3_IRQHandler // EXTI Line3 .word EXTI4_IRQHandler // EXTI Line4 .word DMA1_Stream0_IRQHandler // DMA1 Stream 0 .word DMA1_Stream1_IRQHandler // DMA1 Stream 1 .word DMA1_Stream2_IRQHandler // DMA1 Stream 2 .word DMA1_Stream3_IRQHandler // DMA1 Stream 3 .word DMA1_Stream4_IRQHandler // DMA1 Stream 4 .word DMA1_Stream5_IRQHandler // DMA1 Stream 5 .word DMA1_Stream6_IRQHandler // DMA1 Stream 6 .word ADC1_IRQHandler // ADC1 .word FDCAN1_IT0_IRQHandler // FDCAN1 Interrupt line 0 .word FDCAN2_IT0_IRQHandler // FDCAN2 Interrupt line 0 .word FDCAN1_IT1_IRQHandler // FDCAN1 Interrupt line 1 .word FDCAN2_IT1_IRQHandler // FDCAN2 Interrupt line 1 .word EXTI5_IRQHandler // External Line5 interrupts through AIEC .word TIM1_BRK_IRQHandler // TIM1 Break interrupt .word TIM1_UP_IRQHandler // TIM1 Update Interrupt .word TIM1_TRG_COM_IRQHandler // TIM1 Trigger and Commutation Interrupt .word TIM1_CC_IRQHandler // TIM1 Capture Compare .word TIM2_IRQHandler // TIM2 .word TIM3_IRQHandler // TIM3 .word TIM4_IRQHandler // TIM4 .word I2C1_EV_IRQHandler // I2C1 Event .word I2C1_ER_IRQHandler // I2C1 Error .word I2C2_EV_IRQHandler // I2C2 Event .word I2C2_ER_IRQHandler // I2C2 Error .word SPI1_IRQHandler // SPI1 .word SPI2_IRQHandler // SPI2 .word USART1_IRQHandler // USART1 .word USART2_IRQHandler // USART2 .word USART3_IRQHandler // USART3 .word EXTI10_IRQHandler // External Line10 interrupts through AIEC .word RTC_TIMESTAMP_IRQHandler // RTC TimeStamp through EXTI Line .word EXTI11_IRQHandler // External Line11 interrupts through AIEC .word TIM8_BRK_IRQHandler // TIM8 Break Interrupt .word TIM8_UP_IRQHandler // TIM8 Update Interrupt .word TIM8_TRG_COM_IRQHandler // TIM8 Trigger and Commutation Interrupt .word TIM8_CC_IRQHandler // TIM8 Capture Compare Interrupt .word DMA1_Stream7_IRQHandler // DMA1 Stream7 .word FMC_IRQHandler // FMC .word SDMMC1_IRQHandler // SDMMC1 .word TIM5_IRQHandler // TIM5 .word SPI3_IRQHandler // SPI3 .word UART4_IRQHandler // UART4 .word UART5_IRQHandler // UART5 .word TIM6_IRQHandler // TIM6 .word TIM7_IRQHandler // TIM7 .word DMA2_Stream0_IRQHandler // DMA2 Stream 0 .word DMA2_Stream1_IRQHandler // DMA2 Stream 1 .word DMA2_Stream2_IRQHandler // DMA2 Stream 2 .word DMA2_Stream3_IRQHandler // DMA2 Stream 3 .word DMA2_Stream4_IRQHandler // DMA2 Stream 4 .word ETH1_IRQHandler // Ethernet .word 
ETH1_WKUP_IRQHandler // Ethernet Wakeup through EXTI line .word FDCAN_CAL_IRQHandler // FDCAN Calibration .word EXTI6_IRQHandler // EXTI Line6 interrupts through AIEC .word EXTI7_IRQHandler // EXTI Line7 interrupts through AIEC .word EXTI8_IRQHandler // EXTI Line8 interrupts through AIEC .word EXTI9_IRQHandler // EXTI Line9 interrupts through AIEC .word DMA2_Stream5_IRQHandler // DMA2 Stream 5 .word DMA2_Stream6_IRQHandler // DMA2 Stream 6 .word DMA2_Stream7_IRQHandler // DMA2 Stream 7 .word USART6_IRQHandler // USART6 .word I2C3_EV_IRQHandler // I2C3 event .word I2C3_ER_IRQHandler // I2C3 error .word USBH_OHCI_IRQHandler // USB Host OHCI .word USBH_EHCI_IRQHandler // USB Host EHCI .word EXTI12_IRQHandler // EXTI Line12 interrupts through AIEC .word EXTI13_IRQHandler // EXTI Line13 interrupts through AIEC .word DCMI_IRQHandler // DCMI .word CRYP1_IRQHandler // Crypto1 global interrupt .word HASH1_IRQHandler // Crypto Hash1 interrupt .word FPU_IRQHandler // FPU .word UART7_IRQHandler // UART7 .word UART8_IRQHandler // UART8 .word SPI4_IRQHandler // SPI4 .word SPI5_IRQHandler // SPI5 .word SPI6_IRQHandler // SPI6 .word SAI1_IRQHandler // SAI1 .word LTDC_IRQHandler // LTDC .word LTDC_ER_IRQHandler // LTDC error .word ADC2_IRQHandler // ADC2 .word SAI2_IRQHandler // SAI2 .word QUADSPI_IRQHandler // QUADSPI .word LPTIM1_IRQHandler // LPTIM1 global interrupt .word CEC_IRQHandler // HDMI_CEC .word I2C4_EV_IRQHandler // I2C4 Event .word I2C4_ER_IRQHandler // I2C4 Error .word SPDIF_RX_IRQHandler // SPDIF_RX .word OTG_IRQHandler // USB On The Go HS global interrupt .word RESERVED99_IRQHandler // Reserved .word IPCC_RX0_IRQHandler // Mailbox RX0 Free interrupt .word IPCC_TX0_IRQHandler // Mailbox TX0 Free interrupt .word DMAMUX1_OVR_IRQHandler // DMAMUX1 Overrun interrupt .word IPCC_RX1_IRQHandler // Mailbox RX1 Free interrupt .word IPCC_TX1_IRQHandler // Mailbox TX1 Free interrupt .word CRYP2_IRQHandler // Crypto2 global interrupt .word HASH2_IRQHandler // Crypto Hash2 interrupt .word I2C5_EV_IRQHandler // I2C5 Event Interrupt .word I2C5_ER_IRQHandler // I2C5 Error Interrupt .word GPU_IRQHandler // GPU Global Interrupt .word DFSDM1_FLT0_IRQHandler // DFSDM Filter0 Interrupt .word DFSDM1_FLT1_IRQHandler // DFSDM Filter1 Interrupt .word DFSDM1_FLT2_IRQHandler // DFSDM Filter2 Interrupt .word DFSDM1_FLT3_IRQHandler // DFSDM Filter3 Interrupt .word SAI3_IRQHandler // SAI3 global Interrupt .word DFSDM1_FLT4_IRQHandler // DFSDM Filter4 Interrupt .word TIM15_IRQHandler // TIM15 global Interrupt .word TIM16_IRQHandler // TIM16 global Interrupt .word TIM17_IRQHandler // TIM17 global Interrupt .word TIM12_IRQHandler // TIM12 global Interrupt .word MDIOS_IRQHandler // MDIOS global Interrupt .word EXTI14_IRQHandler // EXTI Line14 interrupts through AIEC .word MDMA_IRQHandler // MDMA global Interrupt .word DSI_IRQHandler // DSI global Interrupt .word SDMMC2_IRQHandler // SDMMC2 global Interrupt .word HSEM_IT2_IRQHandler // HSEM global Interrupt .word DFSDM1_FLT5_IRQHandler // DFSDM Filter5 Interrupt .word EXTI15_IRQHandler // EXTI Line15 interrupts through AIEC .word nCTIIRQ1_IRQHandler // Cortex-M4 CTI interrupt 1 .word nCTIIRQ2_IRQHandler // Cortex-M4 CTI interrupt 2 .word TIM13_IRQHandler // TIM13 global interrupt .word TIM14_IRQHandler // TIM14 global interrupt .word DAC_IRQHandler // DAC1 and DAC2 underrun error interrupts .word RNG1_IRQHandler // RNG1 interrupt .word RNG2_IRQHandler // RNG2 interrupt .word I2C6_EV_IRQHandler // I2C6 Event Interrupt .word I2C6_ER_IRQHandler // I2C6 Error Interrupt .word 
SDMMC3_IRQHandler // SDMMC3 global Interrupt .word LPTIM2_IRQHandler // LPTIM2 global interrupt .word LPTIM3_IRQHandler // LPTIM3 global interrupt .word LPTIM4_IRQHandler // LPTIM4 global interrupt .word LPTIM5_IRQHandler // LPTIM5 global interrupt .word ETH1_LPI_IRQHandler // ETH1_LPI interrupt .word RESERVED143_IRQHandler // Reserved .word MPU_SEV_IRQHandler // MPU Send Event through AIEC .word RCC_WAKEUP_IRQHandler // RCC Wake up interrupt .word SAI4_IRQHandler // SAI4 global interrupt .word DTS_IRQHandler // Temperature sensor interrupt .word RESERVED148_IRQHandler // Reserved .word WAKEUP_PIN_IRQHandler // Interrupt for all 6 wake-up pins /******************************************************************************* * * Provide weak aliases for each Exception handler to the Default_Handler. * As they are weak aliases, any function with the same name will override * this definition. * *******************************************************************************/ .weak NMI_Handler .thumb_set NMI_Handler,Default_Handler .weak HardFault_Handler .thumb_set HardFault_Handler,Default_Handler .weak MemManage_Handler .thumb_set MemManage_Handler,Default_Handler .weak BusFault_Handler .thumb_set BusFault_Handler,Default_Handler .weak UsageFault_Handler .thumb_set UsageFault_Handler,Default_Handler .weak SVC_Handler .thumb_set SVC_Handler,Default_Handler .weak DebugMon_Handler .thumb_set DebugMon_Handler,Default_Handler .weak PendSV_Handler .thumb_set PendSV_Handler,Default_Handler .weak SysTick_Handler .thumb_set SysTick_Handler,Default_Handler .weak RESERVED4_IRQHandler .thumb_set RESERVED4_IRQHandler,Default_Handler .weak RESERVED99_IRQHandler .thumb_set RESERVED99_IRQHandler,Default_Handler .weak ETH1_LPI_IRQHandler .thumb_set ETH1_LPI_IRQHandler,Default_Handler .weak RESERVED143_IRQHandler .thumb_set RESERVED143_IRQHandler,Default_Handler .weak WWDG1_IRQHandler .thumb_set WWDG1_IRQHandler,Default_Handler .weak PVD_AVD_IRQHandler .thumb_set PVD_AVD_IRQHandler,Default_Handler .weak TAMP_IRQHandler .thumb_set TAMP_IRQHandler,Default_Handler .weak RTC_WKUP_ALARM_IRQHandler .thumb_set RTC_WKUP_ALARM_IRQHandler,Default_Handler .weak RCC_IRQHandler .thumb_set RCC_IRQHandler,Default_Handler .weak EXTI0_IRQHandler .thumb_set EXTI0_IRQHandler,Default_Handler .weak EXTI1_IRQHandler .thumb_set EXTI1_IRQHandler,Default_Handler .weak EXTI2_IRQHandler .thumb_set EXTI2_IRQHandler,Default_Handler .weak EXTI3_IRQHandler .thumb_set EXTI3_IRQHandler,Default_Handler .weak EXTI4_IRQHandler .thumb_set EXTI4_IRQHandler,Default_Handler .weak DMA1_Stream0_IRQHandler .thumb_set DMA1_Stream0_IRQHandler,Default_Handler .weak DMA1_Stream1_IRQHandler .thumb_set DMA1_Stream1_IRQHandler,Default_Handler .weak DMA1_Stream2_IRQHandler .thumb_set DMA1_Stream2_IRQHandler,Default_Handler .weak DMA1_Stream3_IRQHandler .thumb_set DMA1_Stream3_IRQHandler,Default_Handler .weak DMA1_Stream4_IRQHandler .thumb_set DMA1_Stream4_IRQHandler,Default_Handler .weak DMA1_Stream5_IRQHandler .thumb_set DMA1_Stream5_IRQHandler,Default_Handler .weak DMA1_Stream6_IRQHandler .thumb_set DMA1_Stream6_IRQHandler,Default_Handler .weak ADC1_IRQHandler .thumb_set ADC1_IRQHandler,Default_Handler .weak ADC2_IRQHandler .thumb_set ADC2_IRQHandler,Default_Handler .weak FDCAN1_IT0_IRQHandler .thumb_set FDCAN1_IT0_IRQHandler,Default_Handler .weak FDCAN2_IT0_IRQHandler .thumb_set FDCAN2_IT0_IRQHandler,Default_Handler .weak FDCAN1_IT1_IRQHandler .thumb_set FDCAN1_IT1_IRQHandler,Default_Handler .weak FDCAN2_IT1_IRQHandler .thumb_set 
FDCAN2_IT1_IRQHandler,Default_Handler .weak FDCAN_CAL_IRQHandler .thumb_set FDCAN_CAL_IRQHandler,Default_Handler .weak EXTI5_IRQHandler .thumb_set EXTI5_IRQHandler,Default_Handler .weak TIM1_BRK_IRQHandler .thumb_set TIM1_BRK_IRQHandler,Default_Handler .weak TIM1_UP_IRQHandler .thumb_set TIM1_UP_IRQHandler,Default_Handler .weak TIM1_TRG_COM_IRQHandler .thumb_set TIM1_TRG_COM_IRQHandler,Default_Handler .weak TIM1_CC_IRQHandler .thumb_set TIM1_CC_IRQHandler,Default_Handler .weak TIM2_IRQHandler .thumb_set TIM2_IRQHandler,Default_Handler .weak TIM3_IRQHandler .thumb_set TIM3_IRQHandler,Default_Handler .weak TIM4_IRQHandler .thumb_set TIM4_IRQHandler,Default_Handler .weak I2C1_EV_IRQHandler .thumb_set I2C1_EV_IRQHandler,Default_Handler .weak I2C1_ER_IRQHandler .thumb_set I2C1_ER_IRQHandler,Default_Handler .weak I2C2_EV_IRQHandler .thumb_set I2C2_EV_IRQHandler,Default_Handler .weak I2C2_ER_IRQHandler .thumb_set I2C2_ER_IRQHandler,Default_Handler .weak SPI1_IRQHandler .thumb_set SPI1_IRQHandler,Default_Handler .weak SPI2_IRQHandler .thumb_set SPI2_IRQHandler,Default_Handler .weak USART1_IRQHandler .thumb_set USART1_IRQHandler,Default_Handler .weak USART2_IRQHandler .thumb_set USART2_IRQHandler,Default_Handler .weak USART3_IRQHandler .thumb_set USART3_IRQHandler,Default_Handler .weak EXTI10_IRQHandler .thumb_set EXTI10_IRQHandler,Default_Handler .weak RTC_TIMESTAMP_IRQHandler .thumb_set RTC_TIMESTAMP_IRQHandler,Default_Handler .weak EXTI11_IRQHandler .thumb_set EXTI11_IRQHandler,Default_Handler .weak TIM8_BRK_IRQHandler .thumb_set TIM8_BRK_IRQHandler,Default_Handler .weak TIM8_UP_IRQHandler .thumb_set TIM8_UP_IRQHandler,Default_Handler .weak TIM8_TRG_COM_IRQHandler .thumb_set TIM8_TRG_COM_IRQHandler,Default_Handler .weak TIM8_CC_IRQHandler .thumb_set TIM8_CC_IRQHandler,Default_Handler .weak DMA1_Stream7_IRQHandler .thumb_set DMA1_Stream7_IRQHandler,Default_Handler .weak FMC_IRQHandler .thumb_set FMC_IRQHandler,Default_Handler .weak SDMMC1_IRQHandler .thumb_set SDMMC1_IRQHandler,Default_Handler .weak TIM5_IRQHandler .thumb_set TIM5_IRQHandler,Default_Handler .weak SPI3_IRQHandler .thumb_set SPI3_IRQHandler,Default_Handler .weak UART4_IRQHandler .thumb_set UART4_IRQHandler,Default_Handler .weak UART5_IRQHandler .thumb_set UART5_IRQHandler,Default_Handler .weak TIM6_IRQHandler .thumb_set TIM6_IRQHandler,Default_Handler .weak TIM7_IRQHandler .thumb_set TIM7_IRQHandler,Default_Handler .weak DMA2_Stream0_IRQHandler .thumb_set DMA2_Stream0_IRQHandler,Default_Handler .weak DMA2_Stream1_IRQHandler .thumb_set DMA2_Stream1_IRQHandler,Default_Handler .weak DMA2_Stream2_IRQHandler .thumb_set DMA2_Stream2_IRQHandler,Default_Handler .weak DMA2_Stream3_IRQHandler .thumb_set DMA2_Stream3_IRQHandler,Default_Handler .weak DMA2_Stream4_IRQHandler .thumb_set DMA2_Stream4_IRQHandler,Default_Handler .weak ETH1_IRQHandler .thumb_set ETH1_IRQHandler,Default_Handler .weak ETH1_WKUP_IRQHandler .thumb_set ETH1_WKUP_IRQHandler,Default_Handler .weak ETH1_LPI_IRQHandler .thumb_set ETH1_LPI_IRQHandler,Default_Handler .weak EXTI6_IRQHandler .thumb_set EXTI6_IRQHandler,Default_Handler .weak EXTI7_IRQHandler .thumb_set EXTI7_IRQHandler,Default_Handler .weak EXTI8_IRQHandler .thumb_set EXTI8_IRQHandler,Default_Handler .weak EXTI9_IRQHandler .thumb_set EXTI9_IRQHandler,Default_Handler .weak DMA2_Stream5_IRQHandler .thumb_set DMA2_Stream5_IRQHandler,Default_Handler .weak DMA2_Stream6_IRQHandler .thumb_set DMA2_Stream6_IRQHandler,Default_Handler .weak DMA2_Stream7_IRQHandler .thumb_set DMA2_Stream7_IRQHandler,Default_Handler .weak 
USART6_IRQHandler .thumb_set USART6_IRQHandler,Default_Handler .weak I2C3_EV_IRQHandler .thumb_set I2C3_EV_IRQHandler,Default_Handler .weak I2C3_ER_IRQHandler .thumb_set I2C3_ER_IRQHandler,Default_Handler .weak USBH_OHCI_IRQHandler .thumb_set USBH_OHCI_IRQHandler,Default_Handler .weak USBH_EHCI_IRQHandler .thumb_set USBH_EHCI_IRQHandler,Default_Handler .weak EXTI12_IRQHandler .thumb_set EXTI12_IRQHandler,Default_Handler .weak EXTI13_IRQHandler .thumb_set EXTI13_IRQHandler,Default_Handler .weak DCMI_IRQHandler .thumb_set DCMI_IRQHandler,Default_Handler .weak CRYP1_IRQHandler .thumb_set CRYP1_IRQHandler,Default_Handler .weak HASH1_IRQHandler .thumb_set HASH1_IRQHandler,Default_Handler .weak FPU_IRQHandler .thumb_set FPU_IRQHandler,Default_Handler .weak UART7_IRQHandler .thumb_set UART7_IRQHandler,Default_Handler .weak UART8_IRQHandler .thumb_set UART8_IRQHandler,Default_Handler .weak SPI4_IRQHandler .thumb_set SPI4_IRQHandler,Default_Handler .weak SPI5_IRQHandler .thumb_set SPI5_IRQHandler,Default_Handler .weak SPI6_IRQHandler .thumb_set SPI6_IRQHandler,Default_Handler .weak SAI1_IRQHandler .thumb_set SAI1_IRQHandler,Default_Handler .weak LTDC_IRQHandler .thumb_set LTDC_IRQHandler,Default_Handler .weak LTDC_ER_IRQHandler .thumb_set LTDC_ER_IRQHandler,Default_Handler .weak SAI2_IRQHandler .thumb_set SAI2_IRQHandler,Default_Handler .weak QUADSPI_IRQHandler .thumb_set QUADSPI_IRQHandler,Default_Handler .weak LPTIM1_IRQHandler .thumb_set LPTIM1_IRQHandler,Default_Handler .weak CEC_IRQHandler .thumb_set CEC_IRQHandler,Default_Handler .weak I2C4_EV_IRQHandler .thumb_set I2C4_EV_IRQHandler,Default_Handler .weak I2C4_ER_IRQHandler .thumb_set I2C4_ER_IRQHandler,Default_Handler .weak SPDIF_RX_IRQHandler .thumb_set SPDIF_RX_IRQHandler,Default_Handler .weak OTG_IRQHandler .thumb_set OTG_IRQHandler,Default_Handler .weak IPCC_RX0_IRQHandler .thumb_set IPCC_RX0_IRQHandler,Default_Handler .weak IPCC_TX0_IRQHandler .thumb_set IPCC_TX0_IRQHandler,Default_Handler .weak DMAMUX1_OVR_IRQHandler .thumb_set DMAMUX1_OVR_IRQHandler,Default_Handler .weak IPCC_RX1_IRQHandler .thumb_set IPCC_RX1_IRQHandler,Default_Handler .weak IPCC_TX1_IRQHandler .thumb_set IPCC_TX1_IRQHandler,Default_Handler .weak CRYP2_IRQHandler .thumb_set CRYP2_IRQHandler,Default_Handler .weak HASH2_IRQHandler .thumb_set HASH2_IRQHandler,Default_Handler .weak I2C5_EV_IRQHandler .thumb_set I2C5_EV_IRQHandler,Default_Handler .weak I2C5_ER_IRQHandler .thumb_set I2C5_ER_IRQHandler,Default_Handler .weak GPU_IRQHandler .thumb_set GPU_IRQHandler,Default_Handler .weak DFSDM1_FLT0_IRQHandler .thumb_set DFSDM1_FLT0_IRQHandler,Default_Handler .weak DFSDM1_FLT1_IRQHandler .thumb_set DFSDM1_FLT1_IRQHandler,Default_Handler .weak DFSDM1_FLT2_IRQHandler .thumb_set DFSDM1_FLT2_IRQHandler,Default_Handler .weak DFSDM1_FLT3_IRQHandler .thumb_set DFSDM1_FLT3_IRQHandler,Default_Handler .weak SAI3_IRQHandler .thumb_set SAI3_IRQHandler,Default_Handler .weak DFSDM1_FLT4_IRQHandler .thumb_set DFSDM1_FLT4_IRQHandler,Default_Handler .weak TIM15_IRQHandler .thumb_set TIM15_IRQHandler,Default_Handler .weak TIM16_IRQHandler .thumb_set TIM16_IRQHandler,Default_Handler .weak TIM17_IRQHandler .thumb_set TIM17_IRQHandler,Default_Handler .weak TIM12_IRQHandler .thumb_set TIM12_IRQHandler,Default_Handler .weak MDIOS_IRQHandler .thumb_set MDIOS_IRQHandler,Default_Handler .weak EXTI14_IRQHandler .thumb_set EXTI14_IRQHandler,Default_Handler .weak MDMA_IRQHandler .thumb_set MDMA_IRQHandler,Default_Handler .weak DSI_IRQHandler .thumb_set DSI_IRQHandler,Default_Handler .weak 
SDMMC2_IRQHandler .thumb_set SDMMC2_IRQHandler,Default_Handler .weak HSEM_IT2_IRQHandler .thumb_set HSEM_IT2_IRQHandler,Default_Handler .weak DFSDM1_FLT5_IRQHandler .thumb_set DFSDM1_FLT5_IRQHandler,Default_Handler .weak EXTI15_IRQHandler .thumb_set EXTI15_IRQHandler,Default_Handler .weak nCTIIRQ1_IRQHandler .thumb_set nCTIIRQ1_IRQHandler,Default_Handler .weak nCTIIRQ2_IRQHandler .thumb_set nCTIIRQ2_IRQHandler,Default_Handler .weak TIM13_IRQHandler .thumb_set TIM13_IRQHandler,Default_Handler .weak TIM14_IRQHandler .thumb_set TIM14_IRQHandler,Default_Handler .weak DAC_IRQHandler .thumb_set DAC_IRQHandler,Default_Handler .weak RNG1_IRQHandler .thumb_set RNG1_IRQHandler,Default_Handler .weak RNG2_IRQHandler .thumb_set RNG2_IRQHandler,Default_Handler .weak I2C6_EV_IRQHandler .thumb_set I2C6_EV_IRQHandler,Default_Handler .weak I2C6_ER_IRQHandler .thumb_set I2C6_ER_IRQHandler,Default_Handler .weak SDMMC3_IRQHandler .thumb_set SDMMC3_IRQHandler,Default_Handler .weak LPTIM2_IRQHandler .thumb_set LPTIM2_IRQHandler,Default_Handler .weak LPTIM3_IRQHandler .thumb_set LPTIM3_IRQHandler,Default_Handler .weak LPTIM4_IRQHandler .thumb_set LPTIM4_IRQHandler,Default_Handler .weak LPTIM5_IRQHandler .thumb_set LPTIM5_IRQHandler,Default_Handler .weak MPU_SEV_IRQHandler .thumb_set MPU_SEV_IRQHandler,Default_Handler .weak RCC_WAKEUP_IRQHandler .thumb_set RCC_WAKEUP_IRQHandler,Default_Handler .weak SAI4_IRQHandler .thumb_set SAI4_IRQHandler,Default_Handler .weak DTS_IRQHandler .thumb_set DTS_IRQHandler,Default_Handler .weak RESERVED148_IRQHandler .thumb_set RESERVED148_IRQHandler,Default_Handler .weak WAKEUP_PIN_IRQHandler .thumb_set WAKEUP_PIN_IRQHandler,Default_Handler /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
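A note on the weak-alias block that closes this startup file: because every handler above is a weak alias of Default_Handler, an application overrides a vector simply by defining a non-weak function with the same name, exactly as the file's own comment says. A minimal C sketch; the status-register address is a hypothetical placeholder, not something defined by this file:

#include <stdint.h>

#define TIM2_SR (*(volatile uint32_t *)0x40000010u)  /* placeholder address */

/* Non-weak definition: replaces the Default_Handler alias at link time. */
void TIM2_IRQHandler(void)
{
    TIM2_SR = 0;        /* clear the interrupt flag(s) */
    /* application work */
}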
4ms/metamodule-plugin-sdk
3,686
plugin-libc/libgcc/config/nds32/crtzero.S
/* The startup code sample of Andes NDS32 cpu for GNU compiler
   Copyright (C) 2012-2022 Free Software Foundation, Inc.
   Contributed by Andes Technology Corporation.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

!!==============================================================================
!!
!! crtzero.S
!!
!! This is JUST A SAMPLE of nds32 startup code !!
!! You can refer to this content and implement
!! the actual one in newlib/mculib.
!!
!!==============================================================================

!!------------------------------------------------------------------------------
!! Jump to start up code
!!------------------------------------------------------------------------------

	.section	.nds32_init, "ax"
	j	_start

!!------------------------------------------------------------------------------
!! Startup code implementation
!!------------------------------------------------------------------------------

	.section	.text
	.global	_start
	.weak	_SDA_BASE_
	.weak	_FP_BASE_
	.align	2
	.func	_start
	.type	_start, @function
_start:
.L_fp_gp_lp_init:
	la	$fp, _FP_BASE_		! init $fp
	la	$gp, _SDA_BASE_		! init $gp for small data access
	movi	$lp, 0			! init $lp

.L_stack_init:
	la	$sp, _stack		! init $sp
	movi	$r0, -8			! align $sp to 8-byte (use 0xfffffff8)
	and	$sp, $sp, $r0		! align $sp to 8-byte (filter out lower 3-bit)

.L_bss_init:
	! clear BSS, this process can be 4 times faster if data is 4 byte aligned
	! if so, use swi.p instead of sbi.p
	! the related stuff are defined in linker script
	la	$r0, _edata		! get the starting addr of bss
	la	$r2, _end		! get ending addr of bss
	beq	$r0, $r2, .L_call_main	! if no bss just do nothing
	movi	$r1, 0			! should be cleared to 0

.L_clear_bss:
	sbi.p	$r1, [$r0], 1		! Set 0 to bss
	bne	$r0, $r2, .L_clear_bss	! Still bytes left to set

!.L_stack_heap_check:
!	la	$r0, _end		! init heap_end
!	s.w	$r0, heap_end		! save it

!.L_init_argc_argv:
!	! argc/argv initialization if necessary; default implementation is in crt1.o
!	la	$r9, _arg_init		! load address of _arg_init?
!	beqz	$r9, .L4		! has _arg_init? no, go check main()
!	addi	$sp, $sp, -512		! allocate space for command line + arguments
!	move	$r6, $sp		! r6 = buffer addr of cmd line
!	move	$r0, $r6		! r0 = buffer addr of cmd line
!	syscall	6002			! get cmd line
!	move	$r0, $r6		! r0 = buffer addr of cmd line
!	addi	$r1, $r6, 256		! r1 = argv
!	jral	$r9			! init argc/argv
!	addi	$r1, $r6, 256		! r1 = argv

.L_call_main:
	! call main() if main() is provided
	la	$r15, main		! load address of main
	jral	$r15			! call main

.L_terminate_program:
	syscall	0x1			! use syscall 0x1 to terminate program
	.size	_start, .-_start
	.end

!! ------------------------------------------------------------------------
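The BSS-clearing loop in crtzero.S maps directly onto a few lines of C. A sketch, assuming the same linker-script symbols (_edata, _end) the assembly uses; the "4 times faster" note in the comments refers to using word stores (swi.p) instead of byte stores (sbi.p) when the region is 4-byte aligned:

extern char _edata[];   /* start of bss, as laid out by the linker script */
extern char _end[];     /* one past the end of bss */

static void clear_bss(void)
{
    for (char *p = _edata; p != _end; ++p)   /* sbi.p: store byte, post-increment */
        *p = 0;
}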
4ms/stm32mp1-baremetal
30,103
third-party/CMSIS/Device/ST/STM32MP1xx/Source/Templates/arm/startup_stm32mp15xx.s
;****************************************************************************** ;* File Name : startup_stm32mp15xx.s ;* Author : MCD Application Team ;* Description : STM32MP15xx devices vector table for MDK-ARM toolchain. ;* This module performs: ;* - Set the initial SP ;* - Set the initial PC == Reset_Handler ;* - Set the vector table entries with the exceptions ISR address ;* - Branches to __main in the C library (which eventually ;* calls main()). ;* After Reset the CortexM4 processor is in Thread mode, ;* priority is Privileged, and the Stack is set to Main. ;* <<< Use Configuration Wizard in Context Menu >>> ;****************************************************************************** ;* @attention ;* ;* <h2><center>&copy; Copyright (c) 2019 STMicroelectronics. ;* All rights reserved.</center></h2> ;* ;* This software component is licensed by ST under BSD 3-Clause license, ;* the "License"; You may not use this file except in compliance with the ;* License. You may obtain a copy of the License at: ;* opensource.org/licenses/BSD-3-Clause ;* ;****************************************************************************** ; Amount of memory (in bytes) allocated for Stack ; Tailor this value to your application needs ; <h> Stack Configuration ; <o> Stack Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Stack_Size EQU 0x00000400 AREA STACK, NOINIT, READWRITE, ALIGN=3 __stack_limit Stack_Mem SPACE Stack_Size __initial_sp ; <h> Heap Configuration ; <o> Heap Size (in Bytes) <0x0-0xFFFFFFFF:8> ; </h> Heap_Size EQU 0x00000200 AREA HEAP, NOINIT, READWRITE, ALIGN=3 __heap_base Heap_Mem SPACE Heap_Size __heap_limit PRESERVE8 THUMB ; Vector Table Mapped to Address 0 at Reset AREA RESET, DATA, READONLY EXPORT __Vectors EXPORT __Vectors_End EXPORT __Vectors_Size __Vectors DCD __initial_sp ; Top of Stack DCD Reset_Handler ; Reset Handler DCD NMI_Handler ; -14 NMI Handler DCD HardFault_Handler ; -13 Hard Fault Handler DCD MemManage_Handler ; -12 MPU Fault Handler DCD BusFault_Handler ; -11 Bus Fault Handler DCD UsageFault_Handler ; -10 Usage Fault Handler DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD 0 ; Reserved DCD SVC_Handler ; -5 SVCall Handler DCD DebugMon_Handler ; -4 Debug Monitor Handler DCD 0 ; Reserved DCD PendSV_Handler ; -2 PendSV Handler DCD SysTick_Handler ; -1 SysTick Handler ; Interrupts DCD WWDG1_IRQHandler ; DCD PVD_AVD_IRQHandler ; DCD TAMP_IRQHandler ; DCD RTC_WKUP_ALARM_IRQHandler ; DCD RESERVED4_IRQHandler ; DCD RCC_IRQHandler ; DCD EXTI0_IRQHandler ; DCD EXTI1_IRQHandler ; DCD EXTI2_IRQHandler ; DCD EXTI3_IRQHandler ; DCD EXTI4_IRQHandler ; DCD DMA1_Stream0_IRQHandler ; DCD DMA1_Stream1_IRQHandler ; DCD DMA1_Stream2_IRQHandler ; DCD DMA1_Stream3_IRQHandler ; DCD DMA1_Stream4_IRQHandler ; DCD DMA1_Stream5_IRQHandler ; DCD DMA1_Stream6_IRQHandler ; DCD ADC1_IRQHandler ; DCD FDCAN1_IT0_IRQHandler ; DCD FDCAN2_IT0_IRQHandler ; DCD FDCAN1_IT1_IRQHandler ; DCD FDCAN2_IT1_IRQHandler ; DCD EXTI5_IRQHandler ; DCD TIM1_BRK_IRQHandler ; DCD TIM1_UP_IRQHandler ; DCD TIM1_TRG_COM_IRQHandler ; DCD TIM1_CC_IRQHandler ; DCD TIM2_IRQHandler ; DCD TIM3_IRQHandler ; DCD TIM4_IRQHandler ; DCD I2C1_EV_IRQHandler ; DCD I2C1_ER_IRQHandler ; DCD I2C2_EV_IRQHandler ; DCD I2C2_ER_IRQHandler ; DCD SPI1_IRQHandler ; DCD SPI2_IRQHandler ; DCD USART1_IRQHandler ; DCD USART2_IRQHandler ; DCD USART3_IRQHandler ; DCD EXTI10_IRQHandler ; DCD RTC_TIMESTAMP_IRQHandler ; DCD EXTI11_IRQHandler ; DCD TIM8_BRK_IRQHandler ; DCD TIM8_UP_IRQHandler ; DCD TIM8_TRG_COM_IRQHandler ; DCD TIM8_CC_IRQHandler ; DCD 
DMA1_Stream7_IRQHandler ; DCD FMC_IRQHandler ; DCD SDMMC1_IRQHandler ; DCD TIM5_IRQHandler ; DCD SPI3_IRQHandler ; DCD UART4_IRQHandler ; DCD UART5_IRQHandler ; DCD TIM6_IRQHandler ; DCD TIM7_IRQHandler ; DCD DMA2_Stream0_IRQHandler ; DCD DMA2_Stream1_IRQHandler ; DCD DMA2_Stream2_IRQHandler ; DCD DMA2_Stream3_IRQHandler ; DCD DMA2_Stream4_IRQHandler ; DCD ETH1_IRQHandler ; DCD ETH1_WKUP_IRQHandler ; DCD FDCAN_CAL_IRQHandler ; DCD EXTI6_IRQHandler ; DCD EXTI7_IRQHandler ; DCD EXTI8_IRQHandler ; DCD EXTI9_IRQHandler ; DCD DMA2_Stream5_IRQHandler ; DCD DMA2_Stream6_IRQHandler ; DCD DMA2_Stream7_IRQHandler ; DCD USART6_IRQHandler ; DCD I2C3_EV_IRQHandler ; DCD I2C3_ER_IRQHandler ; DCD USBH_OHCI_IRQHandler ; DCD USBH_EHCI_IRQHandler ; DCD EXTI12_IRQHandler ; DCD EXTI13_IRQHandler ; DCD DCMI_IRQHandler ; DCD CRYP1_IRQHandler ; DCD HASH1_IRQHandler ; DCD FPU_IRQHandler ; DCD UART7_IRQHandler ; DCD UART8_IRQHandler ; DCD SPI4_IRQHandler ; DCD SPI5_IRQHandler ; DCD SPI6_IRQHandler ; DCD SAI1_IRQHandler ; DCD LTDC_IRQHandler ; DCD LTDC_ER_IRQHandler ; DCD ADC2_IRQHandler ; DCD SAI2_IRQHandler ; DCD QUADSPI_IRQHandler ; DCD LPTIM1_IRQHandler ; DCD CEC_IRQHandler ; DCD I2C4_EV_IRQHandler ; DCD I2C4_ER_IRQHandler ; DCD SPDIF_RX_IRQHandler ; DCD OTG_IRQHandler ; DCD RESERVED99_IRQHandler ; DCD IPCC_RX0_IRQHandler ; DCD IPCC_TX0_IRQHandler ; DCD DMAMUX1_OVR_IRQHandler ; DCD IPCC_RX1_IRQHandler ; DCD IPCC_TX1_IRQHandler ; DCD CRYP2_IRQHandler ; DCD HASH2_IRQHandler ; DCD I2C5_EV_IRQHandler ; DCD I2C5_ER_IRQHandler ; DCD GPU_IRQHandler ; DCD DFSDM1_FLT0_IRQHandler ; DCD DFSDM1_FLT1_IRQHandler ; DCD DFSDM1_FLT2_IRQHandler ; DCD DFSDM1_FLT3_IRQHandler ; DCD SAI3_IRQHandler ; DCD DFSDM1_FLT4_IRQHandler ; DCD TIM15_IRQHandler ; DCD TIM16_IRQHandler ; DCD TIM17_IRQHandler ; DCD TIM12_IRQHandler ; DCD MDIOS_IRQHandler ; DCD EXTI14_IRQHandler ; DCD MDMA_IRQHandler ; DCD DSI_IRQHandler ; DCD SDMMC2_IRQHandler ; DCD HSEM_IT2_IRQHandler ; DCD DFSDM1_FLT5_IRQHandler ; DCD EXTI15_IRQHandler ; DCD nCTIIRQ1_IRQHandler ; DCD nCTIIRQ2_IRQHandler ; DCD TIM13_IRQHandler ; DCD TIM14_IRQHandler ; DCD DAC_IRQHandler ; DCD RNG1_IRQHandler ; DCD RNG2_IRQHandler ; DCD I2C6_EV_IRQHandler ; DCD I2C6_ER_IRQHandler ; DCD SDMMC3_IRQHandler ; DCD LPTIM2_IRQHandler ; DCD LPTIM3_IRQHandler ; DCD LPTIM4_IRQHandler ; DCD LPTIM5_IRQHandler ; DCD ETH1_LPI_IRQHandler ; DCD RESERVED143_IRQHandler ; DCD MPU_SEV_IRQHandler ; DCD RCC_WAKEUP_IRQHandler ; DCD SAI4_IRQHandler ; DCD DTS_IRQHandler ; DCD RESERVED148_IRQHandler ; DCD WAKEUP_PIN_IRQHandler ; SPACE (74 * 4) ; Interrupts 151 .. 224 are left out __Vectors_End __Vectors_Size EQU __Vectors_End - __Vectors AREA |.text|, CODE, READONLY ; Reset Handler Reset_Handler PROC EXPORT Reset_Handler [WEAK] IMPORT SystemInit IMPORT __main LDR R0, =SystemInit BLX R0 LDR R0, =__main BX R0 ENDP ; Macro to define default exception/interrupt handlers. ; Default handlers are weak symbols with an endless loop. ; They can be overwritten by real handlers. MACRO Set_Default_Handler $Handler_Name $Handler_Name PROC EXPORT $Handler_Name [WEAK] B .
ENDP MEND ; Default exception/interrupt handler Set_Default_Handler NMI_Handler Set_Default_Handler HardFault_Handler Set_Default_Handler MemManage_Handler Set_Default_Handler BusFault_Handler Set_Default_Handler UsageFault_Handler Set_Default_Handler SVC_Handler Set_Default_Handler DebugMon_Handler Set_Default_Handler PendSV_Handler Set_Default_Handler SysTick_Handler Set_Default_Handler WWDG1_IRQHandler ; Window WatchDog 1 Set_Default_Handler PVD_AVD_IRQHandler ; PVD and AVD through EXTI Line detection Set_Default_Handler TAMP_IRQHandler ; Tamper and TimeStamps through the EXTI line Set_Default_Handler RTC_WKUP_ALARM_IRQHandler ; RTC Wakeup and Alarm through the EXTI line Set_Default_Handler RESERVED4_IRQHandler ; Reserved Set_Default_Handler RCC_IRQHandler ; RCC Set_Default_Handler EXTI0_IRQHandler ; EXTI Line0 Set_Default_Handler EXTI1_IRQHandler ; EXTI Line1 Set_Default_Handler EXTI2_IRQHandler ; EXTI Line2 Set_Default_Handler EXTI3_IRQHandler ; EXTI Line3 Set_Default_Handler EXTI4_IRQHandler ; EXTI Line4 Set_Default_Handler DMA1_Stream0_IRQHandler ; DMA1 Stream 0 Set_Default_Handler DMA1_Stream1_IRQHandler ; DMA1 Stream 1 Set_Default_Handler DMA1_Stream2_IRQHandler ; DMA1 Stream 2 Set_Default_Handler DMA1_Stream3_IRQHandler ; DMA1 Stream 3 Set_Default_Handler DMA1_Stream4_IRQHandler ; DMA1 Stream 4 Set_Default_Handler DMA1_Stream5_IRQHandler ; DMA1 Stream 5 Set_Default_Handler DMA1_Stream6_IRQHandler ; DMA1 Stream 6 Set_Default_Handler ADC1_IRQHandler ; ADC1 Set_Default_Handler FDCAN1_IT0_IRQHandler ; FDCAN1 Interrupt line 0 Set_Default_Handler FDCAN2_IT0_IRQHandler ; FDCAN2 Interrupt line 0 Set_Default_Handler FDCAN1_IT1_IRQHandler ; FDCAN1 Interrupt line 1 Set_Default_Handler FDCAN2_IT1_IRQHandler ; FDCAN2 Interrupt line 1 Set_Default_Handler EXTI5_IRQHandler ; External Line5 interrupts through AIEC Set_Default_Handler TIM1_BRK_IRQHandler ; TIM1 Break interrupt Set_Default_Handler TIM1_UP_IRQHandler ; TIM1 Update Interrupt Set_Default_Handler TIM1_TRG_COM_IRQHandler ; TIM1 Trigger and Commutation Interrupt Set_Default_Handler TIM1_CC_IRQHandler ; TIM1 Capture Compare Set_Default_Handler TIM2_IRQHandler ; TIM2 Set_Default_Handler TIM3_IRQHandler ; TIM3 Set_Default_Handler TIM4_IRQHandler ; TIM4 Set_Default_Handler I2C1_EV_IRQHandler ; I2C1 Event Set_Default_Handler I2C1_ER_IRQHandler ; I2C1 Error Set_Default_Handler I2C2_EV_IRQHandler ; I2C2 Event Set_Default_Handler I2C2_ER_IRQHandler ; I2C2 Error Set_Default_Handler SPI1_IRQHandler ; SPI1 Set_Default_Handler SPI2_IRQHandler ; SPI2 Set_Default_Handler USART1_IRQHandler ; USART1 Set_Default_Handler USART2_IRQHandler ; USART2 Set_Default_Handler USART3_IRQHandler ; USART3 Set_Default_Handler EXTI10_IRQHandler ; External Line10 interrupts through AIEC Set_Default_Handler RTC_TIMESTAMP_IRQHandler ; RTC TimeStamp through EXTI Line Set_Default_Handler EXTI11_IRQHandler ; External Line11 interrupts through AIEC Set_Default_Handler TIM8_BRK_IRQHandler ; TIM8 Break Interrupt Set_Default_Handler TIM8_UP_IRQHandler ; TIM8 Update Interrupt Set_Default_Handler TIM8_TRG_COM_IRQHandler ; TIM8 Trigger and Commutation Interrupt Set_Default_Handler TIM8_CC_IRQHandler ; TIM8 Capture Compare Interrupt Set_Default_Handler DMA1_Stream7_IRQHandler ; DMA1 Stream7 Set_Default_Handler FMC_IRQHandler ; FMC Set_Default_Handler SDMMC1_IRQHandler ; SDMMC1 Set_Default_Handler TIM5_IRQHandler ; TIM5 Set_Default_Handler SPI3_IRQHandler ; SPI3 Set_Default_Handler UART4_IRQHandler ; UART4 Set_Default_Handler UART5_IRQHandler ; UART5 Set_Default_Handler 
TIM6_IRQHandler ; TIM6 Set_Default_Handler TIM7_IRQHandler ; TIM7 Set_Default_Handler DMA2_Stream0_IRQHandler ; DMA2 Stream 0 Set_Default_Handler DMA2_Stream1_IRQHandler ; DMA2 Stream 1 Set_Default_Handler DMA2_Stream2_IRQHandler ; DMA2 Stream 2 Set_Default_Handler DMA2_Stream3_IRQHandler ; DMA2 Stream 3 Set_Default_Handler DMA2_Stream4_IRQHandler ; DMA2 Stream 4 Set_Default_Handler ETH1_IRQHandler ; Ethernet Set_Default_Handler ETH1_WKUP_IRQHandler ; Ethernet Wakeup through EXTI line Set_Default_Handler FDCAN_CAL_IRQHandler ; FDCAN Calibration Set_Default_Handler EXTI6_IRQHandler ; EXTI Line6 interrupts through AIEC Set_Default_Handler EXTI7_IRQHandler ; EXTI Line7 interrupts through AIEC Set_Default_Handler EXTI8_IRQHandler ; EXTI Line8 interrupts through AIEC Set_Default_Handler EXTI9_IRQHandler ; EXTI Line9 interrupts through AIEC Set_Default_Handler DMA2_Stream5_IRQHandler ; DMA2 Stream 5 Set_Default_Handler DMA2_Stream6_IRQHandler ; DMA2 Stream 6 Set_Default_Handler DMA2_Stream7_IRQHandler ; DMA2 Stream 7 Set_Default_Handler USART6_IRQHandler ; USART6 Set_Default_Handler I2C3_EV_IRQHandler ; I2C3 event Set_Default_Handler I2C3_ER_IRQHandler ; I2C3 error Set_Default_Handler USBH_OHCI_IRQHandler ; USB Host OHCI Set_Default_Handler USBH_EHCI_IRQHandler ; USB Host EHCI Set_Default_Handler EXTI12_IRQHandler ; EXTI Line12 interrupts through AIEC Set_Default_Handler EXTI13_IRQHandler ; EXTI Line13 interrupts through AIEC Set_Default_Handler DCMI_IRQHandler ; DCMI Set_Default_Handler CRYP1_IRQHandler ; Crypto1 global interrupt Set_Default_Handler HASH1_IRQHandler ; Crypto Hash1 interrupt Set_Default_Handler FPU_IRQHandler ; FPU Set_Default_Handler UART7_IRQHandler ; UART7 Set_Default_Handler UART8_IRQHandler ; UART8 Set_Default_Handler SPI4_IRQHandler ; SPI4 Set_Default_Handler SPI5_IRQHandler ; SPI5 Set_Default_Handler SPI6_IRQHandler ; SPI6 Set_Default_Handler SAI1_IRQHandler ; SAI1 Set_Default_Handler LTDC_IRQHandler ; LTDC Set_Default_Handler LTDC_ER_IRQHandler ; LTDC error Set_Default_Handler ADC2_IRQHandler ; ADC2 Set_Default_Handler SAI2_IRQHandler ; SAI2 Set_Default_Handler QUADSPI_IRQHandler ; QUADSPI Set_Default_Handler LPTIM1_IRQHandler ; LPTIM1 global interrupt Set_Default_Handler CEC_IRQHandler ; HDMI_CEC Set_Default_Handler I2C4_EV_IRQHandler ; I2C4 Event Set_Default_Handler I2C4_ER_IRQHandler ; I2C4 Error Set_Default_Handler SPDIF_RX_IRQHandler ; SPDIF_RX Set_Default_Handler OTG_IRQHandler ; USB On The Go HS global interrupt Set_Default_Handler RESERVED99_IRQHandler ; Reserved Set_Default_Handler IPCC_RX0_IRQHandler ; Mailbox RX0 Free interrupt Set_Default_Handler IPCC_TX0_IRQHandler ; Mailbox TX0 Free interrupt Set_Default_Handler DMAMUX1_OVR_IRQHandler ; DMAMUX1 Overrun interrupt Set_Default_Handler IPCC_RX1_IRQHandler ; Mailbox RX1 Free interrupt Set_Default_Handler IPCC_TX1_IRQHandler ; Mailbox TX1 Free interrupt Set_Default_Handler CRYP2_IRQHandler ; Crypto2 global interrupt Set_Default_Handler HASH2_IRQHandler ; Crypto Hash2 interrupt Set_Default_Handler I2C5_EV_IRQHandler ; I2C5 Event Interrupt Set_Default_Handler I2C5_ER_IRQHandler ; I2C5 Error Interrupt Set_Default_Handler GPU_IRQHandler ; GPU Global Interrupt Set_Default_Handler DFSDM1_FLT0_IRQHandler ; DFSDM Filter0 Interrupt Set_Default_Handler DFSDM1_FLT1_IRQHandler ; DFSDM Filter1 Interrupt Set_Default_Handler DFSDM1_FLT2_IRQHandler ; DFSDM Filter2 Interrupt Set_Default_Handler DFSDM1_FLT3_IRQHandler ; DFSDM Filter3 Interrupt Set_Default_Handler SAI3_IRQHandler ; SAI3 global Interrupt Set_Default_Handler 
DFSDM1_FLT4_IRQHandler ; DFSDM Filter4 Interrupt Set_Default_Handler TIM15_IRQHandler ; TIM15 global Interrupt Set_Default_Handler TIM16_IRQHandler ; TIM16 global Interrupt Set_Default_Handler TIM17_IRQHandler ; TIM17 global Interrupt Set_Default_Handler TIM12_IRQHandler ; TIM12 global Interrupt Set_Default_Handler MDIOS_IRQHandler ; MDIOS global Interrupt Set_Default_Handler EXTI14_IRQHandler ; EXTI Line14 interrupts through AIEC Set_Default_Handler MDMA_IRQHandler ; MDMA global Interrupt Set_Default_Handler DSI_IRQHandler ; DSI global Interrupt Set_Default_Handler SDMMC2_IRQHandler ; SDMMC2 global Interrupt Set_Default_Handler HSEM_IT2_IRQHandler ; HSEM global Interrupt Set_Default_Handler DFSDM1_FLT5_IRQHandler ; DFSDM Filter5 Interrupt Set_Default_Handler EXTI15_IRQHandler ; EXTI Line15 interrupts through AIEC Set_Default_Handler nCTIIRQ1_IRQHandler ; Cortex-M4 CTI interrupt 1 Set_Default_Handler nCTIIRQ2_IRQHandler ; Cortex-M4 CTI interrupt 2 Set_Default_Handler TIM13_IRQHandler ; TIM13 global interrupt Set_Default_Handler TIM14_IRQHandler ; TIM14 global interrupt Set_Default_Handler DAC_IRQHandler ; DAC1 and DAC2 underrun error interrupts Set_Default_Handler RNG1_IRQHandler ; RNG1 interrupt Set_Default_Handler RNG2_IRQHandler ; RNG2 interrupt Set_Default_Handler I2C6_EV_IRQHandler ; I2C6 Event Interrupt Set_Default_Handler I2C6_ER_IRQHandler ; I2C6 Error Interrupt Set_Default_Handler SDMMC3_IRQHandler ; SDMMC3 global Interrupt Set_Default_Handler LPTIM2_IRQHandler ; LPTIM2 global interrupt Set_Default_Handler LPTIM3_IRQHandler ; LPTIM3 global interrupt Set_Default_Handler LPTIM4_IRQHandler ; LPTIM4 global interrupt Set_Default_Handler LPTIM5_IRQHandler ; LPTIM5 global interrupt Set_Default_Handler ETH1_LPI_IRQHandler ; Reserved Set_Default_Handler RESERVED143_IRQHandler ; Reserved Set_Default_Handler MPU_SEV_IRQHandler ; MPU Send Event through AIEC Set_Default_Handler RCC_WAKEUP_IRQHandler ; RCC Wake up interrupt Set_Default_Handler SAI4_IRQHandler ; SAI4 global interrupt Set_Default_Handler DTS_IRQHandler ; Temperature sensor interrupt Set_Default_Handler RESERVED148_IRQHandler ; Reserved Set_Default_Handler WAKEUP_PIN_IRQHandler ; Interrupt for all 6 wake-up pins ALIGN ; User setup Stack & Heap EXPORT __stack_limit EXPORT __initial_sp IF Heap_Size != 0 ; Heap is provided EXPORT __heap_base EXPORT __heap_limit ENDIF END
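For this MDK-ARM startup file the reset path is short enough to restate in C. A rough equivalent, assuming the Arm C library's __main entry point, which (as the header comment says) performs C library initialization and eventually calls main():

extern void SystemInit(void);
extern void __main(void);   /* Arm C library entry: init RW/ZI data, then main() */

void Reset_Handler(void)
{
    SystemInit();   /* LDR R0, =SystemInit / BLX R0 */
    __main();       /* LDR R0, =__main / BX R0 -- does not return */
}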
4ms/metamodule-plugin-sdk
1,425
plugin-libc/libgcc/config/ia64/crtn.S
# Copyright (C) 2000-2022 Free Software Foundation, Inc.
#   Written By Timothy Wall
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes sure that the .fini and .init sections do in
# fact return.  Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.

	.section ".init"
	;;
	mov	ar.pfs = r34
	mov	b0 = r33
	.restore sp
	mov	r12 = r35
	br.ret.sptk.many b0

	.section ".fini"
	;;
	mov	ar.pfs = r34
	mov	b0 = r33
	.restore sp
	mov	r12 = r35
	br.ret.sptk.many b0

# end of crtn.S
4ms/metamodule-plugin-sdk
1,534
plugin-libc/libgcc/config/ia64/crti.S
# Copyright (C) 2000-2022 Free Software Foundation, Inc.
#   Written By Timothy Wall
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes a stack frame for the contents of the .fini and
# .init sections.  Users may put any desired instructions in those
# sections.

	.section ".init"
	.align 16
	.global _init
_init:
	.prologue 14, 33
	.save ar.pfs, r34
	alloc r34 = ar.pfs, 0, 4, 0, 0
	.vframe r35
	mov r35 = r12
	.save rp, r33
	mov r33 = b0
	.body

	.section ".fini"
	.align 16
	.global _fini
_fini:
	.prologue 14, 33
	.save ar.pfs, r34
	alloc r34 = ar.pfs, 0, 4, 0, 0
	.vframe r35
	mov r35 = r12
	.save rp, r33
	mov r33 = b0
	.body

# end of crti.S
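crti.S opens _init and _fini with an IA-64 register-stack frame (alloc into r34, rp saved in r33, sp in r35), bodies are contributed by objects linked in between, and crtn.S later closes both with the matching restores. User code normally reaches these sections indirectly; a hedged C example of hooking initialization without hand-writing .init fragments, using the standard GCC attribute:

/* Runs while _init / .init_array entries are processed, before main().
   The function name is arbitrary. */
__attribute__((constructor))
static void early_setup(void)
{
    /* one-time initialization here */
}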
4ms/metamodule-plugin-sdk
1,029
plugin-libc/libgcc/config/ia64/vms-crtinit.S
/* Copyright (C) 2009-2022 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ .global LIB$INITIALIZE#
4ms/metamodule-plugin-sdk
2,883
plugin-libc/libgcc/config/ia64/crtend.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "auto-host.h" .section .ctors,"aw","progbits" .align 8 __CTOR_END__: data8 0 .section .dtors,"aw","progbits" .align 8 __DTOR_END__: data8 0 #if HAVE_INITFINI_ARRAY_SUPPORT .global __do_global_ctors_aux .hidden __do_global_ctors_aux #else /* !HAVE_INITFINI_ARRAY_SUPPORT */ /* * Fragment of the ELF _init routine that invokes our dtor cleanup. * * We make the call by indirection, because in large programs the * .fini and .init sections are not in range of the destination, and * we cannot allow the linker to insert a stub at the end of this * fragment of the _fini function. Further, Itanium does not implement * the long branch instructions, and we do not wish every program to * trap to the kernel for emulation. * * Note that we require __do_global_ctors_aux to preserve the GP, * so that the next fragment in .fini gets the right value. */ .section .init,"ax","progbits" { .mlx movl r2 = @pcrel(__do_global_ctors_aux - 16) } { .mii mov r3 = ip ;; add r2 = r2, r3 ;; } { .mib mov b6 = r2 br.call.sptk.many b0 = b6 ;; } #endif /* !HAVE_INITFINI_ARRAY_SUPPORT */ .text .align 32 .proc __do_global_ctors_aux __do_global_ctors_aux: .prologue /* for (loc0 = __CTOR_END__-1; *p != -1; --p) (*p) (); */ .save ar.pfs, r34 alloc loc2 = ar.pfs, 0, 5, 0, 0 movl loc0 = @gprel(__CTOR_END__ - 8) ;; add loc0 = loc0, gp ;; ld8 loc3 = [loc0], -8 .save rp, loc1 mov loc1 = rp .body ;; cmp.eq p6, p0 = -1, loc3 mov loc4 = gp (p6) br.cond.spnt.few .exit .loop: ld8 r15 = [loc3], 8 ;; ld8 gp = [loc3] mov b6 = r15 ld8 loc3 = [loc0], -8 nop 0 br.call.sptk.many rp = b6 ;; cmp.ne p6, p0 = -1, loc3 nop 0 (p6) br.cond.sptk.few .loop .exit: mov gp = loc3 mov rp = loc1 mov ar.pfs = loc2 br.ret.sptk.many rp .endp __do_global_ctors_aux
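The constructor walk that crtend.S contributes is spelled out in its own comment: for (loc0 = __CTOR_END__-1; *p != -1; --p) (*p) ();. A C sketch of that loop; on IA-64 each entry is really a function descriptor (code address plus gp) that the assembly dereferences with two ld8s, so the plain cast below is illustrative only:

extern long __CTOR_END__[];          /* .ctors list; crtbegin places -1 at the front */

static void do_global_ctors(void)
{
    typedef void (*ctor_fn)(void);
    for (long *p = __CTOR_END__ - 1; *p != -1; --p)
        ((ctor_fn)*p)();             /* call each recorded constructor */
}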
4ms/metamodule-plugin-sdk
15,464
plugin-libc/libgcc/config/ia64/lib1funcs.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. Contributed by James E. Wilson <wilson@cygnus.com>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifdef L__divxf3 // Compute a 80-bit IEEE double-extended quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // farg0 holds the dividend. farg1 holds the divisor. // // __divtf3 is an alternate symbol name for backward compatibility. .text .align 16 .global __divxf3 .proc __divxf3 __divxf3: #ifdef SHARED .global __divtf3 __divtf3: #endif cmp.eq p7, p0 = r0, r0 frcpa.s0 f10, p6 = farg0, farg1 ;; (p6) cmp.ne p7, p0 = r0, r0 .pred.rel.mutex p6, p7 (p6) fnma.s1 f11 = farg1, f10, f1 (p6) fma.s1 f12 = farg0, f10, f0 ;; (p6) fma.s1 f13 = f11, f11, f0 (p6) fma.s1 f14 = f11, f11, f11 ;; (p6) fma.s1 f11 = f13, f13, f11 (p6) fma.s1 f13 = f14, f10, f10 ;; (p6) fma.s1 f10 = f13, f11, f10 (p6) fnma.s1 f11 = farg1, f12, farg0 ;; (p6) fma.s1 f11 = f11, f10, f12 (p6) fnma.s1 f12 = farg1, f10, f1 ;; (p6) fma.s1 f10 = f12, f10, f10 (p6) fnma.s1 f12 = farg1, f11, farg0 ;; (p6) fma.s0 fret0 = f12, f10, f11 (p7) mov fret0 = f10 br.ret.sptk rp .endp __divxf3 #endif #ifdef L__divdf3 // Compute a 64-bit IEEE double quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // farg0 holds the dividend. farg1 holds the divisor. .text .align 16 .global __divdf3 .proc __divdf3 __divdf3: cmp.eq p7, p0 = r0, r0 frcpa.s0 f10, p6 = farg0, farg1 ;; (p6) cmp.ne p7, p0 = r0, r0 .pred.rel.mutex p6, p7 (p6) fmpy.s1 f11 = farg0, f10 (p6) fnma.s1 f12 = farg1, f10, f1 ;; (p6) fma.s1 f11 = f12, f11, f11 (p6) fmpy.s1 f13 = f12, f12 ;; (p6) fma.s1 f10 = f12, f10, f10 (p6) fma.s1 f11 = f13, f11, f11 ;; (p6) fmpy.s1 f12 = f13, f13 (p6) fma.s1 f10 = f13, f10, f10 ;; (p6) fma.d.s1 f11 = f12, f11, f11 (p6) fma.s1 f10 = f12, f10, f10 ;; (p6) fnma.d.s1 f8 = farg1, f11, farg0 ;; (p6) fma.d fret0 = f8, f10, f11 (p7) mov fret0 = f10 br.ret.sptk rp ;; .endp __divdf3 #endif #ifdef L__divsf3 // Compute a 32-bit IEEE float quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // farg0 holds the dividend. farg1 holds the divisor. 
.text .align 16 .global __divsf3 .proc __divsf3 __divsf3: cmp.eq p7, p0 = r0, r0 frcpa.s0 f10, p6 = farg0, farg1 ;; (p6) cmp.ne p7, p0 = r0, r0 .pred.rel.mutex p6, p7 (p6) fmpy.s1 f8 = farg0, f10 (p6) fnma.s1 f9 = farg1, f10, f1 ;; (p6) fma.s1 f8 = f9, f8, f8 (p6) fmpy.s1 f9 = f9, f9 ;; (p6) fma.s1 f8 = f9, f8, f8 (p6) fmpy.s1 f9 = f9, f9 ;; (p6) fma.d.s1 f10 = f9, f8, f8 ;; (p6) fnorm.s.s0 fret0 = f10 (p7) mov fret0 = f10 br.ret.sptk rp ;; .endp __divsf3 #endif #ifdef L__divdi3 // Compute a 64-bit integer quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. .text .align 16 .global __divdi3 .proc __divdi3 __divdi3: .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f8 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; // Convert the inputs to FP, so that they won't be treated as unsigned. fcvt.xf f8 = f8 fcvt.xf f9 = f9 (p7) break 1 ;; // Compute the reciprocal approximation. frcpa.s1 f10, p6 = f8, f9 ;; // 3 Newton-Raphson iterations. (p6) fnma.s1 f11 = f9, f10, f1 (p6) fmpy.s1 f12 = f8, f10 ;; (p6) fmpy.s1 f13 = f11, f11 (p6) fma.s1 f12 = f11, f12, f12 ;; (p6) fma.s1 f10 = f11, f10, f10 (p6) fma.s1 f11 = f13, f12, f12 ;; (p6) fma.s1 f10 = f13, f10, f10 (p6) fnma.s1 f12 = f9, f11, f8 ;; (p6) fma.s1 f10 = f12, f10, f11 ;; // Round quotient to an integer. fcvt.fx.trunc.s1 f10 = f10 ;; // Transfer result to GP registers. getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __divdi3 #endif #ifdef L__moddi3 // Compute a 64-bit integer modulus. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend (a). in1 holds the divisor (b). .text .align 16 .global __moddi3 .proc __moddi3 __moddi3: .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f14 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; // Convert the inputs to FP, so that they won't be treated as unsigned. fcvt.xf f8 = f14 fcvt.xf f9 = f9 (p7) break 1 ;; // Compute the reciprocal approximation. frcpa.s1 f10, p6 = f8, f9 ;; // 3 Newton-Raphson iterations. (p6) fmpy.s1 f12 = f8, f10 (p6) fnma.s1 f11 = f9, f10, f1 ;; (p6) fma.s1 f12 = f11, f12, f12 (p6) fmpy.s1 f13 = f11, f11 ;; (p6) fma.s1 f10 = f11, f10, f10 (p6) fma.s1 f11 = f13, f12, f12 ;; sub in1 = r0, in1 (p6) fma.s1 f10 = f13, f10, f10 (p6) fnma.s1 f12 = f9, f11, f8 ;; setf.sig f9 = in1 (p6) fma.s1 f10 = f12, f10, f11 ;; fcvt.fx.trunc.s1 f10 = f10 ;; // r = q * (-b) + a xma.l f10 = f10, f9, f14 ;; // Transfer result to GP registers. getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __moddi3 #endif #ifdef L__udivdi3 // Compute a 64-bit unsigned integer quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. .text .align 16 .global __udivdi3 .proc __udivdi3 __udivdi3: .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f8 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; // Convert the inputs to FP, to avoid FP software-assist faults. fcvt.xuf.s1 f8 = f8 fcvt.xuf.s1 f9 = f9 (p7) break 1 ;; // Compute the reciprocal approximation. frcpa.s1 f10, p6 = f8, f9 ;; // 3 Newton-Raphson iterations. 
(p6) fnma.s1 f11 = f9, f10, f1 (p6) fmpy.s1 f12 = f8, f10 ;; (p6) fmpy.s1 f13 = f11, f11 (p6) fma.s1 f12 = f11, f12, f12 ;; (p6) fma.s1 f10 = f11, f10, f10 (p6) fma.s1 f11 = f13, f12, f12 ;; (p6) fma.s1 f10 = f13, f10, f10 (p6) fnma.s1 f12 = f9, f11, f8 ;; (p6) fma.s1 f10 = f12, f10, f11 ;; // Round quotient to an unsigned integer. fcvt.fxu.trunc.s1 f10 = f10 ;; // Transfer result to GP registers. getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __udivdi3 #endif #ifdef L__umoddi3 // Compute a 64-bit unsigned integer modulus. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend (a). in1 holds the divisor (b). .text .align 16 .global __umoddi3 .proc __umoddi3 __umoddi3: .regstk 2,0,0,0 // Transfer inputs to FP registers. setf.sig f14 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; // Convert the inputs to FP, to avoid FP software assist faults. fcvt.xuf.s1 f8 = f14 fcvt.xuf.s1 f9 = f9 (p7) break 1; ;; // Compute the reciprocal approximation. frcpa.s1 f10, p6 = f8, f9 ;; // 3 Newton-Raphson iterations. (p6) fmpy.s1 f12 = f8, f10 (p6) fnma.s1 f11 = f9, f10, f1 ;; (p6) fma.s1 f12 = f11, f12, f12 (p6) fmpy.s1 f13 = f11, f11 ;; (p6) fma.s1 f10 = f11, f10, f10 (p6) fma.s1 f11 = f13, f12, f12 ;; sub in1 = r0, in1 (p6) fma.s1 f10 = f13, f10, f10 (p6) fnma.s1 f12 = f9, f11, f8 ;; setf.sig f9 = in1 (p6) fma.s1 f10 = f12, f10, f11 ;; // Round quotient to an unsigned integer. fcvt.fxu.trunc.s1 f10 = f10 ;; // r = q * (-b) + a xma.l f10 = f10, f9, f14 ;; // Transfer result to GP registers. getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __umoddi3 #endif #ifdef L__divsi3 // Compute a 32-bit integer quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. .text .align 16 .global __divsi3 .proc __divsi3 __divsi3: .regstk 2,0,0,0 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 sxt4 in0 = in0 sxt4 in1 = in1 ;; setf.sig f8 = in0 setf.sig f9 = in1 (p7) break 1 ;; mov r2 = 0x0ffdd fcvt.xf f8 = f8 fcvt.xf f9 = f9 ;; setf.exp f11 = r2 frcpa.s1 f10, p6 = f8, f9 ;; (p6) fmpy.s1 f8 = f8, f10 (p6) fnma.s1 f9 = f9, f10, f1 ;; (p6) fma.s1 f8 = f9, f8, f8 (p6) fma.s1 f9 = f9, f9, f11 ;; (p6) fma.s1 f10 = f9, f8, f8 ;; fcvt.fx.trunc.s1 f10 = f10 ;; getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __divsi3 #endif #ifdef L__modsi3 // Compute a 32-bit integer modulus. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. .text .align 16 .global __modsi3 .proc __modsi3 __modsi3: .regstk 2,0,0,0 mov r2 = 0x0ffdd sxt4 in0 = in0 sxt4 in1 = in1 ;; setf.sig f13 = r32 setf.sig f9 = r33 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; sub in1 = r0, in1 fcvt.xf f8 = f13 fcvt.xf f9 = f9 ;; setf.exp f11 = r2 frcpa.s1 f10, p6 = f8, f9 (p7) break 1 ;; (p6) fmpy.s1 f12 = f8, f10 (p6) fnma.s1 f10 = f9, f10, f1 ;; setf.sig f9 = in1 (p6) fma.s1 f12 = f10, f12, f12 (p6) fma.s1 f10 = f10, f10, f11 ;; (p6) fma.s1 f10 = f10, f12, f12 ;; fcvt.fx.trunc.s1 f10 = f10 ;; xma.l f10 = f10, f9, f13 ;; getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __modsi3 #endif #ifdef L__udivsi3 // Compute a 32-bit unsigned integer quotient. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. 
.text .align 16 .global __udivsi3 .proc __udivsi3 __udivsi3: .regstk 2,0,0,0 mov r2 = 0x0ffdd zxt4 in0 = in0 zxt4 in1 = in1 ;; setf.sig f8 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; fcvt.xf f8 = f8 fcvt.xf f9 = f9 (p7) break 1 ;; setf.exp f11 = r2 frcpa.s1 f10, p6 = f8, f9 ;; (p6) fmpy.s1 f8 = f8, f10 (p6) fnma.s1 f9 = f9, f10, f1 ;; (p6) fma.s1 f8 = f9, f8, f8 (p6) fma.s1 f9 = f9, f9, f11 ;; (p6) fma.s1 f10 = f9, f8, f8 ;; fcvt.fxu.trunc.s1 f10 = f10 ;; getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __udivsi3 #endif #ifdef L__umodsi3 // Compute a 32-bit unsigned integer modulus. // // From the Intel IA-64 Optimization Guide, choose the minimum latency // alternative. // // in0 holds the dividend. in1 holds the divisor. .text .align 16 .global __umodsi3 .proc __umodsi3 __umodsi3: .regstk 2,0,0,0 mov r2 = 0x0ffdd zxt4 in0 = in0 zxt4 in1 = in1 ;; setf.sig f13 = in0 setf.sig f9 = in1 // Check divide by zero. cmp.ne.unc p0,p7=0,in1 ;; sub in1 = r0, in1 fcvt.xf f8 = f13 fcvt.xf f9 = f9 ;; setf.exp f11 = r2 frcpa.s1 f10, p6 = f8, f9 (p7) break 1; ;; (p6) fmpy.s1 f12 = f8, f10 (p6) fnma.s1 f10 = f9, f10, f1 ;; setf.sig f9 = in1 (p6) fma.s1 f12 = f10, f12, f12 (p6) fma.s1 f10 = f10, f10, f11 ;; (p6) fma.s1 f10 = f10, f12, f12 ;; fcvt.fxu.trunc.s1 f10 = f10 ;; xma.l f10 = f10, f9, f13 ;; getf.sig ret0 = f10 br.ret.sptk rp ;; .endp __umodsi3 #endif #ifdef L__save_stack_nonlocal // Notes on save/restore stack nonlocal: We read ar.bsp but write // ar.bspstore. This is because ar.bsp can be read at all times // (independent of the RSE mode) but since it's read-only we need to // restore the value via ar.bspstore. This is OK because // ar.bsp==ar.bspstore after executing "flushrs". // void __ia64_save_stack_nonlocal(void *save_area, void *stack_pointer) .text .align 16 .global __ia64_save_stack_nonlocal .proc __ia64_save_stack_nonlocal __ia64_save_stack_nonlocal: { .mmf alloc r18 = ar.pfs, 2, 0, 0, 0 mov r19 = ar.rsc ;; } { .mmi flushrs st8 [in0] = in1, 24 and r19 = 0x1c, r19 ;; } { .mmi st8 [in0] = r18, -16 mov ar.rsc = r19 or r19 = 0x3, r19 ;; } { .mmi mov r16 = ar.bsp mov r17 = ar.rnat adds r2 = 8, in0 ;; } { .mmi st8 [in0] = r16 st8 [r2] = r17 } { .mib mov ar.rsc = r19 br.ret.sptk.few rp ;; } .endp __ia64_save_stack_nonlocal #endif #ifdef L__nonlocal_goto // void __ia64_nonlocal_goto(void *target_label, void *save_area, // void *static_chain); .text .align 16 .global __ia64_nonlocal_goto .proc __ia64_nonlocal_goto __ia64_nonlocal_goto: { .mmi alloc r20 = ar.pfs, 3, 0, 0, 0 ld8 r12 = [in1], 8 mov.ret.sptk rp = in0, .L0 ;; } { .mmf ld8 r16 = [in1], 8 mov r19 = ar.rsc ;; } { .mmi flushrs ld8 r17 = [in1], 8 and r19 = 0x1c, r19 ;; } { .mmi ld8 r18 = [in1] mov ar.rsc = r19 or r19 = 0x3, r19 ;; } { .mmi mov ar.bspstore = r16 ;; mov ar.rnat = r17 ;; } { .mmi loadrs invala mov r15 = in2 ;; } .L0: { .mib mov ar.rsc = r19 mov ar.pfs = r18 br.ret.sptk.few rp ;; } .endp __ia64_nonlocal_goto #endif #ifdef L__restore_stack_nonlocal // This is mostly the same as nonlocal_goto above. // ??? This has not been tested yet. 
// void __ia64_restore_stack_nonlocal(void *save_area) .text .align 16 .global __ia64_restore_stack_nonlocal .proc __ia64_restore_stack_nonlocal __ia64_restore_stack_nonlocal: { .mmf alloc r20 = ar.pfs, 4, 0, 0, 0 ld8 r12 = [in0], 8 ;; } { .mmb ld8 r16=[in0], 8 mov r19 = ar.rsc ;; } { .mmi flushrs ld8 r17 = [in0], 8 and r19 = 0x1c, r19 ;; } { .mmf ld8 r18 = [in0] mov ar.rsc = r19 ;; } { .mmi mov ar.bspstore = r16 ;; mov ar.rnat = r17 or r19 = 0x3, r19 ;; } { .mmf loadrs invala ;; } .L0: { .mib mov ar.rsc = r19 mov ar.pfs = r18 br.ret.sptk.few rp ;; } .endp __ia64_restore_stack_nonlocal #endif #ifdef L__trampoline // Implement the nested function trampoline. This is out of line // so that we don't have to bother with flushing the icache, as // well as making the on-stack trampoline smaller. // // The trampoline has the following form: // // +-------------------+ > // TRAMP: | __ia64_trampoline | | // +-------------------+ > fake function descriptor // | TRAMP+16 | | // +-------------------+ > // | target descriptor | // +-------------------+ // | static link | // +-------------------+ .text .align 16 .global __ia64_trampoline .proc __ia64_trampoline __ia64_trampoline: { .mmi ld8 r2 = [r1], 8 ;; ld8 r15 = [r1] } { .mmi ld8 r3 = [r2], 8 ;; ld8 r1 = [r2] mov b6 = r3 } { .bbb br.sptk.many b6 ;; } .endp __ia64_trampoline #endif #ifdef SHARED // Thunks for backward compatibility. #ifdef L_fixtfdi .text .align 16 .global __fixtfti .proc __fixtfti __fixtfti: { .bbb br.sptk.many __fixxfti ;; } .endp __fixtfti #endif #ifdef L_fixunstfdi .align 16 .global __fixunstfti .proc __fixunstfti __fixunstfti: { .bbb br.sptk.many __fixunsxfti ;; } .endp __fixunstfti #endif #ifdef L_floatditf .align 16 .global __floattitf .proc __floattitf __floattitf: { .bbb br.sptk.many __floattixf ;; } .endp __floattitf #endif #endif
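All of the divide and modulo entry points above share one scheme: move the operands into FP registers, start from frcpa's reciprocal approximation, refine with Newton-Raphson fma/fnma steps, truncate the quotient (fcvt.fx.trunc), and for the modulus form r = q*(-b) + a with xma.l. A loose C model of that data flow, using double where the hardware uses the 82-bit register format, so it is illustrative rather than bit-exact:

long long divdi3_model(long long a, long long b, long long *rem)
{
    double y = 1.0 / (double)b;           /* stands in for frcpa's rough estimate */
    y = y * (2.0 - (double)b * y);        /* Newton-Raphson step: y' = y*(2 - b*y) */
    y = y * (2.0 - (double)b * y);        /* ...the assembly performs three such steps */
    long long q = (long long)((double)a * y);  /* fcvt.fx.trunc rounds toward zero */
    *rem = a - q * b;                     /* xma.l computes r = q*(-b) + a */
    return q;
}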
4ms/metamodule-plugin-sdk
4,131
plugin-libc/libgcc/config/ia64/crtbegin.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. Contributed by Jes Sorensen, <Jes.Sorensen@cern.ch> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "auto-host.h" .section .ctors,"aw","progbits" .align 8 __CTOR_LIST__: data8 -1 .section .dtors,"aw","progbits" .align 8 __DTOR_LIST__: data8 -1 .section .sdata .type dtor_ptr,@object .size dtor_ptr,8 dtor_ptr: data8 @gprel(__DTOR_LIST__ + 8) /* A handle for __cxa_finalize to manage c++ local destructors. */ .global __dso_handle .type __dso_handle,@object .size __dso_handle,8 #ifdef SHARED .section .sdata __dso_handle: data8 __dso_handle #else .section .sbss .align 8 __dso_handle: .skip 8 #endif .hidden __dso_handle #if HAVE_INITFINI_ARRAY_SUPPORT .section .fini_array, "a" data8 @fptr(__do_global_dtors_aux) .section .init_array, "a" data8 @fptr(__do_global_ctors_aux) #else /* !HAVE_INITFINI_ARRAY_SUPPORT */ /* * Fragment of the ELF _fini routine that invokes our dtor cleanup. * * We make the call by indirection, because in large programs the * .fini and .init sections are not in range of the destination, and * we cannot allow the linker to insert a stub at the end of this * fragment of the _fini function. Further, Itanium does not implement * the long branch instructions, and we do not wish every program to * trap to the kernel for emulation. * * Note that we require __do_global_dtors_aux to preserve the GP, * so that the next fragment in .fini gets the right value. 
*/ .section .fini,"ax","progbits" { .mlx movl r2 = @pcrel(__do_global_dtors_aux - 16) } { .mii mov r3 = ip ;; add r2 = r2, r3 ;; } { .mib nop 0 mov b6 = r2 br.call.sptk.many b0 = b6 } #endif /* !HAVE_INITFINI_ARRAY_SUPPORT */ .section .text .align 32 .proc __do_global_dtors_aux __do_global_dtors_aux: .prologue #ifndef SHARED .save ar.pfs, r35 alloc loc3 = ar.pfs, 0, 4, 1, 0 addl loc0 = @gprel(dtor_ptr), gp .save rp, loc1 mov loc1 = rp .body mov loc2 = gp nop 0 br.sptk.many .entry #else /* if (__cxa_finalize) __cxa_finalize(__dso_handle) */ .save ar.pfs, r35 alloc loc3 = ar.pfs, 0, 4, 1, 0 addl loc0 = @gprel(dtor_ptr), gp addl r16 = @ltoff(@fptr(__cxa_finalize)), gp ;; ld8 r16 = [r16] ;; addl out0 = @ltoff(__dso_handle), gp cmp.ne p7, p0 = r0, r16 ;; ld8 out0 = [out0] (p7) ld8 r18 = [r16], 8 .save rp, loc1 mov loc1 = rp .body ;; mov loc2 = gp (p7) ld8 gp = [r16] (p7) mov b6 = r18 nop 0 nop 0 (p7) br.call.sptk.many rp = b6 ;; nop 0 nop 0 br.sptk.many .entry #endif /* do { dtor_ptr++; (*(dtor_ptr-1)) (); } while (dtor_ptr); */ .loop: st8 [loc0] = r15 // update dtor_ptr (in memory) ld8 r17 = [r16], 8 // r17 <- dtor's entry-point nop 0 ;; ld8 gp = [r16] // gp <- dtor's gp mov b6 = r17 br.call.sptk.many rp = b6 .entry: ld8 r15 = [loc0] // r15 <- dtor_ptr (gp-relative) ;; add r16 = r15, loc2 // r16 <- dtor_ptr (absolute) adds r15 = 8, r15 ;; ld8 r16 = [r16] // r16 <- pointer to dtor's fdesc mov rp = loc1 mov ar.pfs = loc3 ;; cmp.ne p6, p0 = r0, r16 (p6) br.cond.sptk.few .loop br.ret.sptk.many rp .endp __do_global_dtors_aux #ifdef SHARED .weak __cxa_finalize #endif .weak _Jv_RegisterClasses
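Per its own comments, __do_global_dtors_aux in crtbegin.S does two things: in the SHARED case it first calls __cxa_finalize(__dso_handle) if that weak symbol resolved, then it advances the gp-relative dtor_ptr through .dtors until the zero terminator that crtend.S supplies. Roughly, in C (dtor_ptr is really a gp-relative slot updated in memory; a plain pointer stands in here):

typedef void (*dtor_fn)(void);
extern dtor_fn *dtor_ptr;                       /* starts at __DTOR_LIST__ + 8 */
extern void __cxa_finalize(void *) __attribute__((weak));
extern void *__dso_handle;

static void do_global_dtors(void)
{
    if (__cxa_finalize)            /* weak: may be absent from the final link */
        __cxa_finalize(&__dso_handle);
    while (*dtor_ptr)              /* __DTOR_END__ is a zero terminator */
        (*dtor_ptr++)();           /* pointer is advanced before the call */
}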
4ms/metamodule-plugin-sdk
2,260
plugin-libc/libgcc/config/vax/lib1funcs.S
/* Copyright (C) 2009-2022 Free Software Foundation, Inc.
   This file is part of GCC.
   Contributed by Maciej W. Rozycki <macro@linux-mips.org>.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#ifdef L_udivsi3
	.text
	.globl __udivsi3
	.type __udivsi3, @function
__udivsi3:
	.word 0
	movl 8(%ap), %r1
	blss 0f			/* Check bit #31 of divisor.  */
	movl 4(%ap), %r2
	blss 1f			/* Check bit #31 of dividend.  */
	/* Both MSBs clear, do a standard division.  */
	divl3 %r1, %r2, %r0
	ret
	/* MSB of divisor set, only 1 or 0 may result.  */
0:
	decl %r1
	clrl %r0
	cmpl %r1, 4(%ap)
	adwc $0, %r0
	ret
	/* MSB of dividend set, do an extended division.  */
1:
	clrl %r3
	ediv %r1, %r2, %r0, %r3
	ret
	.size __udivsi3, . - __udivsi3
	.previous
#endif

#ifdef L_umodsi3
	.text
	.globl __umodsi3
	.type __umodsi3, @function
__umodsi3:
	.word 0
	movl 8(%ap), %r1
	blss 0f			/* Check bit #31 of divisor.  */
	movl 4(%ap), %r2
	blss 1f			/* Check bit #31 of dividend.  */
	/* Both MSBs clear, do a standard division.  */
	divl3 %r1, %r2, %r0
	mull2 %r0, %r1
	subl3 %r1, %r2, %r0
	ret
	/* MSB of divisor set, subtract the divisor at most once.  */
0:
	movl 4(%ap), %r2
	clrl %r0
	cmpl %r2, %r1
	sbwc $0, %r0
	bicl2 %r0, %r1
	subl3 %r1, %r2, %r0
	ret
	/* MSB of dividend set, do an extended division.  */
1:
	clrl %r3
	ediv %r1, %r2, %r3, %r0
	ret
	.size __umodsi3, . - __umodsi3
	.previous
#endif
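The VAX routines need the bit-31 special cases because divl3 and ediv operate on signed values. When the divisor's MSB is set, an unsigned quotient can only be 0 or 1, and the decl/cmpl/adwc sequence materializes exactly that carry bit; the umodsi3 variant likewise "subtracts the divisor at most once". The same two cases in C:

#include <stdint.h>

/* divisor has bit 31 set: quotient is 1 iff dividend >= divisor */
static uint32_t udiv_msb_set(uint32_t dividend, uint32_t divisor)
{
    return (dividend >= divisor) ? 1u : 0u;
}

/* matching remainder: subtract the divisor at most once */
static uint32_t umod_msb_set(uint32_t dividend, uint32_t divisor)
{
    return (dividend >= divisor) ? dividend - divisor : dividend;
}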
4ms/metamodule-plugin-sdk
1,496
plugin-libc/libgcc/config/mcore/crtn.S
# crtn.S for ELF based systems
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes sure that the .fini and .init sections do in
# fact return.  Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.

	.section ".init"
	.align	4
	ldw	r15,(r0, 12)
	addi	r0,16
	jmp	r15

	.section ".fini"
	.align	4
	ldw	r15, (r0, 12)
	addi	r0,16
	jmp	r15

# Th-th-th-that is all folks!
4ms/metamodule-plugin-sdk
1,657
plugin-libc/libgcc/config/mcore/crti.S
# crti.S for ELF based systems
# Copyright (C) 1992-2022 Free Software Foundation, Inc.
# Written By David Vinayak Henkel-Wallace, June 1992
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes a stack frame for the contents of the .fini and
# .init sections.  Users may put any desired instructions in those
# sections.

	.section ".init"
	.global	_init
	.type	_init,@function
	.align	4
_init:
	subi	r0, 16
	st.w	r15, (r0, 12)

# These nops are here to align the end of this code with a 16 byte
# boundary.  The linker will start inserting code into the .init
# section at such a boundary.

	nop
	nop
	nop
	nop
	nop
	nop

	.section ".fini"
	.global	_fini
	.type	_fini,@function
	.align	4
_fini:
	subi	r0, 16
	st.w	r15, (r0, 12)
	nop
	nop
	nop
	nop
	nop
	nop
4ms/metamodule-plugin-sdk
7,378
plugin-libc/libgcc/config/mcore/lib1funcs.S
/* libgcc routines for the MCore.
   Copyright (C) 1993-2022 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__, x)

#ifdef __ELF__
#define TYPE(x) .type SYM (x),@function
#define SIZE(x) .size SYM (x), . - SYM (x)
#else
#define TYPE(x)
#define SIZE(x)
#endif

.macro FUNC_START name
	.text
	.globl SYM (\name)
	TYPE (\name)
SYM (\name):
.endm

.macro FUNC_END name
	SIZE (\name)
.endm

#ifdef L_udivsi3
FUNC_START udiv32
FUNC_START udivsi3
	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)

	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9:
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator

// appears to be wrong...
// tested out incorrectly in our OS work...
//	mov	r7,r3		// looking at divisor
//	ff1	r7		// I can move 32-r7 more bits to left.
//	addi	r7,1		// ok, one short of that...
//	mov	r1,r2
//	lsr	r1,r7		// bits that came from low order...
//	rsubi	r7,31		// r7 == "32-n" == LEFT distance
//	addi	r7,1		// this is (32-n)
//	lsl	r4,r7		// fixes the high 32 (quotient)
//	lsl	r2,r7
//	cmpnei	r4,0
//	bf	4f		// the sentinel went away...

	// run the remaining bits

1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
	bf	2f
	sub	r1,r3		// if yes, subtract divisor
2:	addc	r4,r4		// shift by 1 and count subtracts
	bf	1b		// if sentinel falls out of quotient, stop

4:	mov	r2,r4		// return quotient
	mov	r3,r1		// and piggyback the remainder
	jmp	r15
FUNC_END udiv32
FUNC_END udivsi3
#endif

#ifdef L_umodsi3
FUNC_START urem32
FUNC_START umodsi3
	movi	r1,0		// r1-r2 form 64 bit dividend
	movi	r4,1		// r4 is quotient (1 for a sentinel)
	cmpnei	r3,0		// look for 0 divisor
	bt	9f
	trap	3		// divide by 0
9:
	// control iterations; skip across high order 0 bits in dividend
	mov	r7,r2
	cmpnei	r7,0
	bt	8f
	movi	r2,0		// 0 dividend
	jmp	r15		// quick return
8:
	ff1	r7		// figure distance to skip
	lsl	r4,r7		// move the sentinel along (with 0's behind)
	lsl	r2,r7		// and the low 32 bits of numerator
1:	lslc	r2,1		// 1 bit left shift of r1-r2
	addc	r1,r1
	cmphs	r1,r3		// upper 32 of dividend >= divisor?
bf 2f sub r1,r3 // if yes, subtract divisor 2: addc r4,r4 // shift by 1 and count subtracts bf 1b // if sentinel falls out of quotient, stop mov r2,r1 // return remainder jmp r15 FUNC_END urem32 FUNC_END umodsi3 #endif #ifdef L_divsi3 FUNC_START div32 FUNC_START divsi3 mov r5,r2 // calc sign of quotient xor r5,r3 abs r2 // do unsigned divide abs r3 movi r1,0 // r1-r2 form 64 bit dividend movi r4,1 // r4 is quotient (1 for a sentinel) cmpnei r3,0 // look for 0 divisor bt 9f trap 3 // divide by 0 9: // control iterations; skip across high order 0 bits in dividend mov r7,r2 cmpnei r7,0 bt 8f movi r2,0 // 0 dividend jmp r15 // quick return 8: ff1 r7 // figure distance to skip lsl r4,r7 // move the sentinel along (with 0's behind) lsl r2,r7 // and the low 32 bits of numerator // tested out incorrectly in our OS work... // mov r7,r3 // looking at divisor // ff1 r7 // I can move 32-r7 more bits to left. // addi r7,1 // ok, one short of that... // mov r1,r2 // lsr r1,r7 // bits that came from low order... // rsubi r7,31 // r7 == "32-n" == LEFT distance // addi r7,1 // this is (32-n) // lsl r4,r7 // fixes the high 32 (quotient) // lsl r2,r7 // cmpnei r4,0 // bf 4f // the sentinel went away... // run the remaining bits 1: lslc r2,1 // 1 bit left shift of r1-r2 addc r1,r1 cmphs r1,r3 // upper 32 of dividend >= divisor? bf 2f sub r1,r3 // if yes, subtract divisor 2: addc r4,r4 // shift by 1 and count subtracts bf 1b // if sentinel falls out of quotient, stop 4: mov r2,r4 // return quotient mov r3,r1 // piggyback the remainder btsti r5,31 // after adjusting for sign bf 3f rsubi r2,0 rsubi r3,0 3: jmp r15 FUNC_END div32 FUNC_END divsi3 #endif #ifdef L_modsi3 FUNC_START rem32 FUNC_START modsi3 mov r5,r2 // calc sign of remainder abs r2 // do unsigned divide abs r3 movi r1,0 // r1-r2 form 64 bit dividend movi r4,1 // r4 is quotient (1 for a sentinel) cmpnei r3,0 // look for 0 divisor bt 9f trap 3 // divide by 0 9: // control iterations; skip across high order 0 bits in dividend mov r7,r2 cmpnei r7,0 bt 8f movi r2,0 // 0 dividend jmp r15 // quick return 8: ff1 r7 // figure distance to skip lsl r4,r7 // move the sentinel along (with 0's behind) lsl r2,r7 // and the low 32 bits of numerator 1: lslc r2,1 // 1 bit left shift of r1-r2 addc r1,r1 cmphs r1,r3 // upper 32 of dividend >= divisor? bf 2f sub r1,r3 // if yes, subtract divisor 2: addc r4,r4 // shift by 1 and count subtracts bf 1b // if sentinel falls out of quotient, stop mov r2,r1 // return remainder btsti r5,31 // after adjusting for sign bf 3f rsubi r2,0 3: jmp r15 FUNC_END rem32 FUNC_END modsi3 #endif /* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2} will behave as __cmpdf2. So, we stub the implementations to jump on to __cmpdf2 and __cmpsf2. All of these shortcircuit the return path so that __cmp{sd}f2 will go directly back to the caller. 
*/ .macro COMPARE_DF_JUMP name .import SYM (cmpdf2) FUNC_START \name jmpi SYM (cmpdf2) FUNC_END \name .endm #ifdef L_eqdf2 COMPARE_DF_JUMP eqdf2 #endif /* L_eqdf2 */ #ifdef L_nedf2 COMPARE_DF_JUMP nedf2 #endif /* L_nedf2 */ #ifdef L_gtdf2 COMPARE_DF_JUMP gtdf2 #endif /* L_gtdf2 */ #ifdef L_gedf2 COMPARE_DF_JUMP gedf2 #endif /* L_gedf2 */ #ifdef L_ltdf2 COMPARE_DF_JUMP ltdf2 #endif /* L_ltdf2 */ #ifdef L_ledf2 COMPARE_DF_JUMP ledf2 #endif /* L_ledf2 */ /* SINGLE PRECISION FLOATING POINT STUBS */ .macro COMPARE_SF_JUMP name .import SYM (cmpsf2) FUNC_START \name jmpi SYM (cmpsf2) FUNC_END \name .endm #ifdef L_eqsf2 COMPARE_SF_JUMP eqsf2 #endif /* L_eqsf2 */ #ifdef L_nesf2 COMPARE_SF_JUMP nesf2 #endif /* L_nesf2 */ #ifdef L_gtsf2 COMPARE_SF_JUMP gtsf2 #endif /* L_gtsf2 */ #ifdef L_gesf2 COMPARE_SF_JUMP gesf2 #endif /* L_gesf2 */ #ifdef L_ltsf2 COMPARE_SF_JUMP ltsf2 #endif /* L_ltsf2 */ #ifdef L_lesf2 COMPARE_SF_JUMP lesf2 #endif /* L_lesf2 */
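All four mcore division entry points above share one loop: seed the quotient register with a lone sentinel bit, shift the 64-bit r1:r2 dividend left one bit per step, subtract the divisor from the high half whenever it fits, and stop when the sentinel falls out of bit 31. A C model of the unsigned case (a sketch for illustration; udiv32_model is a made-up name, not an exported symbol):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* C model of the udiv32 loop: hi:lo is the 64-bit dividend, q starts
   as a single sentinel bit.  Each step mirrors the lslc/addc/cmphs/
   sub/addc sequence; the loop ends when the sentinel is shifted out
   of the quotient (the "bf 1b" exit).  */
static uint32_t
udiv32_model (uint32_t num, uint32_t den, uint32_t *rem)
{
  assert (den != 0);                /* the real code executes "trap 3" */
  if (num == 0)                     /* the asm's early "quick return" */
    {
      *rem = 0;
      return 0;
    }
  int skip = __builtin_clz (num);   /* the ff1-based skip */
  uint32_t hi = 0, lo = num << skip, q = 1u << skip;
  for (;;)
    {
      hi = (hi << 1) | (lo >> 31);  /* lslc r2,1 ; addc r1,r1 */
      lo <<= 1;
      uint32_t bit = 0;
      if (hi >= den)                /* cmphs r1,r3 */
        {
          hi -= den;                /* sub r1,r3 */
          bit = 1;
        }
      uint32_t sentinel_out = q >> 31;
      q = (q << 1) | bit;           /* addc r4,r4 counts subtracts */
      if (sentinel_out)             /* sentinel left the quotient */
        break;
    }
  *rem = hi;                        /* "piggyback the remainder" */
  return q;
}

int main (void)
{
  uint32_t r;
  printf ("%u rem %u\n", udiv32_model (100, 7, &r), r);  /* 14 rem 2 */
  return 0;
}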
4ms/metamodule-plugin-sdk
1,511
plugin-libc/libgcc/config/arc/crtn.S
/* Ensure .fini/.init return for the Synopsys DesignWare ARC CPU. Copyright (C) 1994-2022 Free Software Foundation, Inc. Contributor: Joern Rennecke <joern.rennecke@embecosm.com> on behalf of Synopsys Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ # This file just makes sure that the .fini and .init sections do in # fact return. This file is the last thing linked into any executable. #ifdef __ARC_RF16__ /* Use object attributes to inform other tools this file is safe for RF16 configuration. */ .arc_attribute Tag_ARC_ABI_rf16, 1 #endif .section .init pop_s blink j_s [blink] .section .fini pop_s blink j_s [blink]
4ms/metamodule-plugin-sdk
2,339
plugin-libc/libgcc/config/arc/crttls.S
; newlib tls glue code for Synopsys DesignWare ARC cpu. /* Copyright (C) 2016-2022 Free Software Foundation, Inc. Contributor: Joern Rennecke <joern.rennecke@embecosm.com> on behalf of Synopsys Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, if you link this library with other files, some of which are compiled with GCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ #ifdef __ARC_RF16__ /* Use object attributes to inform other tools this file is safe for RF16 configuration. */ .arc_attribute Tag_ARC_ABI_rf16, 1 #endif #if (__ARC_TLS_REGNO__ != -1) /* ANSI concatenation macros. */ #define CONCAT1(a, b) CONCAT2(a, b) #define CONCAT2(a, b) a ## b /* Use the right prefix for global labels. */ #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) #define FUNC(X) .type SYM(X),@function #define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X #define ENDFUNC(X) ENDFUNC0(X) .global SYM(__read_tp) SYM(__read_tp): FUNC(__read_tp) mov r0, CONCAT1 (r, __ARC_TLS_REGNO__) nop j [blink] ENDFUNC(__read_tp) .section .init mov CONCAT1 (r, __ARC_TLS_REGNO__),__main_tcb_end+256 .section .tbss __main_tcb: .long 0 .long 0 __main_tcb_end: #endif /*__ARC_TLS_REGNO__ != -1 */
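The glue above dedicates one core register to the thread pointer and hands it out through __read_tp, so a TLS access reduces to "thread pointer plus linker-resolved offset". A single-threaded C model of that contract (illustrative only: tls_reg, tcb_storage and the offset 16 are invented stand-ins):

#include <stdio.h>

/* Stand-ins for the real thread control block and for
   r<__ARC_TLS_REGNO__>, the register pinned by the .init code.  */
static long  tcb_storage[32];
static void *tls_reg = tcb_storage;

void *
__read_tp (void)
{
  return tls_reg;       /* mov r0, r<__ARC_TLS_REGNO__> */
}

int main (void)
{
  /* A TLS load is just thread pointer + offset; 16 is made up here,
     the linker computes the real value.  */
  int *slot = (int *) ((char *) __read_tp () + 16);
  *slot = 42;
  printf ("%d\n", *slot);
  return 0;
}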
4ms/metamodule-plugin-sdk
1,537
plugin-libc/libgcc/config/arc/crti.S
/* .fini/.init stack frame setup for the Synopsys DesignWare ARC CPU. Copyright (C) 1994-2022 Free Software Foundation, Inc. Contributor: Joern Rennecke <joern.rennecke@embecosm.com> on behalf of Synopsys Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ # This file contains the stack frame setup for contents of the .fini and # .init sections. #ifdef __ARC_RF16__ /* Use object attributes to inform other tools this file is safe for RF16 configuration. */ .arc_attribute Tag_ARC_ABI_rf16, 1 #endif .section .init .global _init .word 0 .type _init,@function _init: push_s blink .section .fini .global _fini .word 0 .type _fini,@function _fini: push_s blink
4ms/metamodule-plugin-sdk
30,805
plugin-libc/libgcc/config/arc/lib1funcs.S
; libgcc1 routines for Synopsys DesignWare ARC cpu. /* Copyright (C) 1995-2022 Free Software Foundation, Inc. Contributor: Joern Rennecke <joern.rennecke@embecosm.com> on behalf of Synopsys Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, if you link this library with other files, some of which are compiled with GCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ /* ANSI concatenation macros. */ #define CONCAT1(a, b) CONCAT2(a, b) #define CONCAT2(a, b) a ## b /* Use the right prefix for global labels. */ #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) #ifndef WORKING_ASSEMBLER #define abs_l abs #define asl_l asl #define mov_l mov #endif #define FUNC(X) .type SYM(X),@function #define HIDDEN_FUNC(X) FUNC(X)` .hidden X #define ENDFUNC0(X) .Lfe_##X: .size X,.Lfe_##X-X #define ENDFUNC(X) ENDFUNC0(X) #ifdef __ARC_RF16__ /* Use object attributes to inform other tools this file is safe for RF16 configuration. */ .arc_attribute Tag_ARC_ABI_rf16, 1 #endif #ifdef L_mulsi3 .section .text .align 4 .global SYM(__mulsi3) SYM(__mulsi3): /* This is the simple version. while (a) { if (a & 1) r += b; a >>= 1; b <<= 1; } */ #if defined (__ARC_MUL64__) FUNC(__mulsi3) mulu64 r0,r1 j_s.d [blink] mov_s r0,mlo ENDFUNC(__mulsi3) #elif defined (__ARC_MPY__) HIDDEN_FUNC(__mulsi3) mpyu r0,r0,r1 nop_s j_s [blink] ENDFUNC(__mulsi3) #elif defined (__ARC_NORM__) FUNC(__mulsi3) norm.f r2,r0 rsub lp_count,r2,31 mov.mi lp_count,32 mov_s r2,r0 mov_s r0,0 lpnz @.Lend ; loop is aligned lsr.f r2,r2 add.cs r0,r0,r1 add_s r1,r1,r1 .Lend: j_s [blink] ENDFUNC(__mulsi3) #elif !defined (__OPTIMIZE_SIZE__) && defined (__ARC_BARREL_SHIFTER__) /* Up to 3.5 times faster than the simpler code below, but larger. */ FUNC(__mulsi3) ror.f r2,r0,4 mov_s r0,0 add3.mi r0,r0,r1 asl.f r2,r2,2 add2.cs r0,r0,r1 jeq_s [blink] .Loop: add1.mi r0,r0,r1 asl.f r2,r2,2 add.cs r0,r0,r1 asl_s r1,r1,4 ror.f r2,r2,8 add3.mi r0,r0,r1 asl.f r2,r2,2 bne.d .Loop add2.cs r0,r0,r1 j_s [blink] ENDFUNC(__mulsi3) #elif !defined (__OPTIMIZE_SIZE__) /* __ARC601__ */ FUNC(__mulsi3) lsr.f r2,r0 mov_s r0,0 mov_s r3,0 add.cs r0,r0,r1 .Loop: lsr.f r2,r2 add1.cs r0,r0,r1 lsr.f r2,r2 add2.cs r0,r0,r1 lsr.f r2,r2 add3.cs r0,r0,r1 bne.d .Loop add3 r1,r3,r1 j_s [blink] ENDFUNC(__mulsi3) #else /********************************************************/ FUNC(__mulsi3) mov_s r2,0 ; Accumulate result here.
.Lloop: bbit0 r0,0,@.Ly add_s r2,r2,r1 ; r += b .Ly: lsr_s r0,r0 ; a >>= 1 asl_s r1,r1 ; b <<= 1 brne_s r0,0,@.Lloop .Ldone: j_s.d [blink] mov_s r0,r2 ENDFUNC(__mulsi3) /********************************************************/ #endif #endif /* L_mulsi3 */ #ifdef L_umulsidi3 .section .text .align 4 .global SYM(__umulsidi3) SYM(__umulsidi3): HIDDEN_FUNC(__umulsidi3) /* We need ARC700 /ARC_MUL64 definitions of __umulsidi3 / __umulsi3_highpart in case some code has been compiled without multiply support enabled, but linked with the multiply-support enabled libraries. For ARC601 (i.e. without a barrel shifter), we also use umulsidi3 as our umulsi3_highpart implementation; the use of the latter label doesn't actually benefit ARC601 platforms, but is useful when ARC601 code is linked against other libraries. */ #if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \ || !defined (__ARC_BARREL_SHIFTER__) .global SYM(__umulsi3_highpart) SYM(__umulsi3_highpart): HIDDEN_FUNC(__umulsi3_highpart) #endif /* This is the simple version. while (a) { if (a & 1) r += b; a >>= 1; b <<= 1; } */ #include "ieee-754/arc-ieee-754.h" #ifdef __ARC_MPY__ mov_s r12,DBL0L mpyu DBL0L,r12,DBL0H j_s.d [blink] MPYHU DBL0H,r12,DBL0H #elif defined (__ARC_MUL64__) /* Likewise for __ARC_MUL64__ */ mulu64 r0,r1 mov_s DBL0L,mlo j_s.d [blink] mov_s DBL0H,mhi #else /* !__ARC_MPY__ && !__ARC_MUL64__ */ /* Although it might look tempting to extend this to handle muldi3, using mulsi3 twice with 2.25 cycles per 32 bit add is faster than one loop with 3 or four cycles per 32 bit add. */ asl.f r12,0 ; Top part of b. mov_s r2,0 ; Accumulate result here. bbit1.d r0,0,@.Ladd mov_s r3,0 .Llooptst: rlc r12,r12 breq r0,0,@.Ldone ; while (a) .Lloop: asl.f r1,r1 ; b <<= 1 bbit0.d r0,1,@.Llooptst lsr r0,r0 ; a >>= 1 rlc r12,r12 .Ladd: add.f r3,r3,r1 ; r += b brne.d r0,0,@.Lloop ; while (a); adc r2,r2,r12 .Ldone: mov_s DBL0L,r3 j_s.d [blink] mov DBL0H,r2 #endif /* !__ARC_MPY__*/ ENDFUNC(__umulsidi3) #if defined (__ARC_MPY__) || defined (__ARC_MUL64__) \ || !defined (__ARC_BARREL_SHIFTER__) ENDFUNC(__umulsi3_highpart) #endif #endif /* L_umulsidi3 */ #ifndef __ARC_RF16__ #ifdef L_muldi3 .section .text .align 4 .global SYM(__muldi3) SYM(__muldi3): #ifdef __LITTLE_ENDIAN__ push_s blink mov_s r4,r3 ;4 mov_s r5,r2 ;4 mov_s r9,r0 ;4 mov_s r8,r1 ;4 bl.d @__umulsidi3 mov_s r1,r2 ;4 mov_s r6,r0 ;4 mov_s r7,r1 ;4 mov_s r0,r9 ;4 bl.d @__mulsi3 mov_s r1,r4 ;4 mov_s r4,r0 ;4 mov_s r1,r8 ;4 bl.d @__mulsi3 mov_s r0,r5 ;4 pop_s blink add_s r0,r0,r4 ;2 add r1,r0,r7 j_s.d [blink] mov_s r0,r6 ;4 #else push_s blink mov_s r5,r3 mov_s r9,r2 mov_s r4,r1 mov_s r8,r0 mov_s r0,r1 bl.d @__umulsidi3 mov_s r1,r3 mov_s r7,r0 mov_s r6,r1 mov_s r0,r4 bl.d @__mulsi3 mov_s r1,r9 mov_s r4,r0 mov_s r1,r8 bl.d @__mulsi3 mov_s r0,r5 pop_s blink add_s r0,r0,r4 add_s r0,r0,r7 j_s.d [blink] mov_s r1,r6 #endif /* __LITTLE_ENDIAN__ */ ENDFUNC(__muldi3) #endif /* L_muldi3 */ #endif /* !__ARC_RF16__ */ #ifdef L_umulsi3_highpart #include "ieee-754/arc-ieee-754.h" /* For use without a barrel shifter, and for ARC700 / ARC_MUL64, the mulsidi3 algorithms above look better, so for these, there is an extra label up there. */ #if !defined (__ARC_MPY__) && !defined (__ARC_MUL64__) \ && defined (__ARC_BARREL_SHIFTER__) .global SYM(__umulsi3_highpart) SYM(__umulsi3_highpart): HIDDEN_FUNC(__umulsi3_highpart) mov_s r2,0 mov_s r3,32 .Loop: lsr.f r0,r0 add.cs.f r2,r2,r1 sub_s r3,r3,1 brne.d r0,0,.Loop rrc r2,r2 j_s.d [blink] /* Make the result register peephole-compatible with mulsidi3.
*/ lsr DBL0H,r2,r3 ENDFUNC(__umulsi3_highpart) #endif /* !__ARC_MPY__ && __ARC_BARREL_SHIFTER__ */ #endif /* L_umulsi3_highpart */ #ifdef L_divmod_tools ; Utilities used by all routines. .section .text /* unsigned long udivmodsi4(int modwanted, unsigned long num, unsigned long den) { unsigned long bit = 1; unsigned long res = 0; while (den < num && bit && !(den & (1L<<31))) { den <<=1; bit <<=1; } while (bit) { if (num >= den) { num -= den; res |= bit; } bit >>=1; den >>=1; } if (modwanted) return num; return res; } */ ; inputs: r0 = numerator, r1 = denominator ; outputs: r0 = quotient, r1 = remainder, r2/r3 trashed .balign 4 .global SYM(__udivmodsi4) FUNC(__udivmodsi4) SYM(__udivmodsi4): #if defined (__ARC_EA__) /* Normalize divisor and dividend, and then use the appropriate number of divaw (the number of result bits, or one more) to produce the result. There are some special conditions that need to be tested: - We can only directly normalize unsigned numbers that fit in 31 bit. For the divisor, we test early on that it is not 'negative'. - divaw can't correctly process a dividend that is larger than the divisor. We handle this by checking that the dividend prior to normalization is not larger than the normalized divisor. As we then already know that the divisor fits 31 bit, this check also makes sure that the dividend fits. - ordinary normalization of the dividend could make it larger than the normalized divisor, which again would be unsuitable for divaw. Thus, we want to shift left the dividend by one less, except that we want to leave it alone if it is already 31 bit. To this end, we double the input to norm with adds. - If the dividend has fewer bits than the divisor, that would leave us with a negative number of divaw to execute. Although we could use a conditional loop to avoid excess divaw, and then the quotient could be extracted correctly as there'd be more than enough zero bits, the remainder would be shifted left too far, requiring a conditional shift right. The cost of that shift and the possible mispredict on the conditional loop cost as much as putting in an early check for a zero result. */ bmsk r3,r0,29 brne.d r3,r0,.Large_dividend norm.f r2,r1 brlo r0,r1,.Lret0 norm r3,r0 asl_s r1,r1,r2 sub_s r3,r3,1 asl_l r0,r0,r3 ; not short to keep loop aligned sub lp_count,r2,r3 lp .Ldiv_end divaw r0,r0,r1 .Ldiv_end:sub_s r3,r2,1 lsr r1,r0,r2 j_s.d [blink] bmsk r0,r0,r3 .balign 4 .Large_dividend: bmi .Ltrivial asl_s r1,r1,r2 mov_s r3,0 sub1.f r4,r0,r1 mov.lo r4,r0 mov.hs r3,2 cmp r4,r1 sub.hs r4,r4,r1 add.hs r3,r3,1 mov.f lp_count,r2 lpne .Ldiv_end2 divaw r4,r4,r1 .Ldiv_end2:asl r0,r3,r2 lsr r1,r4,r2 sub_s r2,r2,1 bmsk r4,r4,r2 j_s.d [blink] or.ne r0,r0,r4 .Lret0: mov_s r1,r0 j_s.d [blink] mov_l r0,0 .balign 4 .Ltrivial: sub.f r1,r0,r1 mov.c r1,r0 mov_s r0,1 j_s.d [blink] mov.c r0,0 #elif !defined (__OPTIMIZE_SIZE__) && !defined (__ARC_RF16__) #if defined (__ARC_NORM__) && defined (__ARC_BARREL_SHIFTER__) lsr_s r2,r0 brhs.d r1,r2,.Lret0_3 norm r2,r2 norm r3,r1 sub_s r3,r3,r2 asl_s r1,r1,r3 sub1.f 0,r0,r1 lsr.cs r1,r1,1 sbc r2,r3,0 sub1 r0,r0,r1 cmp_s r0,r1 mov.f lp_count,r2 #else /* !
__ARC_NORM__ */ lsr_s r2,r0 brhs.d r1,r2,.Lret0_3 mov lp_count,32 .Lloop1: asl_s r1,r1 ; den <<= 1 brls.d r1,r2,@.Lloop1 sub lp_count,lp_count,1 sub_s r0,r0,r1 lsr_s r1,r1 cmp_s r0,r1 xor.f r2,lp_count,31 #if !defined (__ARCEM__) && !defined (__ARCHS__) mov_s lp_count,r2 #else mov lp_count,r2 nop_s #endif /* !__ARCEM__ && !__ARCHS__ */ #endif /* !__ARC_NORM__ */ sub.cc r0,r0,r1 mov_s r3,3 sbc r3,r3,0 #if defined (__ARC_BARREL_SHIFTER__) asl_s r3,r3,r2 rsub r1,r1,1 lpne @.Lloop2_end add1.f r0,r1,r0 sub.cc r0,r0,r1 .Lloop2_end: lsr r1,r0,r2 #else rsub r1,r1,1 lpne @.Lloop2_end asl_s r3,r3 add1.f r0,r1,r0 sub.cc r0,r0,r1 .Lloop2_end: lsr_s r1,r0 lsr.f lp_count,r2 mov.cc r1,r0 lpnz 1f lsr_s r1,r1 lsr_s r1,r1 1: #endif bmsk r0,r0,r2 bclr r0,r0,r2 j_s.d [blink] or_s r0,r0,r3 .Lret0_3: #if 0 /* Slightly shorter, but slower. */ lp .Loop3_end brhi.d r1,r0,.Loop3_end sub_s r0,r0,r1 .Loop3_end add_s r1,r1,r0 j_s.d [blink] rsub r0,lp_count,32-1 #else mov_s r4,r1 sub.f r1,r0,r1 sbc r0,r0,r0 sub.cc.f r1,r1,r4 sbc r0,r0,0 sub.cc.f r1,r1,r4 sbc r0,r0,-3 j_s.d [blink] add.cs r1,r1,r4 #endif #else /* Arctangent-A5 */ breq_s r1,0,@.Ldivmodend mov_s r2,1 ; bit = 1 mov_s r3,0 ; res = 0 .Lloop1: brhs r1,r0,@.Lloop2 bbit1 r1,31,@.Lloop2 asl_s r1,r1 ; den <<= 1 b.d @.Lloop1 asl_s r2,r2 ; bit <<= 1 .Lloop2: brlo r0,r1,@.Lshiftdown sub_s r0,r0,r1 ; num -= den or_s r3,r3,r2 ; res |= bit .Lshiftdown: lsr_s r2,r2 ; bit >>= 1 lsr_s r1,r1 ; den >>= 1 brne_s r2,0,@.Lloop2 .Ldivmodend: mov_s r1,r0 ; r1 = mod j.d [blink] mov_s r0,r3 ; r0 = res /******************************************************/ #endif ENDFUNC(__udivmodsi4) #endif #ifdef L_udivsi3 .section .text .align 4 .global SYM(__udivsi3) FUNC(__udivsi3) SYM(__udivsi3): b @SYM(__udivmodsi4) ENDFUNC(__udivsi3) #endif /* L_udivsi3 */ #ifdef L_divsi3 .section .text .align 4 .global SYM(__divsi3) FUNC(__divsi3) #ifndef __ARC_EA__ SYM(__divsi3): /* A5 / ARC60? */ mov r12,blink xor r11,r0,r1 abs_s r0,r0 bl.d @SYM(__udivmodsi4) abs_s r1,r1 tst r11,r11 j.d [r12] neg.mi r0,r0 #else /* !ifndef __ARC_EA__ */ ;; We can use the abs, norm, divaw and mpy instructions for ARC700 #define MULDIV #ifdef MULDIV /* This table has been generated by divtab-arc700.c. */ /* 1/512 .. 1/256, normalized. There is a leading 1 in bit 31. For powers of two, we list unnormalized numbers instead. The values for powers of 2 are loaded, but not used. The value for 1 is actually the first instruction after .Lmuldiv. 
*/ .balign 4 .Ldivtab: .long 0x1000000 .long 0x80808081 .long 0x81020409 .long 0x81848DA9 .long 0x82082083 .long 0x828CBFBF .long 0x83126E98 .long 0x83993053 .long 0x84210843 .long 0x84A9F9C9 .long 0x85340854 .long 0x85BF3762 .long 0x864B8A7E .long 0x86D90545 .long 0x8767AB60 .long 0x87F78088 .long 0x88888889 .long 0x891AC73B .long 0x89AE408A .long 0x8A42F871 .long 0x8AD8F2FC .long 0x8B70344B .long 0x8C08C08D .long 0x8CA29C05 .long 0x8D3DCB09 .long 0x8DDA5203 .long 0x8E78356E .long 0x8F1779DA .long 0x8FB823EF .long 0x905A3864 .long 0x90FDBC0A .long 0x91A2B3C5 .long 0x92492493 .long 0x92F11385 .long 0x939A85C5 .long 0x94458095 .long 0x94F20950 .long 0x95A02569 .long 0x964FDA6D .long 0x97012E03 .long 0x97B425EE .long 0x9868C80A .long 0x991F1A52 .long 0x99D722DB .long 0x9A90E7DA .long 0x9B4C6F9F .long 0x9C09C09D .long 0x9CC8E161 .long 0x9D89D89E .long 0x9E4CAD24 .long 0x9F1165E8 .long 0x9FD809FE .long 0xA0A0A0A1 .long 0xA16B312F .long 0xA237C32C .long 0xA3065E40 .long 0xA3D70A3E .long 0xA4A9CF1E .long 0xA57EB503 .long 0xA655C43A .long 0xA72F053A .long 0xA80A80A9 .long 0xA8E83F58 .long 0xA9C84A48 .long 0xAAAAAAAB .long 0xAB8F69E3 .long 0xAC769185 .long 0xAD602B59 .long 0xAE4C415D .long 0xAF3ADDC7 .long 0xB02C0B03 .long 0xB11FD3B9 .long 0xB21642C9 .long 0xB30F6353 .long 0xB40B40B5 .long 0xB509E68B .long 0xB60B60B7 .long 0xB70FBB5B .long 0xB81702E1 .long 0xB92143FB .long 0xBA2E8BA3 .long 0xBB3EE722 .long 0xBC52640C .long 0xBD691048 .long 0xBE82FA0C .long 0xBFA02FE9 .long 0xC0C0C0C1 .long 0xC1E4BBD6 .long 0xC30C30C4 .long 0xC4372F86 .long 0xC565C87C .long 0xC6980C6A .long 0xC7CE0C7D .long 0xC907DA4F .long 0xCA4587E7 .long 0xCB8727C1 .long 0xCCCCCCCD .long 0xCE168A78 .long 0xCF6474A9 .long 0xD0B69FCC .long 0xD20D20D3 .long 0xD3680D37 .long 0xD4C77B04 .long 0xD62B80D7 .long 0xD79435E6 .long 0xD901B204 .long 0xDA740DA8 .long 0xDBEB61EF .long 0xDD67C8A7 .long 0xDEE95C4D .long 0xE070381D .long 0xE1FC780F .long 0xE38E38E4 .long 0xE525982B .long 0xE6C2B449 .long 0xE865AC7C .long 0xEA0EA0EB .long 0xEBBDB2A6 .long 0xED7303B6 .long 0xEF2EB720 .long 0xF0F0F0F1 .long 0xF2B9D649 .long 0xF4898D60 .long 0xF6603D99 .long 0xF83E0F84 .long 0xFA232CF3 .long 0xFC0FC0FD .long 0xFE03F810 .long 0x2000000 .long 0x81020409 .long 0x82082083 .long 0x83126E98 .long 0x84210843 .long 0x85340854 .long 0x864B8A7E .long 0x8767AB60 .long 0x88888889 .long 0x89AE408A .long 0x8AD8F2FC .long 0x8C08C08D .long 0x8D3DCB09 .long 0x8E78356E .long 0x8FB823EF .long 0x90FDBC0A .long 0x92492493 .long 0x939A85C5 .long 0x94F20950 .long 0x964FDA6D .long 0x97B425EE .long 0x991F1A52 .long 0x9A90E7DA .long 0x9C09C09D .long 0x9D89D89E .long 0x9F1165E8 .long 0xA0A0A0A1 .long 0xA237C32C .long 0xA3D70A3E .long 0xA57EB503 .long 0xA72F053A .long 0xA8E83F58 .long 0xAAAAAAAB .long 0xAC769185 .long 0xAE4C415D .long 0xB02C0B03 .long 0xB21642C9 .long 0xB40B40B5 .long 0xB60B60B7 .long 0xB81702E1 .long 0xBA2E8BA3 .long 0xBC52640C .long 0xBE82FA0C .long 0xC0C0C0C1 .long 0xC30C30C4 .long 0xC565C87C .long 0xC7CE0C7D .long 0xCA4587E7 .long 0xCCCCCCCD .long 0xCF6474A9 .long 0xD20D20D3 .long 0xD4C77B04 .long 0xD79435E6 .long 0xDA740DA8 .long 0xDD67C8A7 .long 0xE070381D .long 0xE38E38E4 .long 0xE6C2B449 .long 0xEA0EA0EB .long 0xED7303B6 .long 0xF0F0F0F1 .long 0xF4898D60 .long 0xF83E0F84 .long 0xFC0FC0FD .long 0x4000000 .long 0x82082083 .long 0x84210843 .long 0x864B8A7E .long 0x88888889 .long 0x8AD8F2FC .long 0x8D3DCB09 .long 0x8FB823EF .long 0x92492493 .long 0x94F20950 .long 0x97B425EE .long 0x9A90E7DA .long 0x9D89D89E .long 0xA0A0A0A1 .long 0xA3D70A3E .long 
0xA72F053A .long 0xAAAAAAAB .long 0xAE4C415D .long 0xB21642C9 .long 0xB60B60B7 .long 0xBA2E8BA3 .long 0xBE82FA0C .long 0xC30C30C4 .long 0xC7CE0C7D .long 0xCCCCCCCD .long 0xD20D20D3 .long 0xD79435E6 .long 0xDD67C8A7 .long 0xE38E38E4 .long 0xEA0EA0EB .long 0xF0F0F0F1 .long 0xF83E0F84 .long 0x8000000 .long 0x84210843 .long 0x88888889 .long 0x8D3DCB09 .long 0x92492493 .long 0x97B425EE .long 0x9D89D89E .long 0xA3D70A3E .long 0xAAAAAAAB .long 0xB21642C9 .long 0xBA2E8BA3 .long 0xC30C30C4 .long 0xCCCCCCCD .long 0xD79435E6 .long 0xE38E38E4 .long 0xF0F0F0F1 .long 0x10000000 .long 0x88888889 .long 0x92492493 .long 0x9D89D89E .long 0xAAAAAAAB .long 0xBA2E8BA3 .long 0xCCCCCCCD .long 0xE38E38E4 .long 0x20000000 .long 0x92492493 .long 0xAAAAAAAB .long 0xCCCCCCCD .long 0x40000000 .long 0xAAAAAAAB .long 0x80000000 __muldiv: neg r4,r2 ld.as r5,[pcl,r4] abs_s r12,r0 bic.f 0,r2,r4 mpyhu.ne r12,r12,r5 norm r3,r2 xor.f 0,r0,r1 ; write port allocation stall rsub r3,r3,30 lsr r0,r12,r3 j_s.d [blink] neg.mi r0,r0 .balign 4 SYM(__divsi3): norm r3,r1 abs_s r2,r1 brhs r3,23,__muldiv norm r4,r0 abs_l r12,r0 brhs r4,r3,.Lonebit asl_s r2,r2,r3 asl r12,r12,r4 sub lp_count,r3,r4 sub.f r12,r12,r2 brge.d r12,r2,.Lsbit sub r4,r3,r4 add.lo r12,r12,r2 lp .Ldivend .Ldivstart:divaw r12,r12,r2 .Ldivend:xor_s r1,r1,r0 sub r0,r4,1 bmsk r0,r12,r0 bset.hs r0,r0,r4 tst_s r1,r1 j_s.d [blink] neg.mi r0,r0 .Lonebit: xor_s r1,r1,r0 asr_s r1,r1,31 sub1.f 0,r12,r2 ; special case: -2**(n+1) / 2**n or r0,r1,1 add.eq r0,r0,r0 cmp_s r12,r2 j_s.d [blink] mov.lo r0,0 .Lsbit: ; Need to handle special cases involving negative powers of two: ; r12,r2 are normalized dividend / divisor; ; divide anything by 0x80000000, or divide 0x80000000 by 0x40000000 add_s r12,r12,r2 xor_s r1,r1,r0 rsub r4,r4,-1 ror r0,r12,r4 tst_s r2,r2 bmsk r0,r0,r3 add.pl r0,r0,r0 tst_s r1,r1 j_s.d [blink] neg.mi r0,r0 #else /* !MULDIV */ /* This version requires that divaw works with a divisor of 0x80000000U */ abs_s r2,r1 norm r4,r0 neg_s r3,r2 norm r3,r3 abs_s r12,r0 brhs r4,r3,.Lonebit asl_s r2,r2,r3 asl r12,r12,r4 sub lp_count,r3,r4 cmp_s r12,r2 sub.hs r12,r12,r2 lp .Ldivend .Ldivstart:divaw r12,r12,r2 .Ldivend:xor_s r1,r1,r0 sub_s r0,r3,1 bmsk r0,r12,r0 bset.hs r0,r0,r3 tst_s r1,r1 j_s.d [blink] negmi r0,r0 .Lonebit: xor_s r1,r1,r0 asr_s r1,r1,31 cmp_s r12,r2 mov_s r0,0 j_s.d [blink] orhs r0,r1,1 #endif /* MULDIV */ #endif /* ifndef __ARC700__ */ ENDFUNC(__divsi3) #endif /* L_divsi3 */ #ifdef L_umodsi3 .section .text .align 4 .global SYM(__umodsi3) FUNC(__umodsi3) SYM(__umodsi3): mov r7,blink bl.nd @SYM(__udivmodsi4) j.d [r7] mov r0,r1 ENDFUNC(__umodsi3) #endif /* L_umodsi3 */ #ifdef L_modsi3 .section .text .align 4 .global SYM (__modsi3) FUNC(__modsi3) SYM(__modsi3): #ifndef __ARC_EA__ /* A5 / ARC60? 
*/ mov_s r12,blink mov_s r11,r0 abs_s r0,r0 bl.d @SYM(__udivmodsi4) abs_s r1,r1 tst r11,r11 neg_s r0,r1 j_s.d [r12] mov.pl r0,r1 #else /* __ARC_EA__ */ abs_s r2,r1 norm.f r4,r0 neg r5,r2 norm r3,r5 abs_l r12,r0 brhs r4,r3,.Lonebit asl_s r2,r2,r3 asl r12,r12,r4 sub lp_count,r3,r4 cmp_s r12,r2 sub.hs r12,r12,r2 tst_s r0,r0 lp .Ldivend .Ldivstart:divaw r12,r12,r2 .Ldivend: lsr r0,r12,r3 j_s.d [blink] neg.mi r0,r0 .balign 4 .Lonebit:neg.pl r5,r5 cmp_s r12,r2 j_s.d [blink] sub.hs r0,r0,r5 #endif /* !__ARC_EA__ */ ENDFUNC(__modsi3) #endif /* L_modsi3 */ #ifdef L_clzsi2 .section .text .align 4 .global SYM (__clzsi2) SYM(__clzsi2): #ifdef __ARC_NORM__ HIDDEN_FUNC(__clzsi2) norm.f r0,r0 mov.mi r0,0 j_s.d [blink] add.pl r0,r0,1 ENDFUNC(__clzsi2) #elif !defined (__ARC_BARREL_SHIFTER__) FUNC(__clzsi2) mov lp_count,10 mov_l r1,0 bset r2,r1,29 lp .Loop_end brhs r0,r2,.Loop_end add3 r0,r1,r0 .Loop_end: asl.f 0,r0 sub2 r0,lp_count,lp_count sub.cs.f r0,r0,1 add r0,r0,31 j_s.d [blink] add.pl r0,r0,1 ENDFUNC(__clzsi2) #else FUNC(__clzsi2) asl.f 0,r0,2 mov r1,-1 .Lcheck: bbit1.d r0,31,.Ldone asl.pl r0,r0,3 bcs.d .Ldone_1 add_s r1,r1,3 bpnz.d .Lcheck asl.f 0,r0,2 mov_s r0,32 j_s.d [blink] mov.ne r0,r1 .Ldone: j_s.d [blink] add_s r0,r1,1 .Ldone_1: j_s.d [blink] sub_s r0,r1,1 ENDFUNC(__clzsi2) #endif #endif /* L_clzsi2 */ .section .text ;;; MILLICODE THUNK LIB ;*************** ;;; .macro push_regs from, to, offset ;;; st_s "\from", [sp, \offset] ;;; .if \to-\from ;;; push_regs "(\from+1)", \to, "(\offset+4)" ;;; .endif ;;; .endm ;;; push_regs 13, 18, 0 ;;; ;;;; .macro sum from, to, three ;;;; .long \from ;;;; .long \three ;;;; .local regno ;;;; .set regno, \from+1 ;;;; .set shift, 32 ;;;; .set shift, shift - 1 ;;;; # st_s %shift @3 lsl #shift ;;;; .if \to-\from ;;;; sum "(\from+1)", \to, "(\three)" ;;;; .endif ;;;; .endm ;;;; ;;;; SUM 0,5, 9 ;;;; ; .altmacro ;; .macro push_regs from=0, to=3, offset ;; st_s r\from, [sp, \offset] ;; .if \to-\from ;; push_regs "\from+1 ",\to,"(\offset+4)" ;; .endif ;; .endm ;; ;; .macro expand_to_push from=13, to ;; ; .section .text ;; ; .align 4 ;; ; .global st_ ;; ; .type foo, ;; st_13_to_25: ;; ; push_regs \from, \to, 0 ;; push_regs 0,3 ; ;; .endm ;; ;; expand_to_push 13,18 ;; ;#endif #ifndef __ARC_RF16__ #ifdef L_millicodethunk_st .section .text .align 4 .global SYM(__st_r13_to_r15) .global SYM(__st_r13_to_r16) .global SYM(__st_r13_to_r17) .global SYM(__st_r13_to_r18) .global SYM(__st_r13_to_r19) .global SYM(__st_r13_to_r20) .global SYM(__st_r13_to_r21) .global SYM(__st_r13_to_r22) .global SYM(__st_r13_to_r23) .global SYM(__st_r13_to_r24) .global SYM(__st_r13_to_r25) HIDDEN_FUNC(__st_r13_to_r15) HIDDEN_FUNC(__st_r13_to_r16) HIDDEN_FUNC(__st_r13_to_r17) HIDDEN_FUNC(__st_r13_to_r18) HIDDEN_FUNC(__st_r13_to_r19) HIDDEN_FUNC(__st_r13_to_r20) HIDDEN_FUNC(__st_r13_to_r21) HIDDEN_FUNC(__st_r13_to_r22) HIDDEN_FUNC(__st_r13_to_r23) HIDDEN_FUNC(__st_r13_to_r24) HIDDEN_FUNC(__st_r13_to_r25) .align 4 SYM(__st_r13_to_r25): st r25, [sp,48] SYM(__st_r13_to_r24): st r24, [sp,44] SYM(__st_r13_to_r23): st r23, [sp,40] SYM(__st_r13_to_r22): st r22, [sp,36] SYM(__st_r13_to_r21): st r21, [sp,32] SYM(__st_r13_to_r20): st r20, [sp,28] SYM(__st_r13_to_r19): st r19, [sp,24] SYM(__st_r13_to_r18): st r18, [sp,20] SYM(__st_r13_to_r17): st r17, [sp,16] SYM(__st_r13_to_r16): st r16, [sp,12] SYM(__st_r13_to_r15): #ifdef __ARC700__ st r15, [sp,8] ; minimum function size to avoid stall: 6 bytes.
#else st_s r15, [sp,8] #endif st_s r14, [sp,4] j_s.d [%blink] st_s r13, [sp,0] ENDFUNC(__st_r13_to_r15) ENDFUNC(__st_r13_to_r16) ENDFUNC(__st_r13_to_r17) ENDFUNC(__st_r13_to_r18) ENDFUNC(__st_r13_to_r19) ENDFUNC(__st_r13_to_r20) ENDFUNC(__st_r13_to_r21) ENDFUNC(__st_r13_to_r22) ENDFUNC(__st_r13_to_r23) ENDFUNC(__st_r13_to_r24) ENDFUNC(__st_r13_to_r25) #endif /* L_millicodethunk_st */ #ifdef L_millicodethunk_ld .section .text .align 4 ; ================================== ; the loads .global SYM(__ld_r13_to_r15) .global SYM(__ld_r13_to_r16) .global SYM(__ld_r13_to_r17) .global SYM(__ld_r13_to_r18) .global SYM(__ld_r13_to_r19) .global SYM(__ld_r13_to_r20) .global SYM(__ld_r13_to_r21) .global SYM(__ld_r13_to_r22) .global SYM(__ld_r13_to_r23) .global SYM(__ld_r13_to_r24) .global SYM(__ld_r13_to_r25) HIDDEN_FUNC(__ld_r13_to_r15) HIDDEN_FUNC(__ld_r13_to_r16) HIDDEN_FUNC(__ld_r13_to_r17) HIDDEN_FUNC(__ld_r13_to_r18) HIDDEN_FUNC(__ld_r13_to_r19) HIDDEN_FUNC(__ld_r13_to_r20) HIDDEN_FUNC(__ld_r13_to_r21) HIDDEN_FUNC(__ld_r13_to_r22) HIDDEN_FUNC(__ld_r13_to_r23) HIDDEN_FUNC(__ld_r13_to_r24) HIDDEN_FUNC(__ld_r13_to_r25) SYM(__ld_r13_to_r25): ld r25, [sp,48] SYM(__ld_r13_to_r24): ld r24, [sp,44] SYM(__ld_r13_to_r23): ld r23, [sp,40] SYM(__ld_r13_to_r22): ld r22, [sp,36] SYM(__ld_r13_to_r21): ld r21, [sp,32] SYM(__ld_r13_to_r20): ld r20, [sp,28] SYM(__ld_r13_to_r19): ld r19, [sp,24] SYM(__ld_r13_to_r18): ld r18, [sp,20] SYM(__ld_r13_to_r17): ld r17, [sp,16] SYM(__ld_r13_to_r16): ld r16, [sp,12] SYM(__ld_r13_to_r15): #ifdef __ARC700__ ld r15, [sp,8] ; minimum function size to avoid stall: 6 bytes. #else ld_s r15, [sp,8] #endif ld_s r14, [sp,4] j_s.d [%blink] ld_s r13, [sp,0] ENDFUNC(__ld_r13_to_r15) ENDFUNC(__ld_r13_to_r16) ENDFUNC(__ld_r13_to_r17) ENDFUNC(__ld_r13_to_r18) ENDFUNC(__ld_r13_to_r19) ENDFUNC(__ld_r13_to_r20) ENDFUNC(__ld_r13_to_r21) ENDFUNC(__ld_r13_to_r22) ENDFUNC(__ld_r13_to_r23) ENDFUNC(__ld_r13_to_r24) ENDFUNC(__ld_r13_to_r25) #endif /* L_millicodethunk_ld */ #ifdef L_millicodethunk_ret .global SYM(__ld_r13_to_r14_ret) .global SYM(__ld_r13_to_r15_ret) .global SYM(__ld_r13_to_r16_ret) .global SYM(__ld_r13_to_r17_ret) .global SYM(__ld_r13_to_r18_ret) .global SYM(__ld_r13_to_r19_ret) .global SYM(__ld_r13_to_r20_ret) .global SYM(__ld_r13_to_r21_ret) .global SYM(__ld_r13_to_r22_ret) .global SYM(__ld_r13_to_r23_ret) .global SYM(__ld_r13_to_r24_ret) .global SYM(__ld_r13_to_r25_ret) HIDDEN_FUNC(__ld_r13_to_r14_ret) HIDDEN_FUNC(__ld_r13_to_r15_ret) HIDDEN_FUNC(__ld_r13_to_r16_ret) HIDDEN_FUNC(__ld_r13_to_r17_ret) HIDDEN_FUNC(__ld_r13_to_r18_ret) HIDDEN_FUNC(__ld_r13_to_r19_ret) HIDDEN_FUNC(__ld_r13_to_r20_ret) HIDDEN_FUNC(__ld_r13_to_r21_ret) HIDDEN_FUNC(__ld_r13_to_r22_ret) HIDDEN_FUNC(__ld_r13_to_r23_ret) HIDDEN_FUNC(__ld_r13_to_r24_ret) HIDDEN_FUNC(__ld_r13_to_r25_ret) .section .text .align 4 SYM(__ld_r13_to_r25_ret): ld r25, [sp,48] SYM(__ld_r13_to_r24_ret): ld r24, [sp,44] SYM(__ld_r13_to_r23_ret): ld r23, [sp,40] SYM(__ld_r13_to_r22_ret): ld r22, [sp,36] SYM(__ld_r13_to_r21_ret): ld r21, [sp,32] SYM(__ld_r13_to_r20_ret): ld r20, [sp,28] SYM(__ld_r13_to_r19_ret): ld r19, [sp,24] SYM(__ld_r13_to_r18_ret): ld r18, [sp,20] SYM(__ld_r13_to_r17_ret): ld r17, [sp,16] SYM(__ld_r13_to_r16_ret): ld r16, [sp,12] SYM(__ld_r13_to_r15_ret): ld r15, [sp,8] SYM(__ld_r13_to_r14_ret): ld blink,[sp,52] ld_s r14, [sp,4] ld.ab r13, [sp,56] j_s.d [%blink] add_s sp,sp,4 ENDFUNC(__ld_r13_to_r14_ret) ENDFUNC(__ld_r13_to_r15_ret) ENDFUNC(__ld_r13_to_r16_ret) ENDFUNC(__ld_r13_to_r17_ret)
ENDFUNC(__ld_r13_to_r18_ret) ENDFUNC(__ld_r13_to_r19_ret) ENDFUNC(__ld_r13_to_r20_ret) ENDFUNC(__ld_r13_to_r21_ret) ENDFUNC(__ld_r13_to_r22_ret) ENDFUNC(__ld_r13_to_r23_ret) ENDFUNC(__ld_r13_to_r24_ret) ENDFUNC(__ld_r13_to_r25_ret) #endif /* L_millicodethunk_ret */ #if defined (__ARC700__) || defined (__ARC_FPX_QUARK__) #ifdef L_adddf3 #ifdef __ARC_NORM__ #include "ieee-754/adddf3.S" #endif #endif #ifdef L_muldf3 #ifdef __ARC_MPY__ #include "ieee-754/muldf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL64__) #include "ieee-754/arc600-mul64/muldf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__) #include "ieee-754/arc600-dsp/muldf3.S" #endif #endif #ifdef L_addsf3 #ifdef __ARC_NORM__ #include "ieee-754/addsf3.S" #endif #endif #ifdef L_mulsf3 #ifdef __ARC_MPY__ #include "ieee-754/mulsf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL64__) #include "ieee-754/arc600-mul64/mulsf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__) #include "ieee-754/arc600-dsp/mulsf3.S" #elif defined (__ARC_NORM__) #include "ieee-754/arc600/mulsf3.S" #endif #endif #ifdef L_divdf3 #ifdef __ARC_MPY__ #include "ieee-754/divdf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL64__) #include "ieee-754/arc600-mul64/divdf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__) #include "ieee-754/arc600-dsp/divdf3.S" #endif #endif #ifdef L_divsf3 #ifdef __ARC_MPY__ #include "ieee-754/divsf3-stdmul.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL64__) #include "ieee-754/arc600-mul64/divsf3.S" #elif defined (__ARC_NORM__) && defined(__ARC_MUL32BY16__) #include "ieee-754/arc600-dsp/divsf3.S" #elif defined (__ARC_NORM__) #include "ieee-754/arc600/divsf3.S" #endif #endif #ifdef L_extendsfdf2 #ifdef __ARC_NORM__ #include "ieee-754/extendsfdf2.S" #endif #endif #ifdef L_truncdfsf2 #ifdef __ARC_NORM__ #include "ieee-754/truncdfsf2.S" #endif #endif #ifdef L_floatsidf #ifdef __ARC_NORM__ #include "ieee-754/floatsidf.S" #endif #endif #ifdef L_floatsisf #ifdef __ARC_NORM__ #include "ieee-754/floatsisf.S" #endif #endif #ifdef L_floatunsidf #ifdef __ARC_NORM__ #include "ieee-754/floatunsidf.S" #endif #endif #ifdef L_fixdfsi #ifdef __ARC_NORM__ #include "ieee-754/fixdfsi.S" #endif #endif #ifdef L_fixsfsi #ifdef __ARC_NORM__ #include "ieee-754/fixsfsi.S" #endif #endif #ifdef L_fixunsdfsi #ifdef __ARC_NORM__ #include "ieee-754/fixunsdfsi.S" #endif #endif #ifdef L_eqdf2 #ifdef __ARC_NORM__ #include "ieee-754/eqdf2.S" #endif #endif #ifdef L_eqsf2 #ifdef __ARC_NORM__ #include "ieee-754/eqsf2.S" #endif #endif #ifdef L_gtdf2 #ifdef __ARC_NORM__ #include "ieee-754/gtdf2.S" #endif #endif #ifdef L_gtsf2 #ifdef __ARC_NORM__ #include "ieee-754/gtsf2.S" #endif #endif #ifdef L_gedf2 #ifdef __ARC_NORM__ #include "ieee-754/gedf2.S" #endif #endif #ifdef L_gesf2 #ifdef __ARC_NORM__ #include "ieee-754/gesf2.S" #endif #endif #ifdef L_uneqdf2 #ifdef __ARC_NORM__ #include "ieee-754/uneqdf2.S" #endif #endif #ifdef L_uneqsf2 #ifdef __ARC_NORM__ #include "ieee-754/uneqsf2.S" #endif #endif #ifdef L_orddf2 #ifdef __ARC_NORM__ #include "ieee-754/orddf2.S" #endif #endif #ifdef L_ordsf2 #ifdef __ARC_NORM__ #include "ieee-754/ordsf2.S" #endif #endif #endif /* ARC_OPTFPE */ #endif /* !__ARC_RF16__ */
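Of the routines above, the ARC700 __muldiv path is the least obvious: for divisors covered by .Ldivtab it forms the quotient as a high multiply (mpyhu) by a precomputed reciprocal, shifted right by floor(log2(d)), while powers of two take a shift-only path (the bic.f test). A C model of the unsigned magnitude calculation (a sketch under those assumptions; muldiv_model is a hypothetical name, and 0xAAAAAAAB is the .Ldivtab entry for d == 3):

#include <stdint.h>
#include <stdio.h>

/* q = mpyhu(n, inv[d]) >> floor(log2(d)); powers of two skip the
   multiply, exactly as the bic.f/mpyhu.ne pair selects above.  */
static uint32_t
muldiv_model (uint32_t n, uint32_t d, uint32_t inv)
{
  int log2d = 31 - __builtin_clz (d);   /* what norm/rsub compute */
  if ((d & (d - 1)) == 0)               /* power of two: shift only */
    return n >> log2d;
  uint32_t hi = (uint32_t) (((uint64_t) n * inv) >> 32);  /* mpyhu */
  return hi >> log2d;
}

int main (void)
{
  printf ("%u\n", muldiv_model (100, 3, 0xAAAAAAABu));  /* 33 */
  printf ("%u\n", muldiv_model (100, 4, 0));            /* 25 */
  return 0;
}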
4ms/metamodule-plugin-sdk
11,199
plugin-libc/libgcc/config/riscv/save-restore.S
/* Callee-saved register spill and fill routines for RISC-V. Copyright (C) 2016-2022 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "riscv-asm.h" .text #if __riscv_xlen == 64 FUNC_BEGIN (__riscv_save_12) .cfi_startproc # __riscv_save_* routines use t0/x5 as return address .cfi_return_column 5 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 0 sd s11, 8(sp) .cfi_offset 27, -104 j .Ls10 FUNC_BEGIN (__riscv_save_11) FUNC_BEGIN (__riscv_save_10) .cfi_restore 27 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 1 .Ls10: sd s10, 16(sp) .cfi_offset 26, -96 sd s9, 24(sp) .cfi_offset 25, -88 j .Ls8 FUNC_BEGIN (__riscv_save_9) FUNC_BEGIN (__riscv_save_8) .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 2 .Ls8: sd s8, 32(sp) .cfi_offset 24, -80 sd s7, 40(sp) .cfi_offset 23, -72 j .Ls6 FUNC_BEGIN (__riscv_save_7) FUNC_BEGIN (__riscv_save_6) .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 3 .Ls6: sd s6, 48(sp) .cfi_offset 22, -64 sd s5, 56(sp) .cfi_offset 21, -56 j .Ls4 FUNC_BEGIN (__riscv_save_5) FUNC_BEGIN (__riscv_save_4) .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 4 .Ls4: sd s4, 64(sp) .cfi_offset 20, -48 sd s3, 72(sp) .cfi_offset 19, -40 j .Ls2 FUNC_BEGIN (__riscv_save_3) FUNC_BEGIN (__riscv_save_2) .cfi_restore 19 .cfi_restore 20 .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 addi sp, sp, -112 .cfi_def_cfa_offset 112 li t1, 5 .Ls2: sd s2, 80(sp) .cfi_offset 18, -32 sd s1, 88(sp) .cfi_offset 9, -24 sd s0, 96(sp) .cfi_offset 8, -16 sd ra, 104(sp) .cfi_offset 1, -8 slli t1, t1, 4 # CFA info is not correct for the next 2 instructions, since t1's # value depends on how many registers are actually saved.
add sp, sp, t1 jr t0 .cfi_endproc FUNC_END (__riscv_save_12) FUNC_END (__riscv_save_11) FUNC_END (__riscv_save_10) FUNC_END (__riscv_save_9) FUNC_END (__riscv_save_8) FUNC_END (__riscv_save_7) FUNC_END (__riscv_save_6) FUNC_END (__riscv_save_5) FUNC_END (__riscv_save_4) FUNC_END (__riscv_save_3) FUNC_END (__riscv_save_2) FUNC_BEGIN (__riscv_save_1) FUNC_BEGIN (__riscv_save_0) .cfi_startproc # __riscv_save_* routines use t0/x5 as return address .cfi_return_column 5 addi sp, sp, -16 .cfi_def_cfa_offset 16 sd s0, 0(sp) .cfi_offset 8, -16 sd ra, 8(sp) .cfi_offset 1, -8 jr t0 .cfi_endproc FUNC_END (__riscv_save_1) FUNC_END (__riscv_save_0) FUNC_BEGIN (__riscv_restore_12) .cfi_startproc .cfi_def_cfa_offset 112 .cfi_offset 27, -104 .cfi_offset 26, -96 .cfi_offset 25, -88 .cfi_offset 24, -80 .cfi_offset 23, -72 .cfi_offset 22, -64 .cfi_offset 21, -56 .cfi_offset 20, -48 .cfi_offset 19, -40 .cfi_offset 18, -32 .cfi_offset 9, -24 .cfi_offset 8, -16 .cfi_offset 1, -8 ld s11, 8(sp) .cfi_restore 27 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_11) FUNC_BEGIN (__riscv_restore_10) .cfi_restore 27 .cfi_def_cfa_offset 96 ld s10, 0(sp) .cfi_restore 26 ld s9, 8(sp) .cfi_restore 25 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_9) FUNC_BEGIN (__riscv_restore_8) .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 80 ld s8, 0(sp) .cfi_restore 24 ld s7, 8(sp) .cfi_restore 23 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_7) FUNC_BEGIN (__riscv_restore_6) .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 64 ld s6, 0(sp) .cfi_restore 22 ld s5, 8(sp) .cfi_restore 21 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_5) FUNC_BEGIN (__riscv_restore_4) .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 48 ld s4, 0(sp) .cfi_restore 20 ld s3, 8(sp) .cfi_restore 19 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_3) FUNC_BEGIN (__riscv_restore_2) .cfi_restore 19 .cfi_restore 20 .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 32 ld s2, 0(sp) .cfi_restore 18 ld s1, 8(sp) .cfi_restore 9 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_1) FUNC_BEGIN (__riscv_restore_0) .cfi_restore 9 .cfi_restore 18 .cfi_restore 19 .cfi_restore 20 .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 16 ld s0, 0(sp) .cfi_restore 8 ld ra, 8(sp) .cfi_restore 1 addi sp, sp, 16 .cfi_def_cfa_offset 0 ret .cfi_endproc FUNC_END (__riscv_restore_12) FUNC_END (__riscv_restore_11) FUNC_END (__riscv_restore_10) FUNC_END (__riscv_restore_9) FUNC_END (__riscv_restore_8) FUNC_END (__riscv_restore_7) FUNC_END (__riscv_restore_6) FUNC_END (__riscv_restore_5) FUNC_END (__riscv_restore_4) FUNC_END (__riscv_restore_3) FUNC_END (__riscv_restore_2) FUNC_END (__riscv_restore_1) FUNC_END (__riscv_restore_0) #else #ifdef __riscv_32e FUNC_BEGIN(__riscv_save_2) FUNC_BEGIN(__riscv_save_1) FUNC_BEGIN(__riscv_save_0) .cfi_startproc # __riscv_save_* routines use t0/x5 as return address .cfi_return_column 5 addi sp, sp, -12 .cfi_def_cfa_offset 12 sw s1, 0(sp) .cfi_offset 9, -12 sw s0, 4(sp) .cfi_offset 8, -8 sw ra, 8(sp) .cfi_offset 1, -4 jr t0 .cfi_endproc FUNC_END(__riscv_save_2) FUNC_END(__riscv_save_1) FUNC_END(__riscv_save_0) FUNC_BEGIN(__riscv_restore_2) FUNC_BEGIN(__riscv_restore_1) FUNC_BEGIN(__riscv_restore_0) .cfi_startproc .cfi_def_cfa_offset 12 lw s1, 0(sp) .cfi_restore 9 lw s0, 4(sp)
.cfi_restore 8 lw ra, 8(sp) .cfi_restore 1 addi sp, sp, 12 .cfi_def_cfa_offset 0 ret .cfi_endproc FUNC_END(__riscv_restore_2) FUNC_END(__riscv_restore_1) FUNC_END(__riscv_restore_0) #else FUNC_BEGIN (__riscv_save_12) .cfi_startproc # __riscv_save_* routines use t0/x5 as return address .cfi_return_column 5 addi sp, sp, -64 .cfi_def_cfa_offset 64 li t1, 0 sw s11, 12(sp) .cfi_offset 27, -52 j .Ls10 FUNC_BEGIN (__riscv_save_11) FUNC_BEGIN (__riscv_save_10) FUNC_BEGIN (__riscv_save_9) FUNC_BEGIN (__riscv_save_8) .cfi_restore 27 addi sp, sp, -64 .cfi_def_cfa_offset 64 li t1, -16 .Ls10: sw s10, 16(sp) .cfi_offset 26, -48 sw s9, 20(sp) .cfi_offset 25, -44 sw s8, 24(sp) .cfi_offset 24, -40 sw s7, 28(sp) .cfi_offset 23, -36 j .Ls6 FUNC_BEGIN (__riscv_save_7) FUNC_BEGIN (__riscv_save_6) FUNC_BEGIN (__riscv_save_5) FUNC_BEGIN (__riscv_save_4) .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 addi sp, sp, -64 .cfi_def_cfa_offset 64 li t1, -32 .Ls6: sw s6, 32(sp) .cfi_offset 22, -32 sw s5, 36(sp) .cfi_offset 21, -28 sw s4, 40(sp) .cfi_offset 20, -24 sw s3, 44(sp) .cfi_offset 19, -20 sw s2, 48(sp) .cfi_offset 18, -16 sw s1, 52(sp) .cfi_offset 9, -12 sw s0, 56(sp) .cfi_offset 8, -8 sw ra, 60(sp) .cfi_offset 1, -4 # CFA info is not correct for the next 2 instructions, since t1's # value depends on how many registers are actually saved. sub sp, sp, t1 jr t0 .cfi_endproc FUNC_END (__riscv_save_12) FUNC_END (__riscv_save_11) FUNC_END (__riscv_save_10) FUNC_END (__riscv_save_9) FUNC_END (__riscv_save_8) FUNC_END (__riscv_save_7) FUNC_END (__riscv_save_6) FUNC_END (__riscv_save_5) FUNC_END (__riscv_save_4) FUNC_BEGIN (__riscv_save_3) FUNC_BEGIN (__riscv_save_2) FUNC_BEGIN (__riscv_save_1) FUNC_BEGIN (__riscv_save_0) .cfi_startproc # __riscv_save_* routines use t0/x5 as return address .cfi_return_column 5 addi sp, sp, -16 .cfi_def_cfa_offset 16 sw s2, 0(sp) .cfi_offset 18, -16 sw s1, 4(sp) .cfi_offset 9, -12 sw s0, 8(sp) .cfi_offset 8, -8 sw ra, 12(sp) .cfi_offset 1, -4 jr t0 .cfi_endproc FUNC_END (__riscv_save_3) FUNC_END (__riscv_save_2) FUNC_END (__riscv_save_1) FUNC_END (__riscv_save_0) FUNC_BEGIN (__riscv_restore_12) .cfi_startproc .cfi_def_cfa_offset 64 .cfi_offset 27, -52 .cfi_offset 26, -48 .cfi_offset 25, -44 .cfi_offset 24, -40 .cfi_offset 23, -36 .cfi_offset 22, -32 .cfi_offset 21, -28 .cfi_offset 20, -24 .cfi_offset 19, -20 .cfi_offset 18, -16 .cfi_offset 9, -12 .cfi_offset 8, -8 .cfi_offset 1, -4 lw s11, 12(sp) .cfi_restore 27 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_11) FUNC_BEGIN (__riscv_restore_10) FUNC_BEGIN (__riscv_restore_9) FUNC_BEGIN (__riscv_restore_8) .cfi_restore 27 .cfi_def_cfa_offset 48 lw s10, 0(sp) .cfi_restore 26 lw s9, 4(sp) .cfi_restore 25 lw s8, 8(sp) .cfi_restore 24 lw s7, 12(sp) .cfi_restore 23 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_7) FUNC_BEGIN (__riscv_restore_6) FUNC_BEGIN (__riscv_restore_5) FUNC_BEGIN (__riscv_restore_4) .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 32 lw s6, 0(sp) .cfi_restore 22 lw s5, 4(sp) .cfi_restore 21 lw s4, 8(sp) .cfi_restore 20 lw s3, 12(sp) .cfi_restore 19 addi sp, sp, 16 FUNC_BEGIN (__riscv_restore_3) FUNC_BEGIN (__riscv_restore_2) FUNC_BEGIN (__riscv_restore_1) FUNC_BEGIN (__riscv_restore_0) .cfi_restore 19 .cfi_restore 20 .cfi_restore 21 .cfi_restore 22 .cfi_restore 23 .cfi_restore 24 .cfi_restore 25 .cfi_restore 26 .cfi_restore 27 .cfi_def_cfa_offset 16 lw s2, 0(sp) .cfi_restore 18 lw s1, 4(sp) .cfi_restore 9 lw s0, 8(sp) .cfi_restore 8 lw ra, 12(sp) .cfi_restore 1 addi sp, sp, 16 .cfi_def_cfa_offset
0 ret .cfi_endproc FUNC_END (__riscv_restore_12) FUNC_END (__riscv_restore_11) FUNC_END (__riscv_restore_10) FUNC_END (__riscv_restore_9) FUNC_END (__riscv_restore_8) FUNC_END (__riscv_restore_7) FUNC_END (__riscv_restore_6) FUNC_END (__riscv_restore_5) FUNC_END (__riscv_restore_4) FUNC_END (__riscv_restore_3) FUNC_END (__riscv_restore_2) FUNC_END (__riscv_restore_1) FUNC_END (__riscv_restore_0) #endif /* __riscv_32e */ #endif /* __riscv_xlen == 64 */
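The thunks above trade a little speed for a lot of code size: prologues reach __riscv_save_N through t0 (so ra stays live for the real return), the entry points share one tail, and the final add/sub of t1 trims the over-allocated frame. A C model of the RV32 (non-RVE) frame grouping that the li t1, ... constants encode (illustrative; frame_bytes_for_save_n is a made-up helper):

#include <stdio.h>

/* Entry points are grouped four at a time, so the kept frame grows
   in 16-byte steps: save_0..3 keep 16 bytes, save_4..7 keep 32,
   save_8..11 keep 48, save_12 keeps the full 64.  */
static unsigned
frame_bytes_for_save_n (int n_saved)
{
  if (n_saved <= 3)  return 16;
  if (n_saved <= 7)  return 32;
  if (n_saved <= 11) return 48;
  return 64;
}

int main (void)
{
  for (int n = 0; n <= 12; n += 4)
    printf ("__riscv_save_%d keeps a %u-byte frame\n",
            n, frame_bytes_for_save_n (n));
  return 0;
}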
4ms/metamodule-plugin-sdk
1,379
plugin-libc/libgcc/config/riscv/muldi3.S
/* Integer multiplication routines for RISC-V. Copyright (C) 2016-2022 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "riscv-asm.h" .text .align 2 #if __riscv_xlen == 32 /* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */ # define __muldi3 __mulsi3 #endif FUNC_BEGIN (__muldi3) mv a2, a0 li a0, 0 .L1: andi a3, a1, 1 beqz a3, .L2 add a0, a0, a2 .L2: srli a1, a1, 1 slli a2, a2, 1 bnez a1, .L1 ret FUNC_END (__muldi3)
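The loop above is the textbook shift-and-add multiply: examine the multiplier one bit at a time, accumulating a progressively shifted copy of the multiplicand for each set bit. The same computation in C, instruction for instruction (a sketch; on RV32 the routine also serves as __mulsi3 through the #define above):

#include <stdint.h>
#include <stdio.h>

/* C equivalent of the __muldi3 loop: a is the shifted multiplicand
   (a2), b is consumed bit by bit (a1), acc accumulates (a0).  */
static uint64_t
muldi3_model (uint64_t a, uint64_t b)
{
  uint64_t acc = 0;
  while (b)                /* bnez a1, .L1 */
    {
      if (b & 1)           /* andi a3, a1, 1 */
        acc += a;          /* add  a0, a0, a2 */
      b >>= 1;             /* srli a1, a1, 1 */
      a <<= 1;             /* slli a2, a2, 1 */
    }
  return acc;
}

int main (void)
{
  printf ("%llu\n", (unsigned long long) muldi3_model (123, 456));  /* 56088 */
  return 0;
}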
4ms/metamodule-plugin-sdk
3,706
plugin-libc/libgcc/config/riscv/div.S
/* Integer division routines for RISC-V. Copyright (C) 2016-2022 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "riscv-asm.h" .text .align 2 #if __riscv_xlen == 32 /* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */ # define __udivdi3 __udivsi3 # define __umoddi3 __umodsi3 # define __divdi3 __divsi3 # define __moddi3 __modsi3 #else FUNC_BEGIN (__udivsi3) /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */ sll a0, a0, 32 sll a1, a1, 32 move t0, ra jal HIDDEN_JUMPTARGET(__udivdi3) sext.w a0, a0 jr t0 FUNC_END (__udivsi3) FUNC_BEGIN (__umodsi3) /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); cast a1 to uint32_t. */ sll a0, a0, 32 sll a1, a1, 32 srl a0, a0, 32 srl a1, a1, 32 move t0, ra jal HIDDEN_JUMPTARGET(__udivdi3) sext.w a0, a1 jr t0 FUNC_END (__umodsi3) FUNC_ALIAS (__modsi3, __moddi3) FUNC_BEGIN (__divsi3) /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */ li t0, -1 beq a1, t0, .L20 #endif FUNC_BEGIN (__divdi3) bltz a0, .L10 bltz a1, .L11 /* Since the quotient is positive, fall into __udivdi3. */ FUNC_BEGIN (__udivdi3) mv a2, a1 mv a1, a0 li a0, -1 beqz a2, .L5 li a3, 1 bgeu a2, a1, .L2 .L1: blez a2, .L2 slli a2, a2, 1 slli a3, a3, 1 bgtu a1, a2, .L1 .L2: li a0, 0 .L3: bltu a1, a2, .L4 sub a1, a1, a2 or a0, a0, a3 .L4: srli a3, a3, 1 srli a2, a2, 1 bnez a3, .L3 .L5: ret FUNC_END (__udivdi3) HIDDEN_DEF (__udivdi3) FUNC_BEGIN (__umoddi3) /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */ move t0, ra jal HIDDEN_JUMPTARGET(__udivdi3) move a0, a1 jr t0 FUNC_END (__umoddi3) /* Handle negative arguments to __divdi3. */ .L10: neg a0, a0 /* Zero is handled as a negative so that the result will not be inverted. */ bgtz a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */ neg a1, a1 j HIDDEN_JUMPTARGET(__udivdi3) /* Compute __udivdi3(-a0, -a1). */ .L11: /* Compute __udivdi3(a0, -a1), then negate the result. */ neg a1, a1 .L12: move t0, ra jal HIDDEN_JUMPTARGET(__udivdi3) neg a0, a0 jr t0 FUNC_END (__divdi3) FUNC_BEGIN (__moddi3) move t0, ra bltz a1, .L31 bltz a0, .L32 .L30: jal HIDDEN_JUMPTARGET(__udivdi3) /* The dividend is not negative. */ move a0, a1 jr t0 .L31: neg a1, a1 bgez a0, .L30 .L32: neg a0, a0 jal HIDDEN_JUMPTARGET(__udivdi3) /* The dividend is negative. */ neg a0, a1 jr t0 FUNC_END (__moddi3) #if __riscv_xlen == 64 /* continuation of __divsi3 */ .L20: sll t0, t0, 31 bne a0, t0, __divdi3 ret FUNC_END (__divsi3) #endif
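All the signed entry points above funnel into __udivdi3 and fix signs afterwards: the quotient is negated when the operand signs differ, while the remainder takes the dividend's sign. A C model of that sign handling (illustrative; udiv_model stands in for __udivdi3 and mimics its all-ones quotient on division by zero):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __udivdi3: quotient in the return value, remainder
   through *rem; division by zero yields an all-ones quotient, like
   the "li a0, -1 / beqz a2, .L5" path above.  */
static uint64_t
udiv_model (uint64_t a, uint64_t b, uint64_t *rem)
{
  if (b == 0)
    {
      *rem = a;
      return ~0ULL;
    }
  *rem = a % b;
  return a / b;
}

static int64_t
divdi3_model (int64_t a, int64_t b)   /* the .L10-.L12 fixups */
{
  uint64_t rem;
  int negate = (a < 0) != (b < 0);
  uint64_t q = udiv_model (a < 0 ? 0 - (uint64_t) a : (uint64_t) a,
                           b < 0 ? 0 - (uint64_t) b : (uint64_t) b, &rem);
  return (int64_t) (negate ? 0 - q : q);
}

static int64_t
moddi3_model (int64_t a, int64_t b)   /* the .L30-.L32 fixups */
{
  uint64_t rem;
  udiv_model (a < 0 ? 0 - (uint64_t) a : (uint64_t) a,
              b < 0 ? 0 - (uint64_t) b : (uint64_t) b, &rem);
  return (int64_t) (a < 0 ? 0 - rem : rem);   /* dividend's sign */
}

int main (void)
{
  printf ("%lld rem %lld\n",
          (long long) divdi3_model (-7, 2),
          (long long) moddi3_model (-7, 2));   /* -3 rem -1 */
  return 0;
}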
4ms/metamodule-plugin-sdk
1,296
plugin-libc/libgcc/config/sh/crtn.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. This file was adapted from glibc sources. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* See an explanation about .init and .fini in crti.S. */ .section .init mov r14,r15 lds.l @r15+,pr mov.l @r15+,r14 rts #ifdef __ELF__ mov.l @r15+,r12 #else nop #endif .section .fini mov r14,r15 lds.l @r15+,pr mov.l @r15+,r14 rts #ifdef __ELF__ mov.l @r15+,r12 #else nop #endif
4ms/metamodule-plugin-sdk
15,380
plugin-libc/libgcc/config/sh/lib1funcs-4-300.S
/* Copyright (C) 2004-2022 Free Software Foundation, Inc. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* libgcc routines for the STMicroelectronics ST40-300 CPU. Contributed by J"orn Rennecke joern.rennecke@st.com. */ #include "lib1funcs.h" #ifdef L_div_table #if defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__) /* This code uses shld, thus is not suitable for SH1 / SH2. */ /* Signed / unsigned division without use of FPU, optimized for SH4-300. Uses a lookup table for divisors in the range -128 .. +127, and div1 with case distinction for larger divisors in three more ranges. The code is lumped together with the table to allow the use of mova. */ #ifdef __LITTLE_ENDIAN__ #define L_LSB 0 #define L_LSWMSB 1 #define L_MSWLSB 2 #else #define L_LSB 3 #define L_LSWMSB 2 #define L_MSWLSB 1 #endif .global GLOBAL(udivsi3_i4i) .global GLOBAL(sdivsi3_i4i) FUNC(GLOBAL(udivsi3_i4i)) FUNC(GLOBAL(sdivsi3_i4i)) .balign 4 LOCAL(div_ge8m): ! 10 cycles up to here rotcr r1 ! signed shift must use original sign from r4 div0s r5,r4 mov #24,r7 shld r7,r6 shad r0,r1 rotcl r6 div1 r5,r1 swap.w r5,r0 ! detect -0x80000000 : 0x800000 rotcl r6 swap.w r4,r7 div1 r5,r1 swap.b r7,r7 rotcl r6 or r7,r0 div1 r5,r1 swap.w r0,r7 rotcl r6 or r7,r0 div1 r5,r1 add #-0x80,r0 rotcl r6 extu.w r0,r0 div1 r5,r1 neg r0,r0 rotcl r6 swap.w r0,r0 div1 r5,r1 mov.l @r15+,r7 and r6,r0 rotcl r6 div1 r5,r1 shll2 r0 rotcl r6 exts.b r0,r0 div1 r5,r1 swap.w r0,r0 exts.w r0,r1 exts.b r6,r0 mov.l @r15+,r6 rotcl r0 rts sub r1,r0 ! 31 cycles up to here .balign 4 LOCAL(udiv_ge64k): ! 3 cycles up to here mov r4,r0 shlr8 r0 div0u cmp/hi r0,r5 bt LOCAL(udiv_r8) mov.l r5,@-r15 shll8 r5 ! 7 cycles up to here .rept 8 div1 r5,r0 .endr extu.b r4,r1 ! 15 cycles up to here extu.b r0,r6 xor r1,r0 xor r6,r0 swap.b r6,r6 .rept 8 div1 r5,r0 .endr ! 25 cycles up to here extu.b r0,r0 mov.l @r15+,r5 or r6,r0 mov.l @r15+,r6 rts rotcl r0 ! 28 cycles up to here .balign 4 LOCAL(udiv_r8): ! 6 cycles up to here mov.l r4,@-r15 shll16 r4 shll8 r4 ! shll r4 mov r0,r1 div1 r5,r1 mov r4,r0 rotcl r0 mov.l @r15+,r4 div1 r5,r1 ! 12 cycles up to here .rept 6 rotcl r0; div1 r5,r1 .endr mov.l @r15+,r6 ! 24 cycles up to here rts rotcl r0 .balign 4 LOCAL(div_ge32k): ! 6 cycles up to here mov.l r7,@-r15 swap.w r5,r6 exts.b r6,r7 exts.w r6,r6 cmp/eq r6,r7 extu.b r1,r6 bf/s LOCAL(div_ge8m) cmp/hi r1,r4 ! copy sign bit of r4 into T rotcr r1 ! signed shift must use original sign from r4 div0s r5,r4 shad r0,r1 shll8 r5 div1 r5,r1 mov r5,r7 !
detect r4 == 0x80000000 && r5 == 0x8000(00) div1 r5,r1 shlr8 r7 div1 r5,r1 swap.w r4,r0 div1 r5,r1 swap.b r0,r0 div1 r5,r1 or r0,r7 div1 r5,r1 add #-0x80,r7 div1 r5,r1 swap.w r7,r0 div1 r5,r1 or r0,r7 extu.b r1,r0 xor r6,r1 xor r0,r1 exts.b r0,r0 div1 r5,r1 extu.w r7,r7 div1 r5,r1 neg r7,r7 ! upper 16 bits of r7 == 0 if r4 == 0x80000000 && r5 == 0x8000 div1 r5,r1 and r0,r7 div1 r5,r1 swap.w r7,r7 ! 26 cycles up to here. div1 r5,r1 shll8 r0 div1 r5,r1 exts.w r7,r7 div1 r5,r1 add r0,r0 div1 r5,r1 sub r7,r0 extu.b r1,r1 mov.l @r15+,r7 rotcl r1 mov.l @r15+,r6 add r1,r0 mov #-8,r1 rts shad r1,r5 ! 34 cycles up to here .balign 4 GLOBAL(udivsi3_i4i): mov.l r6,@-r15 extu.w r5,r6 cmp/eq r5,r6 mov #0x7f,r0 bf LOCAL(udiv_ge64k) cmp/hi r0,r5 bf LOCAL(udiv_le128) mov r4,r1 shlr8 r1 div0u shlr r1 shll16 r6 div1 r6,r1 extu.b r4,r0 ! 7 cycles up to here .rept 8 div1 r6,r1 .endr ! 15 cycles up to here xor r1,r0 ! xor dividend with result lsb .rept 6 div1 r6,r1 .endr mov.l r7,@-r15 ! 21 cycles up to here div1 r6,r1 extu.b r0,r7 div1 r6,r1 shll8 r7 extu.w r1,r0 xor r7,r1 ! replace lsb of result with lsb of dividend div1 r6,r1 mov #0,r7 div1 r6,r1 ! div1 r6,r1 bra LOCAL(div_end) div1 r6,r1 ! 28 cycles up to here /* This is link-compatible with a GLOBAL(sdivsi3) call, but we effectively clobber only r1, macl and mach */ /* Because negative quotients are calculated as one's complements, -0x80000000 divided by the smallest positive number of a number range (0x80, 0x8000, 0x800000) causes saturation in the one's complement representation, and we have to suppress the one's -> two's complement adjustment. Since positive numbers don't get such an adjustment, it's OK to also compute one's -> two's complement adjustment suppression for a dividend of 0. */ .balign 4 GLOBAL(sdivsi3_i4i): mov.l r6,@-r15 exts.b r5,r6 cmp/eq r5,r6 mov #-1,r1 bt/s LOCAL(div_le128) cmp/pz r4 addc r4,r1 exts.w r5,r6 cmp/eq r5,r6 mov #-7,r0 bf/s LOCAL(div_ge32k) cmp/hi r1,r4 ! copy sign bit of r4 into T rotcr r1 shll16 r6 ! 7 cycles up to here shad r0,r1 div0s r5,r4 div1 r6,r1 mov.l r7,@-r15 div1 r6,r1 mov r4,r0 ! re-compute adjusted dividend div1 r6,r1 mov #-31,r7 div1 r6,r1 shad r7,r0 div1 r6,r1 add r4,r0 ! adjusted dividend div1 r6,r1 mov.l r8,@-r15 div1 r6,r1 swap.w r4,r8 ! detect special case r4 = 0x80000000, r5 = 0x80 div1 r6,r1 swap.b r8,r8 xor r1,r0 ! xor dividend with result lsb div1 r6,r1 div1 r6,r1 or r5,r8 div1 r6,r1 add #-0x80,r8 ! r8 is 0 iff there is a match div1 r6,r1 swap.w r8,r7 ! or upper 16 bits... div1 r6,r1 or r7,r8 !...into lower 16 bits div1 r6,r1 extu.w r8,r8 div1 r6,r1 extu.b r0,r7 div1 r6,r1 shll8 r7 exts.w r1,r0 xor r7,r1 ! replace lsb of result with lsb of dividend div1 r6,r1 neg r8,r8 ! upper 16 bits of r8 are now 0xffff iff we want the end adjustment. div1 r6,r1 and r0,r8 div1 r6,r1 swap.w r8,r7 div1 r6,r1 mov.l @r15+,r8 ! 58 insns, 29 cycles up to here LOCAL(div_end): div1 r6,r1 shll8 r0 div1 r6,r1 exts.w r7,r7 div1 r6,r1 add r0,r0 div1 r6,r1 sub r7,r0 extu.b r1,r1 mov.l @r15+,r7 rotcl r1 mov.l @r15+,r6 rts add r1,r0 .balign 4 LOCAL(udiv_le128): ! 4 cycles up to here (or 7 for mispredict) mova LOCAL(div_table_inv),r0 shll2 r6 mov.l @(r0,r6),r1 mova LOCAL(div_table_clz),r0 lds r4,mach ! ! ! tst r1,r1 ! bt 0f dmulu.l r1,r4 0: mov.b @(r0,r5),r1 clrt ! ! sts mach,r0 addc r4,r0 rotcr r0 mov.l @r15+,r6 rts shld r1,r0 .balign 4 LOCAL(div_le128): ! 
3 cycles up to here (or 6 for mispredict) mova LOCAL(div_table_inv),r0 shll2 r6 mov.l @(r0,r6),r1 mova LOCAL(div_table_clz),r0 neg r4,r6 bf 0f mov r4,r6 0: lds r6,mach tst r1,r1 bt 0f dmulu.l r1,r6 0: div0s r4,r5 mov.b @(r0,r5),r1 bt/s LOCAL(le128_neg) clrt ! sts mach,r0 addc r6,r0 rotcr r0 mov.l @r15+,r6 rts shld r1,r0 /* Could trap divide by zero for the cost of one cycle more mispredict penalty: ... dmulu.l r1,r6 0: div0s r4,r5 bt/s LOCAL(le128_neg) tst r5,r5 bt LOCAL(div_by_zero) mov.b @(r0,r5),r1 sts mach,r0 addc r6,r0 ... LOCAL(div_by_zero): trapa # .balign 4 LOCAL(le128_neg): bt LOCAL(div_by_zero) mov.b @(r0,r5),r1 sts mach,r0 addc r6,r0 ... */ .balign 4 LOCAL(le128_neg): sts mach,r0 addc r6,r0 rotcr r0 mov.l @r15+,r6 shad r1,r0 rts neg r0,r0 ENDFUNC(GLOBAL(udivsi3_i4i)) ENDFUNC(GLOBAL(sdivsi3_i4i)) /* This table has been generated by divtab-sh4.c. */ .balign 4 .byte -7 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -2 .byte -2 .byte -2 .byte -2 .byte -1 .byte -1 .byte 0 LOCAL(div_table_clz): .byte 0 .byte 0 .byte -1 .byte -1 .byte -2 .byte -2 .byte -2 .byte -2 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 /* 1/-128 .. 1/127, normalized. There is an implicit leading 1 in bit 32, or in bit 33 for powers of two. 
*/ .balign 4 .long 0x0 .long 0x2040811 .long 0x4104105 .long 0x624DD30 .long 0x8421085 .long 0xA6810A7 .long 0xC9714FC .long 0xECF56BF .long 0x11111112 .long 0x135C8114 .long 0x15B1E5F8 .long 0x18118119 .long 0x1A7B9612 .long 0x1CF06ADB .long 0x1F7047DD .long 0x21FB7813 .long 0x24924925 .long 0x27350B89 .long 0x29E4129F .long 0x2C9FB4D9 .long 0x2F684BDB .long 0x323E34A3 .long 0x3521CFB3 .long 0x38138139 .long 0x3B13B13C .long 0x3E22CBCF .long 0x41414142 .long 0x446F8657 .long 0x47AE147B .long 0x4AFD6A06 .long 0x4E5E0A73 .long 0x51D07EAF .long 0x55555556 .long 0x58ED2309 .long 0x5C9882BA .long 0x60581606 .long 0x642C8591 .long 0x68168169 .long 0x6C16C16D .long 0x702E05C1 .long 0x745D1746 .long 0x78A4C818 .long 0x7D05F418 .long 0x81818182 .long 0x86186187 .long 0x8ACB90F7 .long 0x8F9C18FA .long 0x948B0FCE .long 0x9999999A .long 0x9EC8E952 .long 0xA41A41A5 .long 0xA98EF607 .long 0xAF286BCB .long 0xB4E81B4F .long 0xBACF914D .long 0xC0E07039 .long 0xC71C71C8 .long 0xCD856891 .long 0xD41D41D5 .long 0xDAE6076C .long 0xE1E1E1E2 .long 0xE9131AC0 .long 0xF07C1F08 .long 0xF81F81F9 .long 0x0 .long 0x4104105 .long 0x8421085 .long 0xC9714FC .long 0x11111112 .long 0x15B1E5F8 .long 0x1A7B9612 .long 0x1F7047DD .long 0x24924925 .long 0x29E4129F .long 0x2F684BDB .long 0x3521CFB3 .long 0x3B13B13C .long 0x41414142 .long 0x47AE147B .long 0x4E5E0A73 .long 0x55555556 .long 0x5C9882BA .long 0x642C8591 .long 0x6C16C16D .long 0x745D1746 .long 0x7D05F418 .long 0x86186187 .long 0x8F9C18FA .long 0x9999999A .long 0xA41A41A5 .long 0xAF286BCB .long 0xBACF914D .long 0xC71C71C8 .long 0xD41D41D5 .long 0xE1E1E1E2 .long 0xF07C1F08 .long 0x0 .long 0x8421085 .long 0x11111112 .long 0x1A7B9612 .long 0x24924925 .long 0x2F684BDB .long 0x3B13B13C .long 0x47AE147B .long 0x55555556 .long 0x642C8591 .long 0x745D1746 .long 0x86186187 .long 0x9999999A .long 0xAF286BCB .long 0xC71C71C8 .long 0xE1E1E1E2 .long 0x0 .long 0x11111112 .long 0x24924925 .long 0x3B13B13C .long 0x55555556 .long 0x745D1746 .long 0x9999999A .long 0xC71C71C8 .long 0x0 .long 0x24924925 .long 0x55555556 .long 0x9999999A .long 0x0 .long 0x55555556 .long 0x0 .long 0x0 LOCAL(div_table_inv): .long 0x0 .long 0x0 .long 0x0 .long 0x55555556 .long 0x0 .long 0x9999999A .long 0x55555556 .long 0x24924925 .long 0x0 .long 0xC71C71C8 .long 0x9999999A .long 0x745D1746 .long 0x55555556 .long 0x3B13B13C .long 0x24924925 .long 0x11111112 .long 0x0 .long 0xE1E1E1E2 .long 0xC71C71C8 .long 0xAF286BCB .long 0x9999999A .long 0x86186187 .long 0x745D1746 .long 0x642C8591 .long 0x55555556 .long 0x47AE147B .long 0x3B13B13C .long 0x2F684BDB .long 0x24924925 .long 0x1A7B9612 .long 0x11111112 .long 0x8421085 .long 0x0 .long 0xF07C1F08 .long 0xE1E1E1E2 .long 0xD41D41D5 .long 0xC71C71C8 .long 0xBACF914D .long 0xAF286BCB .long 0xA41A41A5 .long 0x9999999A .long 0x8F9C18FA .long 0x86186187 .long 0x7D05F418 .long 0x745D1746 .long 0x6C16C16D .long 0x642C8591 .long 0x5C9882BA .long 0x55555556 .long 0x4E5E0A73 .long 0x47AE147B .long 0x41414142 .long 0x3B13B13C .long 0x3521CFB3 .long 0x2F684BDB .long 0x29E4129F .long 0x24924925 .long 0x1F7047DD .long 0x1A7B9612 .long 0x15B1E5F8 .long 0x11111112 .long 0xC9714FC .long 0x8421085 .long 0x4104105 .long 0x0 .long 0xF81F81F9 .long 0xF07C1F08 .long 0xE9131AC0 .long 0xE1E1E1E2 .long 0xDAE6076C .long 0xD41D41D5 .long 0xCD856891 .long 0xC71C71C8 .long 0xC0E07039 .long 0xBACF914D .long 0xB4E81B4F .long 0xAF286BCB .long 0xA98EF607 .long 0xA41A41A5 .long 0x9EC8E952 .long 0x9999999A .long 0x948B0FCE .long 0x8F9C18FA .long 0x8ACB90F7 .long 0x86186187 .long 0x81818182 .long 
0x7D05F418 .long 0x78A4C818 .long 0x745D1746 .long 0x702E05C1 .long 0x6C16C16D .long 0x68168169 .long 0x642C8591 .long 0x60581606 .long 0x5C9882BA .long 0x58ED2309 .long 0x55555556 .long 0x51D07EAF .long 0x4E5E0A73 .long 0x4AFD6A06 .long 0x47AE147B .long 0x446F8657 .long 0x41414142 .long 0x3E22CBCF .long 0x3B13B13C .long 0x38138139 .long 0x3521CFB3 .long 0x323E34A3 .long 0x2F684BDB .long 0x2C9FB4D9 .long 0x29E4129F .long 0x27350B89 .long 0x24924925 .long 0x21FB7813 .long 0x1F7047DD .long 0x1CF06ADB .long 0x1A7B9612 .long 0x18118119 .long 0x15B1E5F8 .long 0x135C8114 .long 0x11111112 .long 0xECF56BF .long 0xC9714FC .long 0xA6810A7 .long 0x8421085 .long 0x624DD30 .long 0x4104105 .long 0x2040811 /* maximum error: 0.987342 scaled: 0.921875*/ #endif /* SH3 / SH4 */ #endif /* L_div_table */
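/* Editor's note: a hedged C sketch (not part of the original source) of the
   small-divisor path above: the dividend is multiplied by a normalized 32-bit
   inverse from div_table_inv (which carries an implicit leading 1 in bit 32),
   the dividend is added back once for that implicit bit (the addc + rotcr
   pair forms a 33-bit sum), and the result is shifted right by the count
   taken from div_table_clz.  Shown for divisor 3, whose table entry above is
   0x55555556 with a total right shift of 2 (one from rotcr, one from shld).

   #include <stdint.h>

   static uint32_t udiv_by_3_sketch (uint32_t x)
   {
     const uint32_t inv3 = 0x55555556u;               // table entry for 1/3
     uint32_t hi = (uint32_t) (((uint64_t) x * inv3) >> 32);  // dmulu.l, mach
     uint64_t sum = (uint64_t) x + hi;                // addc: 33-bit sum
     return (uint32_t) (sum >> 2);                    // rotcr, then shld
   }
*/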
4ms/metamodule-plugin-sdk
2,709
plugin-libc/libgcc/config/sh/crti.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. This file was adapted from glibc sources. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "crt.h" /* The code in sections .init and .fini is supposed to be a single regular function. The function in .init is called directly from start in crt1.S. The function in .fini is atexit()ed in crt1.S too. crti.S contributes the prologue of a function to these sections, and crtn.S supplies the epilogue. STARTFILE_SPEC should list crti.o before any other object files that might add code to .init or .fini sections, and ENDFILE_SPEC should list crtn.o after any such object files. */ .section .init /* The alignment below can't be smaller, otherwise the mova below breaks. Yes, we might align just the label, but then we'd be exchanging an alignment here for one there, since the code fragment below ensures 4-byte alignment on __ELF__. */ #ifdef __ELF__ .p2align 2 #else .p2align 1 #endif .global GLOBAL(_init) GLOBAL(_init): #ifdef __ELF__ mov.l r12,@-r15 mova 0f,r0 mov.l 0f,r12 #endif mov.l r14,@-r15 #ifdef __ELF__ add r0,r12 #endif sts.l pr,@-r15 #ifdef __ELF__ bra 1f #endif mov r15,r14 #ifdef __ELF__ 0: .long _GLOBAL_OFFSET_TABLE_ 1: #endif .section .fini /* The alignment below can't be smaller, otherwise the mova below breaks. Yes, we might align just the label, but then we'd be exchanging an alignment here for one there, since the code fragment below ensures 4-byte alignment on __ELF__. */ #ifdef __ELF__ .p2align 2 #else .p2align 1 #endif .global GLOBAL(_fini) GLOBAL(_fini): #ifdef __ELF__ mov.l r12,@-r15 mova 0f,r0 mov.l 0f,r12 #endif mov.l r14,@-r15 #ifdef __ELF__ add r0,r12 #endif sts.l pr,@-r15 #ifdef __ELF__ bra 1f #endif mov r15,r14 #ifdef __ELF__ 0: .long _GLOBAL_OFFSET_TABLE_ 1: #endif
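/* Editor's note: a hedged sketch (not part of the original source) of the
   PC-relative GOT address computation in the ELF prologue above.  As far as
   this editor understands the SH ABI, the literal at 0f is resolved by the
   linker to the displacement of the GOT from the literal itself, so adding
   the literal's own address (obtained with mova) yields the absolute GOT
   address in r12.  In rough C terms, with literal_addr a hypothetical name
   standing for the 0f: literal:

   extern long literal_addr[];   // holds GOT - &literal_addr after linking

   static void *got_base_sketch (void)
   {
     // mova 0f,r0  ->  &literal_addr;  mov.l 0f,r12  ->  literal_addr[0]
     return (char *) literal_addr + literal_addr[0];   // add r0,r12
   }
*/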
4ms/metamodule-plugin-sdk
15,005
plugin-libc/libgcc/config/sh/crt1.S
/* Copyright (C) 2000-2022 Free Software Foundation, Inc. This file was pretty much copied from newlib. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "crt.h" #ifdef MMU_SUPPORT /* Section used for exception/timer interrupt stack area */ .section .data.vbr.stack,"aw" .align 4 .global __ST_VBR __ST_VBR: .zero 1024 * 2 /* ; 2k for VBR handlers */ /* Label at the highest stack address where the stack grows from */ __timer_stack: #endif /* MMU_SUPPORT */ /* ;---------------------------------------- Normal newlib crt1.S */ ! make a place to keep any previous value of the vbr register ! this will only have a value if it has been set by redboot (for example) .section .bss old_vbr: .long 0 #ifdef PROFILE profiling_enabled: .long 0 #endif .section .text .global start .import ___rtos_profiler_start_timer .weak ___rtos_profiler_start_timer start: mov.l stack_k,r15 #if defined (__SH3__) || (defined (__SH_FPU_ANY__) && ! defined (__SH2E__) && ! defined (__SH2A__)) || defined (__SH4_NOFPU__) #define VBR_SETUP ! before zeroing the bss ... ! if the vbr is already set to vbr_start then the program has been restarted ! (i.e. it is not the first time the program has been run since reset) ! reset the vbr to its old value before old_vbr (in bss) is wiped ! this ensures that the later code does not create a circular vbr chain stc vbr, r1 mov.l vbr_start_k, r2 cmp/eq r1, r2 bf 0f ! reset the old vbr value mov.l old_vbr_k, r1 mov.l @r1, r2 ldc r2, vbr 0: #endif /* VBR_SETUP */ ! zero out bss mov.l edata_k,r0 mov.l end_k,r1 mov #0,r2 start_l: mov.l r2,@r0 add #4,r0 cmp/ge r0,r1 bt start_l #if defined (__SH_FPU_ANY__) mov.l set_fpscr_k, r1 mov #4,r4 jsr @r1 shll16 r4 ! Set DN bit (flush denormal inputs to zero) lds r3,fpscr ! Switch to default precision #endif /* defined (__SH_FPU_ANY__) */ #ifdef VBR_SETUP ! save the existing contents of the vbr ! there will only be a prior value when using something like redboot ! otherwise it will be zero stc vbr, r1 mov.l old_vbr_k, r2 mov.l r1, @r2 ! setup vbr mov.l vbr_start_k, r1 ldc r1,vbr #endif /* VBR_SETUP */ ! if an rtos is exporting a timer start fn, ! then pick up an SR which does not enable ints ! (the rtos will take care of this) mov.l rtos_start_fn, r0 mov.l sr_initial_bare, r1 tst r0, r0 bt set_sr mov.l sr_initial_rtos, r1 set_sr: ! Set status register (sr) ldc r1, sr ! arrange for exit to call fini mov.l atexit_k,r0 mov.l fini_k,r4 jsr @r0 nop #ifdef PROFILE ! arrange for exit to call _mcleanup (via stop_profiling) mova stop_profiling,r0 mov.l atexit_k,r1 jsr @r1 mov r0, r4 ! Call profiler startup code mov.l monstartup_k, r0 mov.l start_k, r4 mov.l etext_k, r5 jsr @r0 nop ! enable profiling trap ! 
until now any trap 33s will have been ignored ! This means that all library functions called before this point ! (directly or indirectly) may have the profiling trap at the start. ! Therefore, only mcount itself may not have the extra header. mov.l profiling_enabled_k2, r0 mov #1, r1 mov.l r1, @r0 #endif /* PROFILE */ ! call init mov.l init_k,r0 jsr @r0 nop ! call the mainline mov.l main_k,r0 jsr @r0 nop ! call exit mov r0,r4 mov.l exit_k,r0 jsr @r0 nop .balign 4 #ifdef PROFILE stop_profiling: # stop mcount counting mov.l profiling_enabled_k2, r0 mov #0, r1 mov.l r1, @r0 # call mcleanup mov.l mcleanup_k, r0 jmp @r0 nop .balign 4 mcleanup_k: .long __mcleanup monstartup_k: .long ___monstartup profiling_enabled_k2: .long profiling_enabled start_k: .long _start etext_k: .long __etext #endif /* PROFILE */ .align 2 #if defined (__SH_FPU_ANY__) set_fpscr_k: .long ___set_fpscr #endif /* defined (__SH_FPU_ANY__) */ stack_k: .long _stack edata_k: .long _edata end_k: .long _end main_k: .long ___setup_argv_and_call_main exit_k: .long _exit atexit_k: .long _atexit init_k: .long GLOBAL(_init) fini_k: .long GLOBAL(_fini) #ifdef VBR_SETUP old_vbr_k: .long old_vbr vbr_start_k: .long vbr_start #endif /* VBR_SETUP */ sr_initial_rtos: ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. ! Whether profiling or not, keep interrupts masked, ! the RTOS will enable these if required. .long 0x600000f1 rtos_start_fn: .long ___rtos_profiler_start_timer #ifdef PROFILE sr_initial_bare: ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. ! For bare machine, we need to enable interrupts to get profiling working .long 0x60000001 #else sr_initial_bare: ! Privileged mode RB 1 BL 0. Keep BL 0 to allow default trap handlers to work. ! Keep interrupts disabled - the application will enable as required. .long 0x600000f1 #endif ! supplied for backward compatibility only, in case of linking ! code whose main() was compiled with an older version of GCC. .global ___main ___main: rts nop #ifdef VBR_SETUP ! Exception handlers .section .text.vbr, "ax" vbr_start: .org 0x100 vbr_100: #ifdef PROFILE ! Note on register usage. ! we use r0..r3 as scratch in this code. If we are here due to a trapa for profiling ! then this is OK as we are just before executing any function code. ! The other r4..r7 we save explicitly on the stack ! Remaining registers are saved by normal ABI conventions and we assert we do not ! use floating point registers. mov.l expevt_k1, r1 mov.l @r1, r1 mov.l event_mask, r0 and r0,r1 mov.l trapcode_k, r2 cmp/eq r1,r2 bt 1f bra handler_100 ! if not a trapa, go to default handler nop 1: mov.l trapa_k, r0 mov.l @r0, r0 shlr2 r0 ! trapa code is shifted by 2. cmp/eq #33, r0 bt 2f bra handler_100 nop 2: ! If here then it looks like we have trap #33 ! Now we need to call mcount with the following convention ! Save and restore r4..r7 mov.l r4,@-r15 mov.l r5,@-r15 mov.l r6,@-r15 mov.l r7,@-r15 sts.l pr,@-r15 ! r4 is frompc. ! r5 is selfpc ! r0 is the branch back address. ! The code sequence emitted by gcc for the profiling trap is ! .align 2 ! trapa #33 ! .align 2 ! .long lab ! Where lab is planted by the compiler. This is the address ! of a datum that needs to be incremented. sts pr, r4 ! frompc stc spc, r5 ! selfpc mov #2, r2 not r2, r2 ! pattern to align to 4 and r2, r5 ! r5 now has aligned address ! add #4, r5 ! r5 now has address of address mov r5, r2 ! Remember it. ! mov.l @r5, r5 ! r5 has value of label (lab in above example) add #8, r2 ldc r2, spc ! 
our return address avoiding address word ! only call mcount if profiling is enabled mov.l profiling_enabled_k, r0 mov.l @r0, r0 cmp/eq #0, r0 bt 3f ! call mcount mov.l mcount_k, r2 jsr @r2 nop 3: lds.l @r15+,pr mov.l @r15+,r7 mov.l @r15+,r6 mov.l @r15+,r5 mov.l @r15+,r4 rte nop .balign 4 event_mask: .long 0xfff trapcode_k: .long 0x160 expevt_k1: .long 0xff000024 ! Address of expevt trapa_k: .long 0xff000020 mcount_k: .long __call_mcount profiling_enabled_k: .long profiling_enabled #endif ! Non profiling case. handler_100: mov.l 2f, r0 ! load the old vbr setting (if any) mov.l @r0, r0 cmp/eq #0, r0 bf 1f ! no previous vbr - jump to own generic handler bra handler nop 1: ! there was a previous handler - chain them add #0x7f, r0 ! 0x7f add #0x7f, r0 ! 0xfe add #0x2, r0 ! add 0x100 without corrupting another register jmp @r0 nop .balign 4 2: .long old_vbr .org 0x400 vbr_400: ! Should be at vbr+0x400 mov.l 2f, r0 ! load the old vbr setting (if any) mov.l @r0, r0 cmp/eq #0, r0 ! no previous vbr - jump to own generic handler bt handler ! there was a previous handler - chain them rotcr r0 rotcr r0 add #0x7f, r0 ! 0x1fc add #0x7f, r0 ! 0x3f8 add #0x02, r0 ! 0x400 rotcl r0 rotcl r0 ! Add 0x400 without corrupting another register jmp @r0 nop .balign 4 2: .long old_vbr handler: /* If the trap handler is there call it */ mov.l superh_trap_handler_k, r0 cmp/eq #0, r0 ! True if zero. bf 3f bra chandler nop 3: ! Here handler available, call it. /* Now call the trap handler with as much of the context unchanged as possible. Move trapping address into PR to make it look like the trap point */ stc spc, r1 lds r1, pr mov.l expevt_k, r4 mov.l @r4, r4 ! r4 is value of expevt, first parameter. mov r1, r5 ! Remember trapping pc. mov r1, r6 ! Remember trapping pc. mov.l chandler_k, r1 mov.l superh_trap_handler_k, r2 ! jmp to trap handler to avoid disturbing pr. jmp @r2 nop .org 0x600 vbr_600: #ifdef PROFILE ! Should be at vbr+0x600 ! Now we are in the land of interrupts so need to save more state. ! Save register state mov.l interrupt_stack_k, r15 ! r15 has been saved to sgr. mov.l r0,@-r15 mov.l r1,@-r15 mov.l r2,@-r15 mov.l r3,@-r15 mov.l r4,@-r15 mov.l r5,@-r15 mov.l r6,@-r15 mov.l r7,@-r15 sts.l pr,@-r15 sts.l mach,@-r15 sts.l macl,@-r15 #if defined(__SH_FPU_ANY__) ! Save fpul and fpscr, save fr0-fr7 in 64 bit mode ! and set the pervading precision for the timer_handler mov #0,r0 sts.l fpul,@-r15 sts.l fpscr,@-r15 lds r0,fpscr ! Clear fpscr fmov fr0,@-r15 fmov fr1,@-r15 fmov fr2,@-r15 fmov fr3,@-r15 mov.l pervading_precision_k,r0 fmov fr4,@-r15 fmov fr5,@-r15 mov.l @r0,r0 fmov fr6,@-r15 fmov fr7,@-r15 lds r0,fpscr #endif /* __SH_FPU_ANY__ */ ! Pass interrupted pc to timer_handler as first parameter (r4). stc spc, r4 mov.l timer_handler_k, r0 jsr @r0 nop #if defined(__SH_FPU_ANY__) mov #0,r0 lds r0,fpscr ! Clear the fpscr fmov @r15+,fr7 fmov @r15+,fr6 fmov @r15+,fr5 fmov @r15+,fr4 fmov @r15+,fr3 fmov @r15+,fr2 fmov @r15+,fr1 fmov @r15+,fr0 lds.l @r15+,fpscr lds.l @r15+,fpul #endif /* __SH_FPU_ANY__ */ lds.l @r15+,macl lds.l @r15+,mach lds.l @r15+,pr mov.l @r15+,r7 mov.l @r15+,r6 mov.l @r15+,r5 mov.l @r15+,r4 mov.l @r15+,r3 mov.l @r15+,r2 mov.l @r15+,r1 mov.l @r15+,r0 stc sgr, r15 ! Restore r15, destroyed by this sequence. rte nop #if defined(__SH_FPU_ANY__) .balign 4 pervading_precision_k: .long GLOBAL(__fpscr_values)+4 #endif #else mov.l 2f, r0 ! Load the old vbr setting (if any). mov.l @r0, r0 cmp/eq #0, r0 ! no previous vbr - jump to own handler bt chandler ! 
there was a previous handler - chain them rotcr r0 rotcr r0 add #0x7f, r0 ! 0x1fc add #0x7f, r0 ! 0x3f8 add #0x7f, r0 ! 0x5f4 add #0x03, r0 ! 0x600 rotcl r0 rotcl r0 ! Add 0x600 without corrupting another register jmp @r0 nop .balign 4 2: .long old_vbr #endif /* PROFILE code */ chandler: mov.l expevt_k, r4 mov.l @r4, r4 ! r4 is value of expevt hence making this the return code mov.l handler_exit_k,r0 jsr @r0 nop ! We should never return from _exit but in case we do we would enter ! the following tight loop limbo: bra limbo nop .balign 4 #ifdef PROFILE interrupt_stack_k: .long __timer_stack ! The high end of the stack timer_handler_k: .long __profil_counter #endif expevt_k: .long 0xff000024 ! Address of expevt chandler_k: .long chandler superh_trap_handler_k: .long __superh_trap_handler handler_exit_k: .long _exit .align 2 ! Simulated compile of trap handler. .section .debug_abbrev,"",@progbits .Ldebug_abbrev0: .section .debug_info,"",@progbits .Ldebug_info0: .section .debug_line,"",@progbits .Ldebug_line0: .text .Ltext0: .align 5 .type __superh_trap_handler,@function __superh_trap_handler: .LFB1: mov.l r14,@-r15 .LCFI0: add #-4,r15 .LCFI1: mov r15,r14 .LCFI2: mov.l r4,@r14 lds r1, pr add #4,r14 mov r14,r15 mov.l @r15+,r14 rts nop .LFE1: .Lfe1: .size __superh_trap_handler,.Lfe1-__superh_trap_handler .section .debug_frame,"",@progbits .Lframe0: .ualong .LECIE0-.LSCIE0 .LSCIE0: .ualong 0xffffffff .byte 0x1 .string "" .uleb128 0x1 .sleb128 -4 .byte 0x11 .byte 0xc .uleb128 0xf .uleb128 0x0 .align 2 .LECIE0: .LSFDE0: .ualong .LEFDE0-.LASFDE0 .LASFDE0: .ualong .Lframe0 .ualong .LFB1 .ualong .LFE1-.LFB1 .byte 0x4 .ualong .LCFI0-.LFB1 .byte 0xe .uleb128 0x4 .byte 0x4 .ualong .LCFI1-.LCFI0 .byte 0xe .uleb128 0x8 .byte 0x8e .uleb128 0x1 .byte 0x4 .ualong .LCFI2-.LCFI1 .byte 0xd .uleb128 0xe .align 2 .LEFDE0: .text .Letext0: .section .debug_info .ualong 0xb3 .uaword 0x2 .ualong .Ldebug_abbrev0 .byte 0x4 .uleb128 0x1 .ualong .Ldebug_line0 .ualong .Letext0 .ualong .Ltext0 .string "trap_handler.c" .string "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" .string "GNU C 3.2 20020529 (experimental)" .byte 0x1 .uleb128 0x2 .ualong 0xa6 .byte 0x1 .string "_superh_trap_handler" .byte 0x1 .byte 0x2 .byte 0x1 .ualong .LFB1 .ualong .LFE1 .byte 0x1 .byte 0x5e .uleb128 0x3 .string "trap_reason" .byte 0x1 .byte 0x1 .ualong 0xa6 .byte 0x2 .byte 0x91 .sleb128 0 .byte 0x0 .uleb128 0x4 .string "unsigned int" .byte 0x4 .byte 0x7 .byte 0x0 .section .debug_abbrev .uleb128 0x1 .uleb128 0x11 .byte 0x1 .uleb128 0x10 .uleb128 0x6 .uleb128 0x12 .uleb128 0x1 .uleb128 0x11 .uleb128 0x1 .uleb128 0x3 .uleb128 0x8 .uleb128 0x1b .uleb128 0x8 .uleb128 0x25 .uleb128 0x8 .uleb128 0x13 .uleb128 0xb .byte 0x0 .byte 0x0 .uleb128 0x2 .uleb128 0x2e .byte 0x1 .uleb128 0x1 .uleb128 0x13 .uleb128 0x3f .uleb128 0xc .uleb128 0x3 .uleb128 0x8 .uleb128 0x3a .uleb128 0xb .uleb128 0x3b .uleb128 0xb .uleb128 0x27 .uleb128 0xc .uleb128 0x11 .uleb128 0x1 .uleb128 0x12 .uleb128 0x1 .uleb128 0x40 .uleb128 0xa .byte 0x0 .byte 0x0 .uleb128 0x3 .uleb128 0x5 .byte 0x0 .uleb128 0x3 .uleb128 0x8 .uleb128 0x3a .uleb128 0xb .uleb128 0x3b .uleb128 0xb .uleb128 0x49 .uleb128 0x13 .uleb128 0x2 .uleb128 0xa .byte 0x0 .byte 0x0 .uleb128 0x4 .uleb128 0x24 .byte 0x0 .uleb128 0x3 .uleb128 0x8 .uleb128 0xb .uleb128 0xb .uleb128 0x3e .uleb128 0xb .byte 0x0 .byte 0x0 .byte 0x0 .section .debug_pubnames,"",@progbits .ualong 0x27 .uaword 0x2 .ualong .Ldebug_info0 .ualong 0xb7 .ualong 0x67 .string "_superh_trap_handler" .ualong 0x0 .section .debug_aranges,"",@progbits .ualong 0x1c .uaword 
0x2 .ualong .Ldebug_info0 .byte 0x4 .byte 0x0 .uaword 0x0 .uaword 0x0 .ualong .Ltext0 .ualong .Letext0-.Ltext0 .ualong 0x0 .ualong 0x0 #endif /* VBR_SETUP */
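/* Editor's note: a hedged C-level sketch (not part of the original source) of
   the non-profiling startup path implemented by start above.  _stack, _edata
   and _end are the linker-provided symbols referenced via stack_k/edata_k/
   end_k; start_sketch is a hypothetical name for illustration only.

   extern unsigned _edata[], _end[];
   extern void _init (void), _fini (void);
   extern int  __setup_argv_and_call_main (void);
   extern int  atexit (void (*) (void));
   extern void exit (int);

   static void start_sketch (void)
   {
     // r15 <- _stack, then zero .bss one word at a time (the start_l loop;
     // note the asm loop's cmp/ge also zeroes the word at _end itself)
     for (unsigned *p = _edata; p < _end; p++)
       *p = 0;
     // save any prior VBR into old_vbr, install vbr_start, and pick an SR:
     // interrupts stay masked unless an RTOS exports the timer-start hook
     atexit (_fini);                       // .fini runs at exit
     _init ();                             // .init runs now
     exit (__setup_argv_and_call_main ()); // main's return feeds exit
   }
*/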
4ms/metamodule-plugin-sdk
41,874
plugin-libc/libgcc/config/sh/lib1funcs.S
/* Copyright (C) 1994-2022 Free Software Foundation, Inc. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ !! libgcc routines for the Renesas / SuperH SH CPUs. !! Contributed by Steve Chamberlain. !! sac@cygnus.com !! ashiftrt_r4_x, ___ashrsi3, ___ashlsi3, ___lshrsi3 routines !! recoded in assembly by Toshiyasu Morita !! tm@netcom.com #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif /* SH2 optimizations for ___ashrsi3, ___ashlsi3, ___lshrsi3 and ELF local label prefixes by J"orn Rennecke amylaar@cygnus.com */ #include "lib1funcs.h" /* t-vxworks needs to build both PIC and non-PIC versions of libgcc, so it is more convenient to define NO_FPSCR_VALUES here than to define it on the command line. */ #if defined __vxworks && defined __PIC__ #define NO_FPSCR_VALUES #endif #ifdef L_ashiftrt .global GLOBAL(ashiftrt_r4_0) .global GLOBAL(ashiftrt_r4_1) .global GLOBAL(ashiftrt_r4_2) .global GLOBAL(ashiftrt_r4_3) .global GLOBAL(ashiftrt_r4_4) .global GLOBAL(ashiftrt_r4_5) .global GLOBAL(ashiftrt_r4_6) .global GLOBAL(ashiftrt_r4_7) .global GLOBAL(ashiftrt_r4_8) .global GLOBAL(ashiftrt_r4_9) .global GLOBAL(ashiftrt_r4_10) .global GLOBAL(ashiftrt_r4_11) .global GLOBAL(ashiftrt_r4_12) .global GLOBAL(ashiftrt_r4_13) .global GLOBAL(ashiftrt_r4_14) .global GLOBAL(ashiftrt_r4_15) .global GLOBAL(ashiftrt_r4_16) .global GLOBAL(ashiftrt_r4_17) .global GLOBAL(ashiftrt_r4_18) .global GLOBAL(ashiftrt_r4_19) .global GLOBAL(ashiftrt_r4_20) .global GLOBAL(ashiftrt_r4_21) .global GLOBAL(ashiftrt_r4_22) .global GLOBAL(ashiftrt_r4_23) .global GLOBAL(ashiftrt_r4_24) .global GLOBAL(ashiftrt_r4_25) .global GLOBAL(ashiftrt_r4_26) .global GLOBAL(ashiftrt_r4_27) .global GLOBAL(ashiftrt_r4_28) .global GLOBAL(ashiftrt_r4_29) .global GLOBAL(ashiftrt_r4_30) .global GLOBAL(ashiftrt_r4_31) .global GLOBAL(ashiftrt_r4_32) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_0)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_1)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_2)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_3)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_4)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_5)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_6)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_7)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_8)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_9)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_10)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_11)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_12)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_13)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_14)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_15)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_16)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_17)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_18)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_19)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_20)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_21)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_22)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_23)) 
HIDDEN_FUNC(GLOBAL(ashiftrt_r4_24)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_25)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_26)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_27)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_28)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_29)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_30)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_31)) HIDDEN_FUNC(GLOBAL(ashiftrt_r4_32)) .align 1 GLOBAL(ashiftrt_r4_32): GLOBAL(ashiftrt_r4_31): rotcl r4 rts subc r4,r4 GLOBAL(ashiftrt_r4_30): shar r4 GLOBAL(ashiftrt_r4_29): shar r4 GLOBAL(ashiftrt_r4_28): shar r4 GLOBAL(ashiftrt_r4_27): shar r4 GLOBAL(ashiftrt_r4_26): shar r4 GLOBAL(ashiftrt_r4_25): shar r4 GLOBAL(ashiftrt_r4_24): shlr16 r4 shlr8 r4 rts exts.b r4,r4 GLOBAL(ashiftrt_r4_23): shar r4 GLOBAL(ashiftrt_r4_22): shar r4 GLOBAL(ashiftrt_r4_21): shar r4 GLOBAL(ashiftrt_r4_20): shar r4 GLOBAL(ashiftrt_r4_19): shar r4 GLOBAL(ashiftrt_r4_18): shar r4 GLOBAL(ashiftrt_r4_17): shar r4 GLOBAL(ashiftrt_r4_16): shlr16 r4 rts exts.w r4,r4 GLOBAL(ashiftrt_r4_15): shar r4 GLOBAL(ashiftrt_r4_14): shar r4 GLOBAL(ashiftrt_r4_13): shar r4 GLOBAL(ashiftrt_r4_12): shar r4 GLOBAL(ashiftrt_r4_11): shar r4 GLOBAL(ashiftrt_r4_10): shar r4 GLOBAL(ashiftrt_r4_9): shar r4 GLOBAL(ashiftrt_r4_8): shar r4 GLOBAL(ashiftrt_r4_7): shar r4 GLOBAL(ashiftrt_r4_6): shar r4 GLOBAL(ashiftrt_r4_5): shar r4 GLOBAL(ashiftrt_r4_4): shar r4 GLOBAL(ashiftrt_r4_3): shar r4 GLOBAL(ashiftrt_r4_2): shar r4 GLOBAL(ashiftrt_r4_1): rts shar r4 GLOBAL(ashiftrt_r4_0): rts nop ENDFUNC(GLOBAL(ashiftrt_r4_0)) ENDFUNC(GLOBAL(ashiftrt_r4_1)) ENDFUNC(GLOBAL(ashiftrt_r4_2)) ENDFUNC(GLOBAL(ashiftrt_r4_3)) ENDFUNC(GLOBAL(ashiftrt_r4_4)) ENDFUNC(GLOBAL(ashiftrt_r4_5)) ENDFUNC(GLOBAL(ashiftrt_r4_6)) ENDFUNC(GLOBAL(ashiftrt_r4_7)) ENDFUNC(GLOBAL(ashiftrt_r4_8)) ENDFUNC(GLOBAL(ashiftrt_r4_9)) ENDFUNC(GLOBAL(ashiftrt_r4_10)) ENDFUNC(GLOBAL(ashiftrt_r4_11)) ENDFUNC(GLOBAL(ashiftrt_r4_12)) ENDFUNC(GLOBAL(ashiftrt_r4_13)) ENDFUNC(GLOBAL(ashiftrt_r4_14)) ENDFUNC(GLOBAL(ashiftrt_r4_15)) ENDFUNC(GLOBAL(ashiftrt_r4_16)) ENDFUNC(GLOBAL(ashiftrt_r4_17)) ENDFUNC(GLOBAL(ashiftrt_r4_18)) ENDFUNC(GLOBAL(ashiftrt_r4_19)) ENDFUNC(GLOBAL(ashiftrt_r4_20)) ENDFUNC(GLOBAL(ashiftrt_r4_21)) ENDFUNC(GLOBAL(ashiftrt_r4_22)) ENDFUNC(GLOBAL(ashiftrt_r4_23)) ENDFUNC(GLOBAL(ashiftrt_r4_24)) ENDFUNC(GLOBAL(ashiftrt_r4_25)) ENDFUNC(GLOBAL(ashiftrt_r4_26)) ENDFUNC(GLOBAL(ashiftrt_r4_27)) ENDFUNC(GLOBAL(ashiftrt_r4_28)) ENDFUNC(GLOBAL(ashiftrt_r4_29)) ENDFUNC(GLOBAL(ashiftrt_r4_30)) ENDFUNC(GLOBAL(ashiftrt_r4_31)) ENDFUNC(GLOBAL(ashiftrt_r4_32)) #endif #ifdef L_ashiftrt_n ! ! GLOBAL(ashrsi3) ! ! Entry: ! ! r4: Value to shift ! r5: Shift count ! ! Exit: ! ! r0: Result ! ! Destroys: ! ! T bit, r5 ! 
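/* Editor's note: a hedged C sketch (not part of the original source) of what
   GLOBAL(ashrsi3) below computes: the count is masked to 0..31 and the
   dispatch table selects an unrolled run of 'shar' (or the byte/word special
   cases).  Plain C '>>' on a negative int is formally implementation-defined,
   hence the explicit construction here; ashrsi3_sketch is a name used for
   illustration only.

   int ashrsi3_sketch (int val, int count)
   {
     count &= 31;                          // the table has exactly 32 entries
     unsigned u = (unsigned) val >> count; // logical shift first
     if (val < 0 && count != 0)            // then replicate the sign bit
       u |= ~0u << (32 - count);
     return (int) u;
   }
*/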
.global GLOBAL(ashrsi3) HIDDEN_FUNC(GLOBAL(ashrsi3)) .align 2 GLOBAL(ashrsi3): mov #31,r0 and r0,r5 mova LOCAL(ashrsi3_table),r0 mov.b @(r0,r5),r5 #ifdef __sh1__ add r5,r0 jmp @r0 #else braf r5 #endif mov r4,r0 .align 2 LOCAL(ashrsi3_table): .byte LOCAL(ashrsi3_0)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_1)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_2)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_3)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_4)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_5)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_6)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_7)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_8)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_9)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_10)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_11)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_12)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_13)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_14)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_15)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_16)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_17)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_18)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_19)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_20)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_21)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_22)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_23)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_24)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_25)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_26)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_27)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_28)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_29)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_30)-LOCAL(ashrsi3_table) .byte LOCAL(ashrsi3_31)-LOCAL(ashrsi3_table) LOCAL(ashrsi3_31): rotcl r0 rts subc r0,r0 LOCAL(ashrsi3_30): shar r0 LOCAL(ashrsi3_29): shar r0 LOCAL(ashrsi3_28): shar r0 LOCAL(ashrsi3_27): shar r0 LOCAL(ashrsi3_26): shar r0 LOCAL(ashrsi3_25): shar r0 LOCAL(ashrsi3_24): shlr16 r0 shlr8 r0 rts exts.b r0,r0 LOCAL(ashrsi3_23): shar r0 LOCAL(ashrsi3_22): shar r0 LOCAL(ashrsi3_21): shar r0 LOCAL(ashrsi3_20): shar r0 LOCAL(ashrsi3_19): shar r0 LOCAL(ashrsi3_18): shar r0 LOCAL(ashrsi3_17): shar r0 LOCAL(ashrsi3_16): shlr16 r0 rts exts.w r0,r0 LOCAL(ashrsi3_15): shar r0 LOCAL(ashrsi3_14): shar r0 LOCAL(ashrsi3_13): shar r0 LOCAL(ashrsi3_12): shar r0 LOCAL(ashrsi3_11): shar r0 LOCAL(ashrsi3_10): shar r0 LOCAL(ashrsi3_9): shar r0 LOCAL(ashrsi3_8): shar r0 LOCAL(ashrsi3_7): shar r0 LOCAL(ashrsi3_6): shar r0 LOCAL(ashrsi3_5): shar r0 LOCAL(ashrsi3_4): shar r0 LOCAL(ashrsi3_3): shar r0 LOCAL(ashrsi3_2): shar r0 LOCAL(ashrsi3_1): rts shar r0 LOCAL(ashrsi3_0): rts nop ENDFUNC(GLOBAL(ashrsi3)) #endif #ifdef L_ashiftlt ! ! GLOBAL(ashlsi3) ! (For compatibility with older binaries, not used by compiler) ! ! Entry: ! r4: Value to shift ! r5: Shift count ! ! Exit: ! r0: Result ! ! Destroys: ! T bit ! ! ! GLOBAL(ashlsi3_r0) ! ! Entry: ! r4: Value to shift ! r0: Shift count ! ! Exit: ! r0: Result ! ! Destroys: ! 
T bit .global GLOBAL(ashlsi3) .global GLOBAL(ashlsi3_r0) HIDDEN_FUNC(GLOBAL(ashlsi3)) HIDDEN_FUNC(GLOBAL(ashlsi3_r0)) GLOBAL(ashlsi3): mov r5,r0 .align 2 GLOBAL(ashlsi3_r0): #ifdef __sh1__ and #31,r0 shll2 r0 mov.l r4,@-r15 mov r0,r4 mova LOCAL(ashlsi3_table),r0 add r4,r0 mov.l @r15+,r4 jmp @r0 mov r4,r0 .align 2 #else and #31,r0 shll2 r0 braf r0 mov r4,r0 #endif LOCAL(ashlsi3_table): rts // << 0 nop LOCAL(ashlsi_1): rts // << 1 shll r0 LOCAL(ashlsi_2): // << 2 rts shll2 r0 bra LOCAL(ashlsi_1) // << 3 shll2 r0 bra LOCAL(ashlsi_2) // << 4 shll2 r0 bra LOCAL(ashlsi_5) // << 5 shll r0 bra LOCAL(ashlsi_6) // << 6 shll2 r0 bra LOCAL(ashlsi_7) // << 7 shll r0 LOCAL(ashlsi_8): // << 8 rts shll8 r0 bra LOCAL(ashlsi_8) // << 9 shll r0 bra LOCAL(ashlsi_8) // << 10 shll2 r0 bra LOCAL(ashlsi_11) // << 11 shll r0 bra LOCAL(ashlsi_12) // << 12 shll2 r0 bra LOCAL(ashlsi_13) // << 13 shll r0 bra LOCAL(ashlsi_14) // << 14 shll8 r0 bra LOCAL(ashlsi_15) // << 15 shll8 r0 LOCAL(ashlsi_16): // << 16 rts shll16 r0 bra LOCAL(ashlsi_16) // << 17 shll r0 bra LOCAL(ashlsi_16) // << 18 shll2 r0 bra LOCAL(ashlsi_19) // << 19 shll r0 bra LOCAL(ashlsi_20) // << 20 shll2 r0 bra LOCAL(ashlsi_21) // << 21 shll r0 bra LOCAL(ashlsi_22) // << 22 shll16 r0 bra LOCAL(ashlsi_23) // << 23 shll16 r0 bra LOCAL(ashlsi_16) // << 24 shll8 r0 bra LOCAL(ashlsi_25) // << 25 shll r0 bra LOCAL(ashlsi_26) // << 26 shll2 r0 bra LOCAL(ashlsi_27) // << 27 shll r0 bra LOCAL(ashlsi_28) // << 28 shll2 r0 bra LOCAL(ashlsi_29) // << 29 shll16 r0 bra LOCAL(ashlsi_30) // << 30 shll16 r0 and #1,r0 // << 31 rts rotr r0 LOCAL(ashlsi_7): shll2 r0 LOCAL(ashlsi_5): LOCAL(ashlsi_6): shll2 r0 rts LOCAL(ashlsi_13): shll2 r0 LOCAL(ashlsi_12): LOCAL(ashlsi_11): shll8 r0 rts LOCAL(ashlsi_21): shll2 r0 LOCAL(ashlsi_20): LOCAL(ashlsi_19): shll16 r0 rts LOCAL(ashlsi_28): LOCAL(ashlsi_27): shll2 r0 LOCAL(ashlsi_26): LOCAL(ashlsi_25): shll16 r0 rts shll8 r0 LOCAL(ashlsi_22): LOCAL(ashlsi_14): shlr2 r0 rts shll8 r0 LOCAL(ashlsi_23): LOCAL(ashlsi_15): shlr r0 rts shll8 r0 LOCAL(ashlsi_29): shlr r0 LOCAL(ashlsi_30): shlr2 r0 rts shll16 r0 ENDFUNC(GLOBAL(ashlsi3)) ENDFUNC(GLOBAL(ashlsi3_r0)) #endif #ifdef L_lshiftrt ! ! GLOBAL(lshrsi3) ! (For compatibility with older binaries, not used by compiler) ! ! Entry: ! r4: Value to shift ! r5: Shift count ! ! Exit: ! r0: Result ! ! Destroys: ! T bit ! ! ! GLOBAL(lshrsi3_r0) ! ! Entry: ! r4: Value to shift ! r0: Shift count ! ! Exit: ! r0: Result ! ! Destroys: ! 
T bit .global GLOBAL(lshrsi3) .global GLOBAL(lshrsi3_r0) HIDDEN_FUNC(GLOBAL(lshrsi3)) HIDDEN_FUNC(GLOBAL(lshrsi3_r0)) GLOBAL(lshrsi3): mov r5,r0 .align 2 GLOBAL(lshrsi3_r0): #ifdef __sh1__ and #31,r0 shll2 r0 mov.l r4,@-r15 mov r0,r4 mova LOCAL(lshrsi3_table),r0 add r4,r0 mov.l @r15+,r4 jmp @r0 mov r4,r0 .align 2 #else and #31,r0 shll2 r0 braf r0 mov r4,r0 #endif LOCAL(lshrsi3_table): rts // >> 0 nop LOCAL(lshrsi_1): // >> 1 rts shlr r0 LOCAL(lshrsi_2): // >> 2 rts shlr2 r0 bra LOCAL(lshrsi_1) // >> 3 shlr2 r0 bra LOCAL(lshrsi_2) // >> 4 shlr2 r0 bra LOCAL(lshrsi_5) // >> 5 shlr r0 bra LOCAL(lshrsi_6) // >> 6 shlr2 r0 bra LOCAL(lshrsi_7) // >> 7 shlr r0 LOCAL(lshrsi_8): // >> 8 rts shlr8 r0 bra LOCAL(lshrsi_8) // >> 9 shlr r0 bra LOCAL(lshrsi_8) // >> 10 shlr2 r0 bra LOCAL(lshrsi_11) // >> 11 shlr r0 bra LOCAL(lshrsi_12) // >> 12 shlr2 r0 bra LOCAL(lshrsi_13) // >> 13 shlr r0 bra LOCAL(lshrsi_14) // >> 14 shlr8 r0 bra LOCAL(lshrsi_15) // >> 15 shlr8 r0 LOCAL(lshrsi_16): // >> 16 rts shlr16 r0 bra LOCAL(lshrsi_16) // >> 17 shlr r0 bra LOCAL(lshrsi_16) // >> 18 shlr2 r0 bra LOCAL(lshrsi_19) // >> 19 shlr r0 bra LOCAL(lshrsi_20) // >> 20 shlr2 r0 bra LOCAL(lshrsi_21) // >> 21 shlr r0 bra LOCAL(lshrsi_22) // >> 22 shlr16 r0 bra LOCAL(lshrsi_23) // >> 23 shlr16 r0 bra LOCAL(lshrsi_16) // >> 24 shlr8 r0 bra LOCAL(lshrsi_25) // >> 25 shlr r0 bra LOCAL(lshrsi_26) // >> 26 shlr2 r0 bra LOCAL(lshrsi_27) // >> 27 shlr r0 bra LOCAL(lshrsi_28) // >> 28 shlr2 r0 bra LOCAL(lshrsi_29) // >> 29 shlr16 r0 bra LOCAL(lshrsi_30) // >> 30 shlr16 r0 shll r0 // >> 31 rts movt r0 LOCAL(lshrsi_7): shlr2 r0 LOCAL(lshrsi_5): LOCAL(lshrsi_6): shlr2 r0 rts LOCAL(lshrsi_13): shlr2 r0 LOCAL(lshrsi_12): LOCAL(lshrsi_11): shlr8 r0 rts LOCAL(lshrsi_21): shlr2 r0 LOCAL(lshrsi_20): LOCAL(lshrsi_19): shlr16 r0 rts LOCAL(lshrsi_28): LOCAL(lshrsi_27): shlr2 r0 LOCAL(lshrsi_26): LOCAL(lshrsi_25): shlr16 r0 rts shlr8 r0 LOCAL(lshrsi_22): LOCAL(lshrsi_14): shll2 r0 rts shlr8 r0 LOCAL(lshrsi_23): LOCAL(lshrsi_15): shll r0 rts shlr8 r0 LOCAL(lshrsi_29): shll r0 LOCAL(lshrsi_30): shll2 r0 rts shlr16 r0 ENDFUNC(GLOBAL(lshrsi3)) ENDFUNC(GLOBAL(lshrsi3_r0)) #endif #ifdef L_movmem .text .balign 4 .global GLOBAL(movmem) HIDDEN_FUNC(GLOBAL(movmem)) HIDDEN_ALIAS(movstr,movmem) /* This would be a lot simpler if r6 contained the byte count minus 64, and we wouldn't be called here for a byte count of 64. */ GLOBAL(movmem): sts.l pr,@-r15 shll2 r6 bsr GLOBAL(movmemSI52+2) mov.l @(48,r5),r0 .balign 4 LOCAL(movmem_loop): /* Reached with rts */ mov.l @(60,r5),r0 add #-64,r6 mov.l r0,@(60,r4) tst r6,r6 mov.l @(56,r5),r0 bt LOCAL(movmem_done) mov.l r0,@(56,r4) cmp/pl r6 mov.l @(52,r5),r0 add #64,r5 mov.l r0,@(52,r4) add #64,r4 bt GLOBAL(movmemSI52) ! done all the large groups, do the remainder ! jump to movmem+ mova GLOBAL(movmemSI4)+4,r0 add r6,r0 jmp @r0 LOCAL(movmem_done): ! share slot insn, works out aligned. lds.l @r15+,pr mov.l r0,@(56,r4) mov.l @(52,r5),r0 rts mov.l r0,@(52,r4) .balign 4 ! ??? We need aliases movstr* for movmem* for the older libraries. These ! aliases will be removed at some point in the future. 
.global GLOBAL(movmemSI64) HIDDEN_FUNC(GLOBAL(movmemSI64)) HIDDEN_ALIAS(movstrSI64,movmemSI64) GLOBAL(movmemSI64): mov.l @(60,r5),r0 mov.l r0,@(60,r4) .global GLOBAL(movmemSI60) HIDDEN_FUNC(GLOBAL(movmemSI60)) HIDDEN_ALIAS(movstrSI60,movmemSI60) GLOBAL(movmemSI60): mov.l @(56,r5),r0 mov.l r0,@(56,r4) .global GLOBAL(movmemSI56) HIDDEN_FUNC(GLOBAL(movmemSI56)) HIDDEN_ALIAS(movstrSI56,movmemSI56) GLOBAL(movmemSI56): mov.l @(52,r5),r0 mov.l r0,@(52,r4) .global GLOBAL(movmemSI52) HIDDEN_FUNC(GLOBAL(movmemSI52)) HIDDEN_ALIAS(movstrSI52,movmemSI52) GLOBAL(movmemSI52): mov.l @(48,r5),r0 mov.l r0,@(48,r4) .global GLOBAL(movmemSI48) HIDDEN_FUNC(GLOBAL(movmemSI48)) HIDDEN_ALIAS(movstrSI48,movmemSI48) GLOBAL(movmemSI48): mov.l @(44,r5),r0 mov.l r0,@(44,r4) .global GLOBAL(movmemSI44) HIDDEN_FUNC(GLOBAL(movmemSI44)) HIDDEN_ALIAS(movstrSI44,movmemSI44) GLOBAL(movmemSI44): mov.l @(40,r5),r0 mov.l r0,@(40,r4) .global GLOBAL(movmemSI40) HIDDEN_FUNC(GLOBAL(movmemSI40)) HIDDEN_ALIAS(movstrSI40,movmemSI40) GLOBAL(movmemSI40): mov.l @(36,r5),r0 mov.l r0,@(36,r4) .global GLOBAL(movmemSI36) HIDDEN_FUNC(GLOBAL(movmemSI36)) HIDDEN_ALIAS(movstrSI36,movmemSI36) GLOBAL(movmemSI36): mov.l @(32,r5),r0 mov.l r0,@(32,r4) .global GLOBAL(movmemSI32) HIDDEN_FUNC(GLOBAL(movmemSI32)) HIDDEN_ALIAS(movstrSI32,movmemSI32) GLOBAL(movmemSI32): mov.l @(28,r5),r0 mov.l r0,@(28,r4) .global GLOBAL(movmemSI28) HIDDEN_FUNC(GLOBAL(movmemSI28)) HIDDEN_ALIAS(movstrSI28,movmemSI28) GLOBAL(movmemSI28): mov.l @(24,r5),r0 mov.l r0,@(24,r4) .global GLOBAL(movmemSI24) HIDDEN_FUNC(GLOBAL(movmemSI24)) HIDDEN_ALIAS(movstrSI24,movmemSI24) GLOBAL(movmemSI24): mov.l @(20,r5),r0 mov.l r0,@(20,r4) .global GLOBAL(movmemSI20) HIDDEN_FUNC(GLOBAL(movmemSI20)) HIDDEN_ALIAS(movstrSI20,movmemSI20) GLOBAL(movmemSI20): mov.l @(16,r5),r0 mov.l r0,@(16,r4) .global GLOBAL(movmemSI16) HIDDEN_FUNC(GLOBAL(movmemSI16)) HIDDEN_ALIAS(movstrSI16,movmemSI16) GLOBAL(movmemSI16): mov.l @(12,r5),r0 mov.l r0,@(12,r4) .global GLOBAL(movmemSI12) HIDDEN_FUNC(GLOBAL(movmemSI12)) HIDDEN_ALIAS(movstrSI12,movmemSI12) GLOBAL(movmemSI12): mov.l @(8,r5),r0 mov.l r0,@(8,r4) .global GLOBAL(movmemSI8) HIDDEN_FUNC(GLOBAL(movmemSI8)) HIDDEN_ALIAS(movstrSI8,movmemSI8) GLOBAL(movmemSI8): mov.l @(4,r5),r0 mov.l r0,@(4,r4) .global GLOBAL(movmemSI4) HIDDEN_FUNC(GLOBAL(movmemSI4)) HIDDEN_ALIAS(movstrSI4,movmemSI4) GLOBAL(movmemSI4): mov.l @(0,r5),r0 rts mov.l r0,@(0,r4) ENDFUNC(GLOBAL(movmemSI64)) ENDFUNC(GLOBAL(movmemSI60)) ENDFUNC(GLOBAL(movmemSI56)) ENDFUNC(GLOBAL(movmemSI52)) ENDFUNC(GLOBAL(movmemSI48)) ENDFUNC(GLOBAL(movmemSI44)) ENDFUNC(GLOBAL(movmemSI40)) ENDFUNC(GLOBAL(movmemSI36)) ENDFUNC(GLOBAL(movmemSI32)) ENDFUNC(GLOBAL(movmemSI28)) ENDFUNC(GLOBAL(movmemSI24)) ENDFUNC(GLOBAL(movmemSI20)) ENDFUNC(GLOBAL(movmemSI16)) ENDFUNC(GLOBAL(movmemSI12)) ENDFUNC(GLOBAL(movmemSI8)) ENDFUNC(GLOBAL(movmemSI4)) ENDFUNC(GLOBAL(movmem)) #endif #ifdef L_movmem_i4 .text .global GLOBAL(movmem_i4_even) .global GLOBAL(movmem_i4_odd) .global GLOBAL(movmemSI12_i4) HIDDEN_FUNC(GLOBAL(movmem_i4_even)) HIDDEN_FUNC(GLOBAL(movmem_i4_odd)) HIDDEN_FUNC(GLOBAL(movmemSI12_i4)) HIDDEN_ALIAS(movstr_i4_even,movmem_i4_even) HIDDEN_ALIAS(movstr_i4_odd,movmem_i4_odd) HIDDEN_ALIAS(movstrSI12_i4,movmemSI12_i4) .p2align 5 L_movmem_2mod4_end: mov.l r0,@(16,r4) rts mov.l r1,@(20,r4) .p2align 2 GLOBAL(movmem_i4_even): mov.l @r5+,r0 bra L_movmem_start_even mov.l @r5+,r1 GLOBAL(movmem_i4_odd): mov.l @r5+,r1 add #-4,r4 mov.l @r5+,r2 mov.l @r5+,r3 mov.l r1,@(4,r4) mov.l r2,@(8,r4) L_movmem_loop: mov.l r3,@(12,r4) dt r6 mov.l @r5+,r0 
bt/s L_movmem_2mod4_end mov.l @r5+,r1 add #16,r4 L_movmem_start_even: mov.l @r5+,r2 mov.l @r5+,r3 mov.l r0,@r4 dt r6 mov.l r1,@(4,r4) bf/s L_movmem_loop mov.l r2,@(8,r4) rts mov.l r3,@(12,r4) ENDFUNC(GLOBAL(movmem_i4_even)) ENDFUNC(GLOBAL(movmem_i4_odd)) .p2align 4 GLOBAL(movmemSI12_i4): mov.l @r5,r0 mov.l @(4,r5),r1 mov.l @(8,r5),r2 mov.l r0,@r4 mov.l r1,@(4,r4) rts mov.l r2,@(8,r4) ENDFUNC(GLOBAL(movmemSI12_i4)) #endif #ifdef L_mulsi3 .global GLOBAL(mulsi3) HIDDEN_FUNC(GLOBAL(mulsi3)) ! r4 = aabb ! r5 = ccdd ! r0 = aabb*ccdd via partial products ! ! if aa == 0 and cc == 0 ! r0 = bb*dd ! ! else ! r0 = bb*dd + (aa*dd*65536) + (cc*bb*65536) ! GLOBAL(mulsi3): mulu.w r4,r5 ! multiply the lsws macl=bb*dd mov r5,r3 ! r3 = ccdd swap.w r4,r2 ! r2 = bbaa xtrct r2,r3 ! r3 = aacc tst r3,r3 ! msws zero ? bf hiset rts ! yes - then we have the answer sts macl,r0 hiset: sts macl,r0 ! r0 = bb*dd mulu.w r2,r5 ! brewing macl = aa*dd sts macl,r1 mulu.w r3,r4 ! brewing macl = cc*bb sts macl,r2 add r1,r2 shll16 r2 rts add r2,r0 ENDFUNC(GLOBAL(mulsi3)) #endif /*------------------------------------------------------------------------------ 32 bit signed integer division that uses FPU double precision division. */ #ifdef L_sdivsi3_i4 .title "SH DIVIDE" #if defined (__SH4__) || defined (__SH2A__) /* This variant is used when FPSCR.PR = 1 (double precision) is the default setting. Args in r4 and r5, result in fpul, clobber dr0, dr2. */ .global GLOBAL(sdivsi3_i4) HIDDEN_FUNC(GLOBAL(sdivsi3_i4)) GLOBAL(sdivsi3_i4): lds r4,fpul float fpul,dr0 lds r5,fpul float fpul,dr2 fdiv dr2,dr0 rts ftrc dr0,fpul ENDFUNC(GLOBAL(sdivsi3_i4)) #elif defined (__SH2A_SINGLE__) || defined (__SH2A_SINGLE_ONLY__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) /* This variant is used when FPSCR.PR = 0 (single precision) is the default setting. Args in r4 and r5, result in fpul, clobber r2, dr0, dr2. For this to work, we must temporarily switch the FPU to double precision, but we had better not touch FPSCR.FR. See PR 6526. */ .global GLOBAL(sdivsi3_i4) HIDDEN_FUNC(GLOBAL(sdivsi3_i4)) GLOBAL(sdivsi3_i4): #ifndef __SH4A__ mov.l r3,@-r15 sts fpscr,r2 mov #8,r3 swap.w r3,r3 // r3 = 1 << 19 (FPSCR.PR bit) or r2,r3 lds r3,fpscr // Set FPSCR.PR = 1. lds r4,fpul float fpul,dr0 lds r5,fpul float fpul,dr2 fdiv dr2,dr0 ftrc dr0,fpul lds r2,fpscr rts mov.l @r15+,r3 #else /* On SH4A we can use the fpchg instruction to flip the FPSCR.PR bit. */ fpchg lds r4,fpul float fpul,dr0 lds r5,fpul float fpul,dr2 fdiv dr2,dr0 ftrc dr0,fpul rts fpchg #endif /* __SH4A__ */ ENDFUNC(GLOBAL(sdivsi3_i4)) #endif /* ! __SH4__ || __SH2A__ */ #endif /* L_sdivsi3_i4 */ //------------------------------------------------------------------------------ #ifdef L_sdivsi3 /* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with sh2e/sh3e code. */ !! !! Steve Chamberlain !! sac@cygnus.com !! !! !! 
args in r4 and r5, result in r0, clobber r1, r2, r3, and t bit .global GLOBAL(sdivsi3) .align 2 FUNC(GLOBAL(sdivsi3)) GLOBAL(sdivsi3): mov r4,r1 mov r5,r0 tst r0,r0 bt div0 mov #0,r2 div0s r2,r1 subc r3,r3 subc r2,r1 div0s r0,r3 .rept 32 rotcl r1 div1 r0,r3 .endr rotcl r1 addc r2,r1 rts mov r1,r0 div0: rts mov #0,r0 ENDFUNC(GLOBAL(sdivsi3)) #endif /* L_sdivsi3 */ /*------------------------------------------------------------------------------ 32 bit unsigned integer division that uses FPU double precision division. */ #ifdef L_udivsi3_i4 .title "SH DIVIDE" #if defined (__SH4__) || defined (__SH2A__) /* This variant is used when FPSCR.PR = 1 (double precision) is the default setting. Args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4, and t bit */ .global GLOBAL(udivsi3_i4) HIDDEN_FUNC(GLOBAL(udivsi3_i4)) GLOBAL(udivsi3_i4): mov #1,r1 cmp/hi r1,r5 bf/s trivial rotr r1 xor r1,r4 lds r4,fpul mova L1,r0 #ifdef FMOVD_WORKS fmov.d @r0+,dr4 #else fmov.s @r0+,DR40 fmov.s @r0,DR41 #endif float fpul,dr0 xor r1,r5 lds r5,fpul float fpul,dr2 fadd dr4,dr0 fadd dr4,dr2 fdiv dr2,dr0 rts ftrc dr0,fpul trivial: rts lds r4,fpul .align 2 #ifdef FMOVD_WORKS .align 3 // Make the double below 8 byte aligned. #endif L1: .double 2147483648 ENDFUNC(GLOBAL(udivsi3_i4)) #elif defined (__SH2A_SINGLE__) || defined (__SH2A_SINGLE_ONLY__) || defined(__SH4_SINGLE__) || defined(__SH4_SINGLE_ONLY__) /* This variant is used when FPSCR.PR = 0 (single precision) is the default setting. Args in r4 and r5, result in fpul, clobber r0, r1, r4, r5, dr0, dr2, dr4. For this to work, we must temporarily switch the FPU to double precision, but we had better not touch FPSCR.FR. See PR 6526. */ .global GLOBAL(udivsi3_i4) HIDDEN_FUNC(GLOBAL(udivsi3_i4)) GLOBAL(udivsi3_i4): #ifndef __SH4A__ mov #1,r1 cmp/hi r1,r5 bf/s trivial rotr r1 // r1 = 1 << 31 sts.l fpscr,@-r15 xor r1,r4 mov.l @(0,r15),r0 xor r1,r5 mov.l L2,r1 lds r4,fpul or r0,r1 mova L1,r0 lds r1,fpscr #ifdef FMOVD_WORKS fmov.d @r0+,dr4 #else fmov.s @r0+,DR40 fmov.s @r0,DR41 #endif float fpul,dr0 lds r5,fpul float fpul,dr2 fadd dr4,dr0 fadd dr4,dr2 fdiv dr2,dr0 ftrc dr0,fpul rts lds.l @r15+,fpscr #ifdef FMOVD_WORKS .align 3 // Make the double below 8 byte aligned. #endif trivial: rts lds r4,fpul .align 2 L2: #ifdef FMOVD_WORKS .long 0x180000 // FPSCR.PR = 1, FPSCR.SZ = 1 #else .long 0x80000 // FPSCR.PR = 1 #endif L1: .double 2147483648 #else /* On SH4A we can use the fpchg instruction to flip the FPSCR.PR bit. Although on SH4A fmovd usually works, it would require either two additional fschg instructions or an FPSCR push + pop. It's not worth the effort for loading only one double constant. 
*/ mov #1,r1 cmp/hi r1,r5 bf/s trivial rotr r1 // r1 = 1 << 31 fpchg mova L1,r0 xor r1,r4 fmov.s @r0+,DR40 lds r4,fpul fmov.s @r0,DR41 xor r1,r5 float fpul,dr0 lds r5,fpul float fpul,dr2 fadd dr4,dr0 fadd dr4,dr2 fdiv dr2,dr0 ftrc dr0,fpul rts fpchg trivial: rts lds r4,fpul .align 2 L1: .double 2147483648 #endif /* __SH4A__ */ ENDFUNC(GLOBAL(udivsi3_i4)) #endif /* ! __SH4__ */ #endif /* L_udivsi3_i4 */ #ifdef L_udivsi3 /* __SH4_SINGLE_ONLY__ keeps this part for link compatibility with sh2e/sh3e code. */ !! args in r4 and r5, result in r0, clobbers r4, pr, and t bit .global GLOBAL(udivsi3) HIDDEN_FUNC(GLOBAL(udivsi3)) LOCAL(div8): div1 r5,r4 LOCAL(div7): div1 r5,r4; div1 r5,r4; div1 r5,r4 div1 r5,r4; div1 r5,r4; div1 r5,r4; rts; div1 r5,r4 LOCAL(divx4): div1 r5,r4; rotcl r0 div1 r5,r4; rotcl r0 div1 r5,r4; rotcl r0 rts; div1 r5,r4 GLOBAL(udivsi3): sts.l pr,@-r15 extu.w r5,r0 cmp/eq r5,r0 #ifdef __sh1__ bf LOCAL(large_divisor) #else bf/s LOCAL(large_divisor) #endif div0u swap.w r4,r0 shlr16 r4 bsr LOCAL(div8) shll16 r5 bsr LOCAL(div7) div1 r5,r4 xtrct r4,r0 xtrct r0,r4 bsr LOCAL(div8) swap.w r4,r4 bsr LOCAL(div7) div1 r5,r4 lds.l @r15+,pr xtrct r4,r0 swap.w r0,r0 rotcl r0 rts shlr16 r5 LOCAL(large_divisor): #ifdef __sh1__ div0u #endif mov #0,r0 xtrct r4,r0 xtrct r0,r4 bsr LOCAL(divx4) rotcl r0 bsr LOCAL(divx4) rotcl r0 bsr LOCAL(divx4) rotcl r0 bsr LOCAL(divx4) rotcl r0 lds.l @r15+,pr rts rotcl r0 ENDFUNC(GLOBAL(udivsi3)) #endif /* L_udivsi3 */ #ifdef L_set_fpscr #if !defined (__SH2A_NOFPU__) #if defined (__SH2E__) || defined (__SH2A__) || defined (__SH3E__) || defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) .global GLOBAL(set_fpscr) HIDDEN_FUNC(GLOBAL(set_fpscr)) GLOBAL(set_fpscr): lds r4,fpscr #ifdef __PIC__ mov.l r12,@-r15 #ifdef __vxworks mov.l LOCAL(set_fpscr_L0_base),r12 mov.l LOCAL(set_fpscr_L0_index),r0 mov.l @r12,r12 mov.l @(r0,r12),r12 #else mova LOCAL(set_fpscr_L0),r0 mov.l LOCAL(set_fpscr_L0),r12 add r0,r12 #endif mov.l LOCAL(set_fpscr_L1),r0 mov.l @(r0,r12),r1 mov.l @r15+,r12 #else mov.l LOCAL(set_fpscr_L1),r1 #endif swap.w r4,r0 or #24,r0 #ifndef FMOVD_WORKS xor #16,r0 #endif #if defined(__SH4__) || defined (__SH2A_DOUBLE__) swap.w r0,r3 mov.l r3,@(4,r1) #else /* defined (__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */ swap.w r0,r2 mov.l r2,@r1 #endif #ifndef FMOVD_WORKS xor #8,r0 #else xor #24,r0 #endif #if defined(__SH4__) || defined (__SH2A_DOUBLE__) swap.w r0,r2 rts mov.l r2,@r1 #else /* defined(__SH2E__) || defined(__SH3E__) || defined(__SH4_SINGLE*__) */ swap.w r0,r3 rts mov.l r3,@(4,r1) #endif .align 2 #ifdef __PIC__ #ifdef __vxworks LOCAL(set_fpscr_L0_base): .long ___GOTT_BASE__ LOCAL(set_fpscr_L0_index): .long ___GOTT_INDEX__ #else LOCAL(set_fpscr_L0): .long _GLOBAL_OFFSET_TABLE_ #endif LOCAL(set_fpscr_L1): .long GLOBAL(fpscr_values@GOT) #else LOCAL(set_fpscr_L1): .long GLOBAL(fpscr_values) #endif ENDFUNC(GLOBAL(set_fpscr)) #ifndef NO_FPSCR_VALUES #ifdef __ELF__ .comm GLOBAL(fpscr_values),8,4 #else .comm GLOBAL(fpscr_values),8 #endif /* ELF */ #endif /* NO_FPSCR_VALUES */ #endif /* SH2E / SH3E / SH4 */ #endif /* __SH2A_NOFPU__ */ #endif /* L_set_fpscr */ #ifdef L_ic_invalidate #if defined(__SH4A__) .global GLOBAL(ic_invalidate) HIDDEN_FUNC(GLOBAL(ic_invalidate)) GLOBAL(ic_invalidate): ocbwb @r4 synco icbi @r4 rts nop ENDFUNC(GLOBAL(ic_invalidate)) #elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__) /* For system code, we use ic_invalidate_line_i, but user code needs a 
different mechanism. A kernel call is generally not available, and it would also be slow. Different SH4 variants use different sizes and associativities of the Icache. We use a small bit of dispatch code that can be hidden in every shared object, which calls the actual processor-specific invalidation code in a separate module. Or if you have operating system support, the OS could mmap the processor-specific code from a single page, since it is highly repetitive. */ .global GLOBAL(ic_invalidate) HIDDEN_FUNC(GLOBAL(ic_invalidate)) GLOBAL(ic_invalidate): #ifdef __pic__ #ifdef __vxworks mov.l 1f,r1 mov.l 2f,r0 mov.l @r1,r1 mov.l 0f,r2 mov.l @(r0,r1),r0 #else mov.l 1f,r1 mova 1f,r0 mov.l 0f,r2 add r1,r0 #endif mov.l @(r0,r2),r1 #else mov.l 0f,r1 #endif ocbwb @r4 mov.l @(8,r1),r0 sub r1,r4 and r4,r0 add r1,r0 jmp @r0 mov.l @(4,r1),r0 .align 2 #ifndef __pic__ 0: .long GLOBAL(ic_invalidate_array) #else /* __pic__ */ .global GLOBAL(ic_invalidate_array) 0: .long GLOBAL(ic_invalidate_array)@GOT #ifdef __vxworks 1: .long ___GOTT_BASE__ 2: .long ___GOTT_INDEX__ #else 1: .long _GLOBAL_OFFSET_TABLE_ #endif ENDFUNC(GLOBAL(ic_invalidate)) #endif /* __pic__ */ #endif /* SH4 */ #endif /* L_ic_invalidate */ #ifdef L_ic_invalidate_array #if defined(__SH4A__) || (defined (__FORCE_SH4A__) && (defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__))) .global GLOBAL(ic_invalidate_array) /* This is needed when an SH4 dso with trampolines is used on SH4A. */ .global GLOBAL(ic_invalidate_array) FUNC(GLOBAL(ic_invalidate_array)) GLOBAL(ic_invalidate_array): add r1,r4 synco icbi @r4 rts nop .align 2 .long 0 ENDFUNC(GLOBAL(ic_invalidate_array)) #elif defined(__SH4_SINGLE__) || defined(__SH4__) || defined(__SH4_SINGLE_ONLY__) || defined(__SH4_NOFPU__) .global GLOBAL(ic_invalidate_array) .p2align 5 FUNC(GLOBAL(ic_invalidate_array)) /* This must be aligned to the beginning of a cache line. */ GLOBAL(ic_invalidate_array): #ifndef WAYS #define WAYS 4 #define WAY_SIZE 0x4000 #endif #if WAYS == 1 .rept WAY_SIZE * WAYS / 32 rts nop .rept 7 .long WAY_SIZE - 32 .endr .endr #elif WAYS <= 6 .rept WAY_SIZE * WAYS / 32 braf r0 add #-8,r0 .long WAY_SIZE + 8 .long WAY_SIZE - 32 .rept WAYS-2 braf r0 nop .endr .rept 7 - WAYS rts nop .endr .endr #else /* WAYS > 6 */ /* This variant needs two different pages for mmap-ing. */ .rept WAYS-1 .rept WAY_SIZE / 32 braf r0 nop .long WAY_SIZE .rept 6 .long WAY_SIZE - 32 .endr .endr .endr .rept WAY_SIZE / 32 rts .rept 15 nop .endr .endr #endif /* WAYS */ ENDFUNC(GLOBAL(ic_invalidate_array)) #endif /* SH4 */ #endif /* L_ic_invalidate_array */ #ifdef L_div_table #if defined (__SH2A__) || defined (__SH3__) || defined (__SH3E__) || defined (__SH4__) || defined (__SH4_SINGLE__) || defined (__SH4_SINGLE_ONLY__) || defined (__SH4_NOFPU__) /* This code uses shld, thus is not suitable for SH1 / SH2. */ /* Signed / unsigned division without use of FPU, optimized for SH4. Uses a lookup table for divisors in the range -128 .. +128, and div1 with case distinction for larger divisors in three more ranges. The code is lumped together with the table to allow the use of mova. 
*/ #ifdef __LITTLE_ENDIAN__ #define L_LSB 0 #define L_LSWMSB 1 #define L_MSWLSB 2 #else #define L_LSB 3 #define L_LSWMSB 2 #define L_MSWLSB 1 #endif .balign 4 .global GLOBAL(udivsi3_i4i) FUNC(GLOBAL(udivsi3_i4i)) GLOBAL(udivsi3_i4i): mov.w LOCAL(c128_w), r1 div0u mov r4,r0 shlr8 r0 cmp/hi r1,r5 extu.w r5,r1 bf LOCAL(udiv_le128) cmp/eq r5,r1 bf LOCAL(udiv_ge64k) shlr r0 mov r5,r1 shll16 r5 mov.l r4,@-r15 div1 r5,r0 mov.l r1,@-r15 div1 r5,r0 div1 r5,r0 bra LOCAL(udiv_25) div1 r5,r0 LOCAL(div_le128): mova LOCAL(div_table_ix),r0 bra LOCAL(div_le128_2) mov.b @(r0,r5),r1 LOCAL(udiv_le128): mov.l r4,@-r15 mova LOCAL(div_table_ix),r0 mov.b @(r0,r5),r1 mov.l r5,@-r15 LOCAL(div_le128_2): mova LOCAL(div_table_inv),r0 mov.l @(r0,r1),r1 mov r5,r0 tst #0xfe,r0 mova LOCAL(div_table_clz),r0 dmulu.l r1,r4 mov.b @(r0,r5),r1 bt/s LOCAL(div_by_1) mov r4,r0 mov.l @r15+,r5 sts mach,r0 /* clrt */ addc r4,r0 mov.l @r15+,r4 rotcr r0 rts shld r1,r0 LOCAL(div_by_1_neg): neg r4,r0 LOCAL(div_by_1): mov.l @r15+,r5 rts mov.l @r15+,r4 LOCAL(div_ge64k): bt/s LOCAL(div_r8) div0u shll8 r5 bra LOCAL(div_ge64k_2) div1 r5,r0 LOCAL(udiv_ge64k): cmp/hi r0,r5 mov r5,r1 bt LOCAL(udiv_r8) shll8 r5 mov.l r4,@-r15 div1 r5,r0 mov.l r1,@-r15 LOCAL(div_ge64k_2): div1 r5,r0 mov.l LOCAL(zero_l),r1 .rept 4 div1 r5,r0 .endr mov.l r1,@-r15 div1 r5,r0 mov.w LOCAL(m256_w),r1 div1 r5,r0 mov.b r0,@(L_LSWMSB,r15) xor r4,r0 and r1,r0 bra LOCAL(div_ge64k_end) xor r4,r0 LOCAL(div_r8): shll16 r4 bra LOCAL(div_r8_2) shll8 r4 LOCAL(udiv_r8): mov.l r4,@-r15 shll16 r4 clrt shll8 r4 mov.l r5,@-r15 LOCAL(div_r8_2): rotcl r4 mov r0,r1 div1 r5,r1 mov r4,r0 rotcl r0 mov r5,r4 div1 r5,r1 .rept 5 rotcl r0; div1 r5,r1 .endr rotcl r0 mov.l @r15+,r5 div1 r4,r1 mov.l @r15+,r4 rts rotcl r0 ENDFUNC(GLOBAL(udivsi3_i4i)) .global GLOBAL(sdivsi3_i4i) FUNC(GLOBAL(sdivsi3_i4i)) /* This is link-compatible with a GLOBAL(sdivsi3) call, but we effectively clobber only r1. */ GLOBAL(sdivsi3_i4i): mov.l r4,@-r15 cmp/pz r5 mov.w LOCAL(c128_w), r1 bt/s LOCAL(pos_divisor) cmp/pz r4 mov.l r5,@-r15 neg r5,r5 bt/s LOCAL(neg_result) cmp/hi r1,r5 neg r4,r4 LOCAL(pos_result): extu.w r5,r0 bf LOCAL(div_le128) cmp/eq r5,r0 mov r4,r0 shlr8 r0 bf/s LOCAL(div_ge64k) cmp/hi r0,r5 div0u shll16 r5 div1 r5,r0 div1 r5,r0 div1 r5,r0 LOCAL(udiv_25): mov.l LOCAL(zero_l),r1 div1 r5,r0 div1 r5,r0 mov.l r1,@-r15 .rept 3 div1 r5,r0 .endr mov.b r0,@(L_MSWLSB,r15) xtrct r4,r0 swap.w r0,r0 .rept 8 div1 r5,r0 .endr mov.b r0,@(L_LSWMSB,r15) LOCAL(div_ge64k_end): .rept 8 div1 r5,r0 .endr mov.l @r15+,r4 ! zero-extension and swap using LS unit. extu.b r0,r0 mov.l @r15+,r5 or r4,r0 mov.l @r15+,r4 rts rotcl r0 LOCAL(div_le128_neg): tst #0xfe,r0 mova LOCAL(div_table_ix),r0 mov.b @(r0,r5),r1 mova LOCAL(div_table_inv),r0 bt/s LOCAL(div_by_1_neg) mov.l @(r0,r1),r1 mova LOCAL(div_table_clz),r0 dmulu.l r1,r4 mov.b @(r0,r5),r1 mov.l @r15+,r5 sts mach,r0 /* clrt */ addc r4,r0 mov.l @r15+,r4 rotcr r0 shld r1,r0 rts neg r0,r0 LOCAL(pos_divisor): mov.l r5,@-r15 bt/s LOCAL(pos_result) cmp/hi r1,r5 neg r4,r4 LOCAL(neg_result): extu.w r5,r0 bf LOCAL(div_le128_neg) cmp/eq r5,r0 mov r4,r0 shlr8 r0 bf/s LOCAL(div_ge64k_neg) cmp/hi r0,r5 div0u mov.l LOCAL(zero_l),r1 shll16 r5 div1 r5,r0 mov.l r1,@-r15 .rept 7 div1 r5,r0 .endr mov.b r0,@(L_MSWLSB,r15) xtrct r4,r0 swap.w r0,r0 .rept 8 div1 r5,r0 .endr mov.b r0,@(L_LSWMSB,r15) LOCAL(div_ge64k_neg_end): .rept 8 div1 r5,r0 .endr mov.l @r15+,r4 ! zero-extension and swap using LS unit. 
extu.b r0,r1 mov.l @r15+,r5 or r4,r1 LOCAL(div_r8_neg_end): mov.l @r15+,r4 rotcl r1 rts neg r1,r0 LOCAL(div_ge64k_neg): bt/s LOCAL(div_r8_neg) div0u shll8 r5 mov.l LOCAL(zero_l),r1 .rept 6 div1 r5,r0 .endr mov.l r1,@-r15 div1 r5,r0 mov.w LOCAL(m256_w),r1 div1 r5,r0 mov.b r0,@(L_LSWMSB,r15) xor r4,r0 and r1,r0 bra LOCAL(div_ge64k_neg_end) xor r4,r0 LOCAL(c128_w): .word 128 LOCAL(div_r8_neg): clrt shll16 r4 mov r4,r1 shll8 r1 mov r5,r4 .rept 7 rotcl r1; div1 r5,r0 .endr mov.l @r15+,r5 rotcl r1 bra LOCAL(div_r8_neg_end) div1 r4,r0 LOCAL(m256_w): .word 0xff00 /* This table has been generated by divtab-sh4.c. */ .balign 4 LOCAL(div_table_clz): .byte 0 .byte 1 .byte 0 .byte -1 .byte -1 .byte -2 .byte -2 .byte -2 .byte -2 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -3 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -4 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -5 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 .byte -6 /* Lookup table translating positive divisor to index into table of normalized inverse. N.B. the '0' entry is also the last entry of the previous table, and causes an unaligned access for division by zero. */ LOCAL(div_table_ix): .byte -6 .byte -128 .byte -128 .byte 0 .byte -128 .byte -64 .byte 0 .byte 64 .byte -128 .byte -96 .byte -64 .byte -32 .byte 0 .byte 32 .byte 64 .byte 96 .byte -128 .byte -112 .byte -96 .byte -80 .byte -64 .byte -48 .byte -32 .byte -16 .byte 0 .byte 16 .byte 32 .byte 48 .byte 64 .byte 80 .byte 96 .byte 112 .byte -128 .byte -120 .byte -112 .byte -104 .byte -96 .byte -88 .byte -80 .byte -72 .byte -64 .byte -56 .byte -48 .byte -40 .byte -32 .byte -24 .byte -16 .byte -8 .byte 0 .byte 8 .byte 16 .byte 24 .byte 32 .byte 40 .byte 48 .byte 56 .byte 64 .byte 72 .byte 80 .byte 88 .byte 96 .byte 104 .byte 112 .byte 120 .byte -128 .byte -124 .byte -120 .byte -116 .byte -112 .byte -108 .byte -104 .byte -100 .byte -96 .byte -92 .byte -88 .byte -84 .byte -80 .byte -76 .byte -72 .byte -68 .byte -64 .byte -60 .byte -56 .byte -52 .byte -48 .byte -44 .byte -40 .byte -36 .byte -32 .byte -28 .byte -24 .byte -20 .byte -16 .byte -12 .byte -8 .byte -4 .byte 0 .byte 4 .byte 8 .byte 12 .byte 16 .byte 20 .byte 24 .byte 28 .byte 32 .byte 36 .byte 40 .byte 44 .byte 48 .byte 52 .byte 56 .byte 60 .byte 64 .byte 68 .byte 72 .byte 76 .byte 80 .byte 84 .byte 88 .byte 92 .byte 96 .byte 100 .byte 104 .byte 108 .byte 112 .byte 116 .byte 120 .byte 124 .byte -128 /* 1/64 .. 1/127, normalized. There is an implicit leading 1 in bit 32. 
*/ .balign 4 LOCAL(zero_l): .long 0x0 .long 0xF81F81F9 .long 0xF07C1F08 .long 0xE9131AC0 .long 0xE1E1E1E2 .long 0xDAE6076C .long 0xD41D41D5 .long 0xCD856891 .long 0xC71C71C8 .long 0xC0E07039 .long 0xBACF914D .long 0xB4E81B4F .long 0xAF286BCB .long 0xA98EF607 .long 0xA41A41A5 .long 0x9EC8E952 .long 0x9999999A .long 0x948B0FCE .long 0x8F9C18FA .long 0x8ACB90F7 .long 0x86186187 .long 0x81818182 .long 0x7D05F418 .long 0x78A4C818 .long 0x745D1746 .long 0x702E05C1 .long 0x6C16C16D .long 0x68168169 .long 0x642C8591 .long 0x60581606 .long 0x5C9882BA .long 0x58ED2309 LOCAL(div_table_inv): .long 0x55555556 .long 0x51D07EAF .long 0x4E5E0A73 .long 0x4AFD6A06 .long 0x47AE147B .long 0x446F8657 .long 0x41414142 .long 0x3E22CBCF .long 0x3B13B13C .long 0x38138139 .long 0x3521CFB3 .long 0x323E34A3 .long 0x2F684BDB .long 0x2C9FB4D9 .long 0x29E4129F .long 0x27350B89 .long 0x24924925 .long 0x21FB7813 .long 0x1F7047DD .long 0x1CF06ADB .long 0x1A7B9612 .long 0x18118119 .long 0x15B1E5F8 .long 0x135C8114 .long 0x11111112 .long 0xECF56BF .long 0xC9714FC .long 0xA6810A7 .long 0x8421085 .long 0x624DD30 .long 0x4104105 .long 0x2040811 /* maximum error: 0.987342 scaled: 0.921875*/ ENDFUNC(GLOBAL(sdivsi3_i4i)) #endif /* SH3 / SH4 */ #endif /* L_div_table */ #ifdef L_udiv_qrnnd_16 HIDDEN_FUNC(GLOBAL(udiv_qrnnd_16)) /* r0: rn r1: qn */ /* r0: n1 r4: n0 r5: d r6: d1 */ /* r2: __m */ /* n1 < d, but n1 might be larger than d1. */ .global GLOBAL(udiv_qrnnd_16) .balign 8 GLOBAL(udiv_qrnnd_16): div0u cmp/hi r6,r0 bt .Lots .rept 16 div1 r6,r0 .endr extu.w r0,r1 bt 0f add r6,r0 0: rotcl r1 mulu.w r1,r5 xtrct r4,r0 swap.w r0,r0 sts macl,r2 cmp/hs r2,r0 sub r2,r0 bt 0f addc r5,r0 add #-1,r1 bt 0f 1: add #-1,r1 rts add r5,r0 .balign 8 .Lots: sub r5,r0 swap.w r4,r1 xtrct r0,r1 clrt mov r1,r0 addc r5,r0 mov #-1,r1 SL1(bf, 1b, shlr16 r1) 0: rts nop ENDFUNC(GLOBAL(udiv_qrnnd_16)) #endif /* L_udiv_qrnnd_16 */
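The small-divisor path above turns division into a widening multiply: dmulu.l forms the upper 32 bits of the dividend times a normalized inverse, addc folds in the table entry's implicit leading 1, rotcr halves the 33-bit sum through the carry, and shld applies the shift count looked up in div_table_clz. The following is a minimal C sketch of the same reciprocal-multiplication idea, written generically rather than as a bit-exact model of the tables; the helper name and the rounded-up inverse are illustrative assumptions, not part of the source.

#include <stdint.h>

/* Divide by a small divisor d (2 <= d <= 128 here; d == 1 is
   special-cased in the assembly above) via a scaled reciprocal.  */
static uint32_t div_by_mul(uint32_t n, uint32_t d)
{
    uint32_t inv = (0xFFFFFFFFu / d) + 1;             /* ceil(2^32 / d) */
    uint32_t q = (uint32_t)(((uint64_t)inv * n) >> 32);
    if ((uint64_t)q * d > n)   /* inv was rounded up; q can overshoot by 1 */
        q--;
    return q;
}

Because inv overestimates 2^32/d by less than d/n of an ulp, the raw high word is at most one too large, so a single trial subtraction makes the result exact for every 32-bit n.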
4ms/metamodule-plugin-sdk
6,384
plugin-libc/libgcc/config/sh/lib1funcs-Os-4-200.S
/* Copyright (C) 2006-2022 Free Software Foundation, Inc. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* Moderately Space-optimized libgcc routines for the Renesas SH / STMicroelectronics ST40 CPUs. Contributed by J"orn Rennecke joern.rennecke@st.com. */ #include "lib1funcs.h" #ifdef L_udivsi3_i4i /* 88 bytes; sh4-200 cycle counts: divisor >= 2G: 11 cycles dividend < 2G: 48 cycles dividend >= 2G: divisor != 1: 54 cycles dividend >= 2G, divisor == 1: 22 cycles */ #if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__) !! args in r4 and r5, result in r0, clobber r1 .global GLOBAL(udivsi3_i4i) FUNC(GLOBAL(udivsi3_i4i)) GLOBAL(udivsi3_i4i): mova L1,r0 cmp/pz r5 sts fpscr,r1 lds.l @r0+,fpscr sts.l fpul,@-r15 bf LOCAL(huge_divisor) mov.l r1,@-r15 lds r4,fpul cmp/pz r4 #ifdef FMOVD_WORKS fmov.d dr0,@-r15 float fpul,dr0 fmov.d dr2,@-r15 bt LOCAL(dividend_adjusted) mov #1,r1 fmov.d @r0,dr2 cmp/eq r1,r5 bt LOCAL(div_by_1) fadd dr2,dr0 LOCAL(dividend_adjusted): lds r5,fpul float fpul,dr2 fdiv dr2,dr0 LOCAL(div_by_1): fmov.d @r15+,dr2 ftrc dr0,fpul fmov.d @r15+,dr0 #else /* !FMOVD_WORKS */ fmov.s DR01,@-r15 mov #1,r1 fmov.s DR00,@-r15 float fpul,dr0 fmov.s DR21,@-r15 bt/s LOCAL(dividend_adjusted) fmov.s DR20,@-r15 cmp/eq r1,r5 bt LOCAL(div_by_1) fmov.s @r0+,DR20 fmov.s @r0,DR21 fadd dr2,dr0 LOCAL(dividend_adjusted): lds r5,fpul float fpul,dr2 fdiv dr2,dr0 LOCAL(div_by_1): fmov.s @r15+,DR20 fmov.s @r15+,DR21 ftrc dr0,fpul fmov.s @r15+,DR00 fmov.s @r15+,DR01 #endif /* !FMOVD_WORKS */ lds.l @r15+,fpscr sts fpul,r0 rts lds.l @r15+,fpul #ifdef FMOVD_WORKS .p2align 3 ! make double below 8 byte aligned. #endif LOCAL(huge_divisor): lds r1,fpscr add #4,r15 cmp/hs r5,r4 rts movt r0 .p2align 2 L1: #ifndef FMOVD_WORKS .long 0x80000 #else .long 0x180000 #endif .double 4294967296 ENDFUNC(GLOBAL(udivsi3_i4i)) #elif !defined (__sh1__) /* !__SH_FPU_DOUBLE__ */ #if 0 /* With 36 bytes, the following would probably be the most compact implementation, but with 139 cycles on an sh4-200, it is extremely slow. */ GLOBAL(udivsi3_i4i): mov.l r2,@-r15 mov #0,r1 div0u mov r1,r2 mov.l r3,@-r15 mov r1,r3 sett mov r4,r0 LOCAL(loop): rotcr r2 ; bt/s LOCAL(end) cmp/gt r2,r3 rotcl r0 bra LOCAL(loop) div1 r5,r1 LOCAL(end): rotcl r0 mov.l @r15+,r3 rts mov.l @r15+,r2 #endif /* 0 */ /* Size: 186 bytes jointly for udivsi3_i4i and sdivsi3_i4i sh4-200 run times: udiv small divisor: 55 cycles udiv large divisor: 52 cycles sdiv small divisor, positive result: 59 cycles sdiv large divisor, positive result: 56 cycles sdiv small divisor, negative result: 65 cycles (*) sdiv large divisor, negative result: 62 cycles (*) (*): r2 is restored in the rts delay slot and has a lingering latency of two more cycles. 
*/ .balign 4 .global GLOBAL(udivsi3_i4i) FUNC(GLOBAL(udivsi3_i4i)) FUNC(GLOBAL(sdivsi3_i4i)) GLOBAL(udivsi3_i4i): sts pr,r1 mov.l r4,@-r15 extu.w r5,r0 cmp/eq r5,r0 swap.w r4,r0 shlr16 r4 bf/s LOCAL(large_divisor) div0u mov.l r5,@-r15 shll16 r5 LOCAL(sdiv_small_divisor): div1 r5,r4 bsr LOCAL(div6) div1 r5,r4 div1 r5,r4 bsr LOCAL(div6) div1 r5,r4 xtrct r4,r0 xtrct r0,r4 bsr LOCAL(div7) swap.w r4,r4 div1 r5,r4 bsr LOCAL(div7) div1 r5,r4 xtrct r4,r0 mov.l @r15+,r5 swap.w r0,r0 mov.l @r15+,r4 jmp @r1 rotcl r0 LOCAL(div7): div1 r5,r4 LOCAL(div6): div1 r5,r4; div1 r5,r4; div1 r5,r4 div1 r5,r4; div1 r5,r4; rts; div1 r5,r4 LOCAL(divx3): rotcl r0 div1 r5,r4 rotcl r0 div1 r5,r4 rotcl r0 rts div1 r5,r4 LOCAL(large_divisor): mov.l r5,@-r15 LOCAL(sdiv_large_divisor): xor r4,r0 .rept 4 rotcl r0 bsr LOCAL(divx3) div1 r5,r4 .endr mov.l @r15+,r5 mov.l @r15+,r4 jmp @r1 rotcl r0 ENDFUNC(GLOBAL(udivsi3_i4i)) .global GLOBAL(sdivsi3_i4i) GLOBAL(sdivsi3_i4i): mov.l r4,@-r15 cmp/pz r5 mov.l r5,@-r15 bt/s LOCAL(pos_divisor) cmp/pz r4 neg r5,r5 extu.w r5,r0 bt/s LOCAL(neg_result) cmp/eq r5,r0 neg r4,r4 LOCAL(pos_result): swap.w r4,r0 bra LOCAL(sdiv_check_divisor) sts pr,r1 LOCAL(pos_divisor): extu.w r5,r0 bt/s LOCAL(pos_result) cmp/eq r5,r0 neg r4,r4 LOCAL(neg_result): mova LOCAL(negate_result),r0 ; mov r0,r1 swap.w r4,r0 lds r2,macl sts pr,r2 LOCAL(sdiv_check_divisor): shlr16 r4 bf/s LOCAL(sdiv_large_divisor) div0u bra LOCAL(sdiv_small_divisor) shll16 r5 .balign 4 LOCAL(negate_result): neg r0,r0 jmp @r2 sts macl,r2 ENDFUNC(GLOBAL(sdivsi3_i4i)) #endif /* !__SH_FPU_DOUBLE__ */ #endif /* L_udivsi3_i4i */ #ifdef L_sdivsi3_i4i #if defined (__SH_FPU_DOUBLE__) || defined (__SH4_SINGLE_ONLY__) /* 48 bytes, 45 cycles on sh4-200 */ !! args in r4 and r5, result in r0, clobber r1 .global GLOBAL(sdivsi3_i4i) FUNC(GLOBAL(sdivsi3_i4i)) GLOBAL(sdivsi3_i4i): sts.l fpscr,@-r15 sts fpul,r1 mova L1,r0 lds.l @r0+,fpscr lds r4,fpul #ifdef FMOVD_WORKS fmov.d dr0,@-r15 float fpul,dr0 lds r5,fpul fmov.d dr2,@-r15 #else fmov.s DR01,@-r15 fmov.s DR00,@-r15 float fpul,dr0 lds r5,fpul fmov.s DR21,@-r15 fmov.s DR20,@-r15 #endif float fpul,dr2 fdiv dr2,dr0 #ifdef FMOVD_WORKS fmov.d @r15+,dr2 #else fmov.s @r15+,DR20 fmov.s @r15+,DR21 #endif ftrc dr0,fpul #ifdef FMOVD_WORKS fmov.d @r15+,dr0 #else fmov.s @r15+,DR00 fmov.s @r15+,DR01 #endif lds.l @r15+,fpscr sts fpul,r0 rts lds r1,fpul .p2align 2 L1: #ifndef FMOVD_WORKS .long 0x80000 #else .long 0x180000 #endif ENDFUNC(GLOBAL(sdivsi3_i4i)) #endif /* __SH_FPU_DOUBLE__ */ #endif /* L_sdivsi3_i4i */
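A rough C model of the FPU-based udivsi3_i4i above (the __SH_FPU_DOUBLE__ variant): both operands are converted to double, compensating for the signed int conversion by adding the 4294967296.0 constant when the top bit is set, then divided and truncated with ftrc. Divisors with the top bit set take the huge_divisor shortcut, where the quotient can only be 0 or 1. The function name is illustrative; this is a sketch of the algorithm, not of the register-level FPSCR handling.

#include <stdint.h>

static uint32_t udiv_via_double(uint32_t n, uint32_t d)
{
    if ((int32_t)d < 0)            /* divisor >= 2^31: quotient is 0 or 1 */
        return n >= d;
    double dn = (double)(int32_t)n;
    if ((int32_t)n < 0)
        dn += 4294967296.0;        /* undo the signed interpretation */
    return (uint32_t)(dn / (double)d);   /* ftrc truncates toward zero */
}

Since both operands fit in 32 bits and a double carries 53 significand bits, the rounded quotient can never cross an integer boundary, so the truncation is exact.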
4ms/metamodule-plugin-sdk
7,018
plugin-libc/libgcc/config/cris/umulsidi3.S
;; Copyright (C) 2001-2022 Free Software Foundation, Inc. ;; ;; This file is part of GCC. ;; ;; GCC is free software; you can redistribute it and/or modify it under ;; the terms of the GNU General Public License as published by the Free ;; Software Foundation; either version 3, or (at your option) any later ;; version. ;; ;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY ;; WARRANTY; without even the implied warranty of MERCHANTABILITY or ;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ;; for more details. ;; ;; Under Section 7 of GPL version 3, you are granted additional ;; permissions described in the GCC Runtime Library Exception, version ;; 3.1, as published by the Free Software Foundation. ;; ;; You should have received a copy of the GNU General Public License and ;; a copy of the GCC Runtime Library Exception along with this program; ;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ;; <http://www.gnu.org/licenses/>. ;; ;; This code is derived from mulsi3.S, observing that the mstep*16-based ;; multiplications there, from which it is formed, are actually ;; zero-extending; in gcc-speak "umulhisi3". The difference to *this* ;; function is just a missing top mstep*16 sequence and shifts and 64-bit ;; additions for the high part. Compared to an implementation based on ;; calling __Mul four times (see default implementation of umul_ppmm in ;; longlong.h), this will complete in a time between a fourth and a third ;; of that, assuming the value-based optimizations don't strike. If they ;; all strike there (very often) but none here, we still win, though by a ;; lesser margin, due to lesser total overhead. #define L(x) .x #define CONCAT1(a, b) CONCAT2(a, b) #define CONCAT2(a, b) a ## b #ifdef __USER_LABEL_PREFIX__ # define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) #else # define SYM(x) x #endif .global SYM(__umulsidi3) .type SYM(__umulsidi3),@function SYM(__umulsidi3): #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10 ;; Can't have the mulu.d last on a cache-line, due to a hardware bug. See ;; the documentation for -mmul-bug-workaround. ;; Not worthwhile to conditionalize here. .p2alignw 2,0x050f mulu.d $r11,$r10 ret move $mof,$r11 #else move.d $r11,$r9 bound.d $r10,$r9 cmpu.w 65535,$r9 bls L(L3) move.d $r10,$r12 move.d $r10,$r13 movu.w $r11,$r9 ; ab*cd = (a*c)<<32 (a*d + b*c)<<16 + b*d ;; We're called for floating point numbers very often with the "low" 16 ;; bits zero, so it's worthwhile to optimize for that. beq L(L6) ; d == 0? lslq 16,$r13 beq L(L7) ; b == 0? clear.w $r10 mstep $r9,$r13 ; d*b mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 L(L7): test.d $r10 mstep $r9,$r10 ; d*a mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 ;; d*a in $r10, d*b in $r13, ab in $r12 and cd in $r11 ;; $r9 = d, need to do b*c and a*c; we can drop d. ;; so $r9 is up for use and we can shift down $r11 as the mstep ;; source for the next mstep-part. L(L8): lsrq 16,$r11 move.d $r12,$r9 lslq 16,$r9 beq L(L9) ; b == 0? 
mstep $r11,$r9 mstep $r11,$r9 ; b*c mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 mstep $r11,$r9 L(L9): ;; d*a in $r10, d*b in $r13, c*b in $r9, ab in $r12 and c in $r11, ;; need to do a*c. We want that to end up in $r11, so we shift up $r11 to ;; now use as the destination operand. We'd need a test insn to update N ;; to do it the other way round. lsrq 16,$r12 lslq 16,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 mstep $r12,$r11 ;; d*a in $r10, d*b in $r13, c*b in $r9, a*c in $r11 ($r12 free). ;; Need (a*d + b*c)<<16 + b*d into $r10 and ;; a*c + (a*d + b*c)>>16 plus carry from the additions into $r11. add.d $r9,$r10 ; (a*d + b*c) - may produce a carry. scs $r12 ; The carry corresponds to bit 16 of $r11. lslq 16,$r12 add.d $r12,$r11 ; $r11 = a*c + carry from (a*d + b*c). #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 8 swapw $r10 addu.w $r10,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry. clear.w $r10 ; $r10 = (a*d + b*c) << 16 #else move.d $r10,$r9 lsrq 16,$r9 add.d $r9,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry. lslq 16,$r10 ; $r10 = (a*d + b*c) << 16 #endif add.d $r13,$r10 ; $r10 = (a*d + b*c) << 16 + b*d - may produce a carry. scs $r9 ret add.d $r9,$r11 ; Last carry added to the high-order 32 bits. L(L6): clear.d $r13 ba L(L8) clear.d $r10 L(L11): clear.d $r10 ret clear.d $r11 L(L3): ;; Form the maximum in $r10, by knowing the minimum, $r9. ;; (We don't know which one of $r10 or $r11 it is.) ;; Check if the largest operand is still just 16 bits. xor $r9,$r10 xor $r11,$r10 cmpu.w 65535,$r10 bls L(L5) movu.w $r9,$r13 ;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0 ;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10. ;; Remember that the upper part of (a*d)<<16 goes into the lower part ;; of $r11 and there may be a carry from adding the low 32 parts. beq L(L11) ; d == 0? move.d $r10,$r9 lslq 16,$r9 beq L(L10) ; b == 0? clear.w $r10 mstep $r13,$r9 ; b*d mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 L(L10): test.d $r10 mstep $r13,$r10 ; a*d mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 move.d $r10,$r11 lsrq 16,$r11 lslq 16,$r10 add.d $r9,$r10 scs $r12 ret add.d $r12,$r11 L(L5): ;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0 ;; so b*d (with min=b=$r13, max=d=$r10) it is. As it won't overflow the ;; 32-bit part, just set $r11 to 0. lslq 16,$r10 clear.d $r11 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 ret mstep $r13,$r10 #endif L(Lfe1): .size SYM(__umulsidi3),L(Lfe1)-SYM(__umulsidi3)
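The partial-product scheme the comments above describe, written out in portable C. The 64-bit intermediate absorbs the carry out of (a*d + b*c) that the assembly tracks explicitly with scs; the function name is illustrative.

#include <stdint.h>

static uint64_t umulsidi3_model(uint32_t x, uint32_t y)
{
    uint32_t a = x >> 16, b = x & 0xffff;   /* x = ab */
    uint32_t c = y >> 16, d = y & 0xffff;   /* y = cd */
    uint64_t lo  = (uint64_t)b * d;
    uint64_t mid = (uint64_t)a * d + (uint64_t)b * c;  /* may need 33 bits */
    uint64_t hi  = (uint64_t)a * c;
    /* ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d */
    return (hi << 32) + (mid << 16) + lo;
}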
4ms/metamodule-plugin-sdk
7,172
plugin-libc/libgcc/config/cris/mulsi3.S
;; Copyright (C) 2001-2022 Free Software Foundation, Inc. ;; ;; This file is part of GCC. ;; ;; GCC is free software; you can redistribute it and/or modify it under ;; the terms of the GNU General Public License as published by the Free ;; Software Foundation; either version 3, or (at your option) any later ;; version. ;; ;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY ;; WARRANTY; without even the implied warranty of MERCHANTABILITY or ;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ;; for more details. ;; ;; Under Section 7 of GPL version 3, you are granted additional ;; permissions described in the GCC Runtime Library Exception, version ;; 3.1, as published by the Free Software Foundation. ;; ;; You should have received a copy of the GNU General Public License and ;; a copy of the GCC Runtime Library Exception along with this program; ;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ;; <http://www.gnu.org/licenses/>. ;; ;; This code used to be expanded through interesting expansions in ;; the machine description, compiled from this code: ;; ;; #ifdef L_mulsi3 ;; long __Mul (unsigned long a, unsigned long b) __attribute__ ((__const__)); ;; ;; /* This must be compiled with the -mexpand-mul flag, to synthesize the ;; multiplication from the mstep instructions. The check for ;; smaller-size multiplication pays off in the order of .5-10%; ;; estimated median 1%, depending on application. ;; FIXME: It can be further optimized if we go to assembler code, as ;; gcc 2.7.2 adds a few unnecessary instructions and does not put the ;; basic blocks in optimal order. */ ;; long ;; __Mul (unsigned long a, unsigned long b) ;; { ;; #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10 ;; /* In case other code is compiled without -march=v10, they will ;; contain calls to __Mul, regardless of flags at link-time. The ;; "else"-code below will work, but is unnecessarily slow. This ;; sometimes cuts a few minutes off from simulation time by just ;; returning a "mulu.d". */ ;; return a * b; ;; #else ;; unsigned long min; ;; ;; /* Get minimum via the bound insn. */ ;; min = a < b ? a : b; ;; ;; /* Can we omit computation of the high part? */ ;; if (min > 65535) ;; /* No. Perform full multiplication. */ ;; return a * b; ;; else ;; { ;; /* Check if both operands are within 16 bits. */ ;; unsigned long max; ;; ;; /* Get maximum, by knowing the minimum. ;; This will partition a and b into max and min. ;; This is not currently something GCC understands, ;; so do this trick by asm. */ ;; __asm__ ("xor %1,%0\n\txor %2,%0" ;; : "=r" (max) ;; : "r" (b), "r" (a), "0" (min)); ;; ;; if (max > 65535) ;; /* Make GCC understand that only the low part of "min" will be ;; used. */ ;; return max * (unsigned short) min; ;; else ;; /* Only the low parts of both operands are necessary. */ ;; return ((unsigned short) max) * (unsigned short) min; ;; } ;; #endif /* not __CRIS_arch_version >= 10 */ ;; } ;; #endif /* L_mulsi3 */ ;; ;; That approach was abandoned since the caveats outweighed the ;; benefits. The expand-multiplication machinery is also removed, so you ;; can't do this anymore. ;; ;; For doubters of there being any benefits, some were: insensitivity to: ;; - ABI changes (mostly for experimentation) ;; - assembler syntax differences (mostly debug format). ;; - insn scheduling issues. 
;; Most ABI experiments will presumably happen with arches with mul insns, ;; so that argument doesn't really hold anymore, and it's unlikely there ;; being new arch variants needing insn scheduling and not having mul ;; insns. ;; ELF and a.out have different syntax for local labels: the "wrong" ;; one may not be omitted from the object. #undef L #ifdef __AOUT__ # define L(x) x #else # define L(x) .x #endif .global ___Mul .type ___Mul,@function ___Mul: #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10 ;; Can't have the mulu.d last on a cache-line (in the delay-slot of the ;; "ret"), due to hardware bug. See documentation for -mmul-bug-workaround. ;; Not worthwhile to conditionalize here. .p2alignw 2,0x050f mulu.d $r11,$r10 ret nop #else ;; See if we can avoid multiplying some of the parts, knowing ;; they're zero. move.d $r11,$r9 bound.d $r10,$r9 cmpu.w 65535,$r9 bls L(L3) move.d $r10,$r12 ;; Nope, have to do all the parts of a 32-bit multiplication. ;; See head comment in optabs.c:expand_doubleword_mult. move.d $r10,$r13 movu.w $r11,$r9 ; ab*cd = (a*d + b*c)<<16 + b*d lslq 16,$r13 mstep $r9,$r13 ; d*b mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 mstep $r9,$r13 clear.w $r10 test.d $r10 mstep $r9,$r10 ; d*a mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 mstep $r9,$r10 movu.w $r12,$r12 clear.w $r11 move.d $r11,$r9 ; Doubles as a "test.d" preparing for the mstep. mstep $r12,$r9 ; b*c mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 mstep $r12,$r9 add.w $r9,$r10 lslq 16,$r10 ret add.d $r13,$r10 L(L3): ;; Form the maximum in $r10, by knowing the minimum, $r9. ;; (We don't know which one of $r10 or $r11 it is.) ;; Check if the largest operand is still just 16 bits. xor $r9,$r10 xor $r11,$r10 cmpu.w 65535,$r10 bls L(L5) movu.w $r9,$r13 ;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0 ;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10. ;; We drop the upper part of (a*d)<<16 as we're only doing a ;; 32-bit-result multiplication. move.d $r10,$r9 lslq 16,$r9 mstep $r13,$r9 ; b*d mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 mstep $r13,$r9 clear.w $r10 test.d $r10 mstep $r13,$r10 ; a*d mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 lslq 16,$r10 ret add.d $r9,$r10 L(L5): ;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0 ;; so b*d (with b=$r13, a=$r10) it is. lslq 16,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 mstep $r13,$r10 ret mstep $r13,$r10 #endif L(Lfe1): .size ___Mul,L(Lfe1)-___Mul
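What a single mstep does is the key to all of the sequences above: per the CRIS architecture description, it shifts the destination left one bit and adds the source operand if the N flag (the sign of the previous result) was set. Below is a rough C model of the "lslq 16 plus sixteen msteps" idiom; exactness relies on the source operand fitting in 16 bits, which the movu.w/lsrq setup in the code above guarantees. The function name is illustrative.

#include <stdint.h>

static uint32_t mstep_mul_16x16(uint32_t b, uint32_t src)
{
    /* Models "lslq 16,Rd" followed by sixteen "mstep Rs,Rd": b is the
       16-bit multiplier, src the (at most 16-bit) multiplicand.  The
       partial product grows in the low bits while the remaining
       multiplier bits shift out the top; src < 2^16 keeps them from
       colliding.  */
    uint32_t acc = b << 16;            /* lslq 16 also primes the N flag */
    for (int i = 0; i < 16; i++) {
        uint32_t n_flag = acc >> 31;   /* N = sign of the previous result */
        acc <<= 1;
        if (n_flag)
            acc += src;
    }
    return acc;                        /* == (b & 0xffff) * src */
}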
4ms/metamodule-plugin-sdk
5,835
plugin-libc/libgcc/config/arm/bpabi-v6m.S
/* Miscellaneous BPABI functions. Thumb-1 implementation, suitable for ARMv4T, ARMv6-M and ARMv8-M Baseline like ISA variants. Copyright (C) 2006-2022 Free Software Foundation, Inc. Contributed by CodeSourcery. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ #ifdef L_aeabi_lcmp FUNC_START aeabi_lcmp cmp xxh, yyh beq 1f bgt 2f movs r0, #1 negs r0, r0 RET 2: movs r0, #1 RET 1: subs r0, xxl, yyl beq 1f bhi 2f movs r0, #1 negs r0, r0 RET 2: movs r0, #1 1: RET FUNC_END aeabi_lcmp #endif /* L_aeabi_lcmp */ #ifdef L_aeabi_ulcmp FUNC_START aeabi_ulcmp cmp xxh, yyh bne 1f subs r0, xxl, yyl beq 2f 1: bcs 1f movs r0, #1 negs r0, r0 RET 1: movs r0, #1 2: RET FUNC_END aeabi_ulcmp #endif /* L_aeabi_ulcmp */ .macro test_div_by_zero signed cmp yyh, #0 bne 7f cmp yyl, #0 bne 7f cmp xxh, #0 .ifc \signed, unsigned bne 2f cmp xxl, #0 2: beq 3f movs xxh, #0 mvns xxh, xxh @ 0xffffffff movs xxl, xxh 3: .else blt 6f bgt 4f cmp xxl, #0 beq 5f 4: movs xxl, #0 mvns xxl, xxl @ 0xffffffff lsrs xxh, xxl, #1 @ 0x7fffffff b 5f 6: movs xxh, #0x80 lsls xxh, xxh, #24 @ 0x80000000 movs xxl, #0 5: .endif @ tailcalls are tricky on v6-m. push {r0, r1, r2} ldr r0, 1f adr r1, 1f adds r0, r1 str r0, [sp, #8] @ We know we are not on armv4t, so pop pc is safe. pop {r0, r1, pc} .align 2 1: .word __aeabi_ldiv0 - 1b 7: .endm #ifdef L_aeabi_ldivmod FUNC_START aeabi_ldivmod test_div_by_zero signed push {r0, r1} mov r0, sp push {r0, lr} ldr r0, [sp, #8] bl SYM(__gnu_ldivmod_helper) ldr r3, [sp, #4] mov lr, r3 add sp, sp, #8 pop {r2, r3} RET FUNC_END aeabi_ldivmod #endif /* L_aeabi_ldivmod */ #ifdef L_aeabi_uldivmod FUNC_START aeabi_uldivmod test_div_by_zero unsigned push {r0, r1} mov r0, sp push {r0, lr} ldr r0, [sp, #8] bl SYM(__udivmoddi4) ldr r3, [sp, #4] mov lr, r3 add sp, sp, #8 pop {r2, r3} RET FUNC_END aeabi_uldivmod #endif /* L_aeabi_uldivmod */ #ifdef L_arm_addsubsf3 FUNC_START aeabi_frsub push {r4, lr} movs r4, #1 lsls r4, #31 eors r0, r0, r4 bl __aeabi_fadd pop {r4, pc} FUNC_END aeabi_frsub #endif /* L_arm_addsubsf3 */ #ifdef L_arm_cmpsf2 FUNC_START aeabi_cfrcmple mov ip, r0 movs r0, r1 mov r1, ip b 6f FUNC_START aeabi_cfcmpeq FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq @ The status-returning routines are required to preserve all @ registers except ip, lr, and cpsr. 6: push {r0, r1, r2, r3, r4, lr} bl __lesf2 @ Set the Z flag correctly, and the C flag unconditionally. 
cmp r0, #0 @ Clear the C flag if the return value was -1, indicating @ that the first operand was smaller than the second. bmi 1f movs r1, #0 cmn r0, r1 1: pop {r0, r1, r2, r3, r4, pc} FUNC_END aeabi_cfcmple FUNC_END aeabi_cfcmpeq FUNC_END aeabi_cfrcmple FUNC_START aeabi_fcmpeq push {r4, lr} bl __eqsf2 negs r0, r0 adds r0, r0, #1 pop {r4, pc} FUNC_END aeabi_fcmpeq .macro COMPARISON cond, helper, mode=sf2 FUNC_START aeabi_fcmp\cond push {r4, lr} bl __\helper\mode cmp r0, #0 b\cond 1f movs r0, #0 pop {r4, pc} 1: movs r0, #1 pop {r4, pc} FUNC_END aeabi_fcmp\cond .endm COMPARISON lt, le COMPARISON le, le COMPARISON gt, ge COMPARISON ge, ge #endif /* L_arm_cmpsf2 */ #ifdef L_arm_addsubdf3 FUNC_START aeabi_drsub push {r4, lr} movs r4, #1 lsls r4, #31 eors xxh, xxh, r4 bl __aeabi_dadd pop {r4, pc} FUNC_END aeabi_drsub #endif /* L_arm_addsubdf3 */ #ifdef L_arm_cmpdf2 FUNC_START aeabi_cdrcmple mov ip, r0 movs r0, r2 mov r2, ip mov ip, r1 movs r1, r3 mov r3, ip b 6f FUNC_START aeabi_cdcmpeq FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq @ The status-returning routines are required to preserve all @ registers except ip, lr, and cpsr. 6: push {r0, r1, r2, r3, r4, lr} bl __ledf2 @ Set the Z flag correctly, and the C flag unconditionally. cmp r0, #0 @ Clear the C flag if the return value was -1, indicating @ that the first operand was smaller than the second. bmi 1f movs r1, #0 cmn r0, r1 1: pop {r0, r1, r2, r3, r4, pc} FUNC_END aeabi_cdcmple FUNC_END aeabi_cdcmpeq FUNC_END aeabi_cdrcmple FUNC_START aeabi_dcmpeq push {r4, lr} bl __eqdf2 negs r0, r0 adds r0, r0, #1 pop {r4, pc} FUNC_END aeabi_dcmpeq .macro COMPARISON cond, helper, mode=df2 FUNC_START aeabi_dcmp\cond push {r4, lr} bl __\helper\mode cmp r0, #0 b\cond 1f movs r0, #0 pop {r4, pc} 1: movs r0, #1 pop {r4, pc} FUNC_END aeabi_dcmp\cond .endm COMPARISON lt, le COMPARISON le, le COMPARISON gt, ge COMPARISON ge, ge #endif /* L_arm_cmpdf2 */
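The test_div_by_zero macro above arranges these values in xxh:xxl before tail-calling __aeabi_ldiv0: saturated extremes for a signed division by zero, all-ones for an unsigned one, and zero when the numerator is also zero. A C restatement of that convention; the helper names are illustrative.

#include <limits.h>

static long long signed_div0_value(long long numerator)
{
    if (numerator > 0)
        return LLONG_MAX;   /* 0x7fffffff:ffffffff */
    if (numerator < 0)
        return LLONG_MIN;   /* 0x80000000:00000000 */
    return 0;               /* 0 / 0 -> 0 */
}

static unsigned long long unsigned_div0_value(unsigned long long numerator)
{
    return numerator ? ULLONG_MAX : 0;
}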
4ms/metamodule-plugin-sdk
2,567
plugin-libc/libgcc/config/arm/crtn.S
# Copyright (C) 2001-2022 Free Software Foundation, Inc. # Written By Nick Clifton # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # This file is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. /* An executable stack is *not* required for these functions. */ #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ # This file just makes sure that the .fini and .init sections do in # fact return. Users may put any desired instructions in those sections. # This file is the last thing linked into any executable. # Note - this macro is complemented by the FUNC_START macro # in crti.S. If you change this macro you must also change # that macro to match. # # Note - we do not try any fancy optimizations of the return # sequences here, it is just not worth it. Instead keep things # simple. Restore all the saved registers, including the link # register and then perform the correct function return instruction. # We also save/restore r3 to ensure stack alignment. .macro FUNC_END #ifdef __thumb__ .thumb pop {r3, r4, r5, r6, r7} pop {r3} mov lr, r3 #else .arm sub sp, fp, #40 ldmfd sp, {r4, r5, r6, r7, r8, r9, sl, fp, sp, lr} #endif #if defined __THUMB_INTERWORK__ || defined __thumb__ bx lr #else mov pc, lr #endif .endm .section ".init" ;; FUNC_END .section ".fini" ;; FUNC_END # end of crtn.S
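The macro above only supplies epilogues; the matching prologues live in crti.S, and the link editor's concatenation of .init/.fini section fragments is what turns the pair into complete functions. A conceptual C sketch of the resulting contract; the driver function is illustrative and not part of crtn.S.

/* _init and _fini end up as ordinary functions: crti.S prologue,
   then every object's .init/.fini fragments, then the FUNC_END
   epilogue above.  */
extern void _init(void);
extern void _fini(void);

static int run_with_init_fini(int (*real_main)(void))
{
    _init();                /* run the code accumulated in .init */
    int rc = real_main();
    _fini();                /* run the code accumulated in .fini */
    return rc;
}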
4ms/metamodule-plugin-sdk
3,926
plugin-libc/libgcc/config/arm/cmse_nonsecure_call.S
/* CMSE wrapper function used to save, clear and restore callee saved registers for cmse_nonsecure_call's. Copyright (C) 2016-2022 Free Software Foundation, Inc. Contributed by ARM Ltd. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ .syntax unified #ifdef __ARM_PCS_VFP # if (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1) .fpu fpv5-d16 # else .fpu fpv4-sp-d16 # endif #endif .thumb .global __gnu_cmse_nonsecure_call __gnu_cmse_nonsecure_call: #if defined(__ARM_ARCH_8M_MAIN__) push {r5-r11,lr} mov r7, r4 mov r8, r4 mov r9, r4 mov r10, r4 mov r11, r4 mov ip, r4 /* Save and clear callee-saved registers only if we are dealing with hard float ABI. The unused caller-saved registers have already been cleared by GCC generated code. */ #ifdef __ARM_PCS_VFP vpush.f64 {d8-d15} mov r5, #0 vmov d8, r5, r5 #if __ARM_FP & 0x04 vmov s18, s19, r5, r5 vmov s20, s21, r5, r5 vmov s22, s23, r5, r5 vmov s24, s25, r5, r5 vmov s26, s27, r5, r5 vmov s28, s29, r5, r5 vmov s30, s31, r5, r5 #elif (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1) vmov.f64 d9, d8 vmov.f64 d10, d8 vmov.f64 d11, d8 vmov.f64 d12, d8 vmov.f64 d13, d8 vmov.f64 d14, d8 vmov.f64 d15, d8 #else #error "Half precision implementation not supported." #endif /* Clear the cumulative exception-status bits (0-4,7) and the condition code bits (28-31) of the FPSCR. */ vmrs r5, fpscr movw r6, #65376 movt r6, #4095 ands r5, r6 vmsr fpscr, r5 /* We are not dealing with hard float ABI, so we can safely use the vlstm and vlldm instructions without needing to preserve the registers used for argument passing. */ #else sub sp, sp, #0x88 /* Reserve stack space to save all floating point registers, including FPSCR. */ vlstm sp /* Lazy store and clearance of d0-d16 and FPSCR. */ #endif /* __ARM_PCS_VFP */ /* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD instructions are available. */ #if defined(__ARM_FEATURE_SIMD32) msr APSR_nzcvqg, r4 #else msr APSR_nzcvq, r4 #endif mov r5, r4 mov r6, r4 blxns r4 #ifdef __ARM_PCS_VFP vpop.f64 {d8-d15} #else /* VLLDM erratum mitigation sequence. */ mrs r5, control tst r5, #8 /* CONTROL_S.SFPA */ it ne .inst.w 0xeeb00a40 /* vmovne s0, s0 */ vlldm sp /* Lazy restore of d0-d16 and FPSCR. */ add sp, sp, #0x88 /* Free space used to save floating point registers. */ #endif /* __ARM_PCS_VFP */ pop {r5-r11, pc} #elif defined (__ARM_ARCH_8M_BASE__) push {r5-r7, lr} mov r5, r8 mov r6, r9 mov r7, r10 push {r5-r7} mov r5, r11 push {r5} mov r5, r4 mov r6, r4 mov r7, r4 mov r8, r4 mov r9, r4 mov r10, r4 mov r11, r4 mov ip, r4 msr APSR_nzcvq, r4 blxns r4 pop {r5} mov r11, r5 pop {r5-r7} mov r10, r7 mov r9, r6 mov r8, r5 pop {r5-r7, pc} #else #error "This should only be used for armv8-m base- and mainline." #endif
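The FPSCR scrub performed above with the movw/movt pair, checked in C: the constants build 0x0FFFFF60, which clears the cumulative exception flags IOC..IXC (bits 0-4), IDC (bit 7) and the N, Z, C, V condition codes (bits 28-31) while leaving the control bits in between untouched. The function name is illustrative.

#include <stdint.h>

static uint32_t scrub_fpscr(uint32_t fpscr)
{
    /* movt r6, #4095 ; movw r6, #65376  ==>  r6 = 0x0FFFFF60 */
    const uint32_t mask = ((uint32_t)4095 << 16) | 65376;
    return fpscr & mask;
}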
4ms/metamodule-plugin-sdk
5,907
plugin-libc/libgcc/config/arm/bpabi.S
/* Miscellaneous BPABI functions. Copyright (C) 2003-2022 Free Software Foundation, Inc. Contributed by CodeSourcery, LLC. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ .cfi_sections .debug_frame #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ #ifdef L_aeabi_lcmp ARM_FUNC_START aeabi_lcmp cmp xxh, yyh do_it lt movlt r0, #-1 do_it gt movgt r0, #1 do_it ne RETc(ne) subs r0, xxl, yyl do_it lo movlo r0, #-1 do_it hi movhi r0, #1 RET FUNC_END aeabi_lcmp #endif /* L_aeabi_lcmp */ #ifdef L_aeabi_ulcmp ARM_FUNC_START aeabi_ulcmp cmp xxh, yyh do_it lo movlo r0, #-1 do_it hi movhi r0, #1 do_it ne RETc(ne) cmp xxl, yyl do_it lo movlo r0, #-1 do_it hi movhi r0, #1 do_it eq moveq r0, #0 RET FUNC_END aeabi_ulcmp #endif /* L_aeabi_ulcmp */ .macro test_div_by_zero signed /* Tail-call to divide-by-zero handlers which may be overridden by the user, so unwinding works properly. */ #if defined(__thumb2__) cbnz yyh, 2f cbnz yyl, 2f cmp xxh, #0 .ifc \signed, unsigned do_it eq cmpeq xxl, #0 do_it ne, t movne xxh, #0xffffffff movne xxl, #0xffffffff .else do_it lt, tt movlt xxl, #0 movlt xxh, #0x80000000 blt 1f do_it eq cmpeq xxl, #0 do_it ne, t movne xxh, #0x7fffffff movne xxl, #0xffffffff .endif 1: b SYM (__aeabi_ldiv0) __PLT__ 2: #else /* Note: Thumb-1 code calls via an ARM shim on processors which support ARM mode. */ cmp yyh, #0 cmpeq yyl, #0 bne 2f cmp xxh, #0 .ifc \signed, unsigned cmpeq xxl, #0 movne xxh, #0xffffffff movne xxl, #0xffffffff .else movlt xxh, #0x80000000 movlt xxl, #0 blt 1f cmpeq xxl, #0 movne xxh, #0x7fffffff movne xxl, #0xffffffff .endif 1: b SYM (__aeabi_ldiv0) __PLT__ 2: #endif .endm /* we can use STRD/LDRD on v5TE and later, and any Thumb-2 architecture. */ #if (defined(__ARM_EABI__) \ && (defined(__thumb2__) \ || (__ARM_ARCH >= 5 && defined(__TARGET_FEATURE_DSP)))) #define CAN_USE_LDRD 1 #else #define CAN_USE_LDRD 0 #endif /* set up stack frame for the call to __udivmoddi4. At the end of the macro the stack is arranged as follows: sp+12 / space for remainder sp+8 \ (written by __udivmoddi4) sp+4 lr sp+0 sp+8 [rp (remainder pointer) argument for __udivmoddi4] */ .macro push_for_divide fname #if defined(__thumb2__) && CAN_USE_LDRD sub ip, sp, #8 strd ip, lr, [sp, #-16]! 
#else sub sp, sp, #8 do_push {sp, lr} #endif .cfi_adjust_cfa_offset 16 .cfi_offset 14, -12 .endm /* restore stack */ .macro pop_for_divide ldr lr, [sp, #4] #if CAN_USE_LDRD ldrd r2, r3, [sp, #8] add sp, sp, #16 #else add sp, sp, #8 do_pop {r2, r3} #endif .cfi_restore 14 .cfi_adjust_cfa_offset 0 .endm #ifdef L_aeabi_ldivmod /* Perform 64 bit signed division. Inputs: r0:r1 numerator r2:r3 denominator Outputs: r0:r1 quotient r2:r3 remainder */ ARM_FUNC_START aeabi_ldivmod .cfi_startproc test_div_by_zero signed push_for_divide __aeabi_ldivmod cmp xxh, #0 blt 1f cmp yyh, #0 blt 2f /* arguments in (r0:r1), (r2:r3) and *sp */ bl SYM(__udivmoddi4) __PLT__ .cfi_remember_state pop_for_divide RET 1: /* xxh:xxl is negative */ .cfi_restore_state negs xxl, xxl sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ cmp yyh, #0 blt 3f /* arguments in (r0:r1), (r2:r3) and *sp */ bl SYM(__udivmoddi4) __PLT__ .cfi_remember_state pop_for_divide negs xxl, xxl sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ negs yyl, yyl sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ RET 2: /* only yyh:yyl is negative */ .cfi_restore_state negs yyl, yyl sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ /* arguments in (r0:r1), (r2:r3) and *sp */ bl SYM(__udivmoddi4) __PLT__ .cfi_remember_state pop_for_divide negs xxl, xxl sbc xxh, xxh, xxh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ RET 3: /* both xxh:xxl and yyh:yyl are negative */ .cfi_restore_state negs yyl, yyl sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ /* arguments in (r0:r1), (r2:r3) and *sp */ bl SYM(__udivmoddi4) __PLT__ pop_for_divide negs yyl, yyl sbc yyh, yyh, yyh, lsl #1 /* Thumb-2 has no RSC, so use X - 2X */ RET .cfi_endproc #endif /* L_aeabi_ldivmod */ #ifdef L_aeabi_uldivmod /* Perform 64 bit unsigned division. Inputs: r0:r1 numerator r2:r3 denominator Outputs: r0:r1 quotient r2:r3 remainder */ ARM_FUNC_START aeabi_uldivmod .cfi_startproc test_div_by_zero unsigned push_for_divide __aeabi_uldivmod /* arguments in (r0:r1), (r2:r3) and *sp */ bl SYM(__udivmoddi4) __PLT__ pop_for_divide RET .cfi_endproc #endif /* L_aeabi_uldivmod */
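The four sign cases __aeabi_ldivmod dispatches to above, folded into portable C around the same __udivmoddi4 worker: the quotient is negated when the operand signs differ, while the remainder takes the sign of the numerator, which is why case 2 (only the denominator negative) negates the quotient but leaves the remainder alone. The wrapper name is illustrative; the __udivmoddi4 signature is the libgcc one used by the assembly.

#include <stdint.h>

extern uint64_t __udivmoddi4(uint64_t n, uint64_t d, uint64_t *rem);

static int64_t ldivmod_model(int64_t n, int64_t d, int64_t *rem)
{
    uint64_t un = n < 0 ? -(uint64_t)n : (uint64_t)n;
    uint64_t ud = d < 0 ? -(uint64_t)d : (uint64_t)d;
    uint64_t ur;
    uint64_t uq = __udivmoddi4(un, ud, &ur);
    *rem = n < 0 ? -(int64_t)ur : (int64_t)ur;         /* sign of numerator */
    return (n < 0) != (d < 0) ? -(int64_t)uq : (int64_t)uq;
}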
4ms/metamodule-plugin-sdk
23,200
plugin-libc/libgcc/config/arm/ieee754-sf.S
/* ieee754-sf.S single-precision floating point support for ARM Copyright (C) 2003-2022 Free Software Foundation, Inc. Contributed by Nicolas Pitre (nico@fluxnic.net) This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* * Notes: * * The goal of this code is to be as fast as possible. This is * not meant to be easy to understand for the casual reader. * * Only the default rounding mode is supported, for best performance. * Exceptions aren't supported yet, but that can be added quite easily * if necessary without impacting performance. * * In the CFI related comments, 'previousOffset' refers to the previous offset * from sp used to compute the CFA. */ #ifdef L_arm_negsf2 ARM_FUNC_START negsf2 ARM_FUNC_ALIAS aeabi_fneg negsf2 CFI_START_FUNCTION eor r0, r0, #0x80000000 @ flip sign bit RET CFI_END_FUNCTION FUNC_END aeabi_fneg FUNC_END negsf2 #endif #ifdef L_arm_addsubsf3 ARM_FUNC_START aeabi_frsub CFI_START_FUNCTION eor r0, r0, #0x80000000 @ flip sign bit of first arg b 1f ARM_FUNC_START subsf3 ARM_FUNC_ALIAS aeabi_fsub subsf3 eor r1, r1, #0x80000000 @ flip sign bit of second arg #if defined(__INTERWORKING_STUBS__) b 1f @ Skip Thumb-code prologue #endif ARM_FUNC_START addsf3 ARM_FUNC_ALIAS aeabi_fadd addsf3 1: @ Look for zeroes, equal values, INF, or NAN. movs r2, r0, lsl #1 do_it ne, ttt COND(mov,s,ne) r3, r1, lsl #1 teqne r2, r3 COND(mvn,s,ne) ip, r2, asr #24 COND(mvn,s,ne) ip, r3, asr #24 beq LSYM(Lad_s) @ Compute exponent difference. Make largest exponent in r2, @ corresponding arg in r0, and positive exponent difference in r3. mov r2, r2, lsr #24 rsbs r3, r2, r3, lsr #24 do_it gt, ttt addgt r2, r2, r3 eorgt r1, r0, r1 eorgt r0, r1, r0 eorgt r1, r0, r1 do_it lt rsblt r3, r3, #0 @ If exponent difference is too large, return largest argument @ already in r0. We need up to 25 bit to handle proper rounding @ of 0x1p25 - 1.1. cmp r3, #25 do_it hi RETc(hi) @ Convert mantissa to signed integer. tst r0, #0x80000000 orr r0, r0, #0x00800000 bic r0, r0, #0xff000000 do_it ne rsbne r0, r0, #0 tst r1, #0x80000000 orr r1, r1, #0x00800000 bic r1, r1, #0xff000000 do_it ne rsbne r1, r1, #0 @ If exponent == difference, one or both args were denormalized. @ Since this is not common case, rescale them off line. teq r2, r3 beq LSYM(Lad_d) LSYM(Lad_x): @ Compensate for the exponent overlapping the mantissa MSB added later sub r2, r2, #1 @ Shift and add second arg to first arg in r0. @ Keep leftover bits into r1. 
shiftop adds r0 r0 r1 asr r3 ip rsb r3, r3, #32 shift1 lsl, r1, r1, r3 @ Keep absolute value in r0-r1, sign in r3 (the n bit was set above) and r3, r0, #0x80000000 bpl LSYM(Lad_p) #if defined(__thumb2__) negs r1, r1 sbc r0, r0, r0, lsl #1 #else rsbs r1, r1, #0 rsc r0, r0, #0 #endif @ Determine how to normalize the result. LSYM(Lad_p): cmp r0, #0x00800000 bcc LSYM(Lad_a) cmp r0, #0x01000000 bcc LSYM(Lad_e) @ Result needs to be shifted right. movs r0, r0, lsr #1 mov r1, r1, rrx add r2, r2, #1 @ Make sure we did not bust our exponent. cmp r2, #254 bhs LSYM(Lad_o) @ Our result is now properly aligned into r0, remaining bits in r1. @ Pack final result together. @ Round with MSB of r1. If halfway between two numbers, round towards @ LSB of r0 = 0. LSYM(Lad_e): cmp r1, #0x80000000 adc r0, r0, r2, lsl #23 do_it eq biceq r0, r0, #1 orr r0, r0, r3 RET @ Result must be shifted left and exponent adjusted. LSYM(Lad_a): movs r1, r1, lsl #1 adc r0, r0, r0 subs r2, r2, #1 do_it hs cmphs r0, #0x00800000 bhs LSYM(Lad_e) @ No rounding necessary since r1 will always be 0 at this point. LSYM(Lad_l): #if !defined (__ARM_FEATURE_CLZ) movs ip, r0, lsr #12 moveq r0, r0, lsl #12 subeq r2, r2, #12 tst r0, #0x00ff0000 moveq r0, r0, lsl #8 subeq r2, r2, #8 tst r0, #0x00f00000 moveq r0, r0, lsl #4 subeq r2, r2, #4 tst r0, #0x00c00000 moveq r0, r0, lsl #2 subeq r2, r2, #2 cmp r0, #0x00800000 movcc r0, r0, lsl #1 sbcs r2, r2, #0 #else clz ip, r0 sub ip, ip, #8 subs r2, r2, ip shift1 lsl, r0, r0, ip #endif @ Final result with sign @ If exponent negative, denormalize result. do_it ge, et addge r0, r0, r2, lsl #23 rsblt r2, r2, #0 orrge r0, r0, r3 #if defined(__thumb2__) do_it lt, t lsrlt r0, r0, r2 orrlt r0, r3, r0 #else orrlt r0, r3, r0, lsr r2 #endif RET @ Fixup and adjust bit position for denormalized arguments. @ Note that r2 must not remain equal to 0. LSYM(Lad_d): teq r2, #0 eor r1, r1, #0x00800000 do_it eq, te eoreq r0, r0, #0x00800000 addeq r2, r2, #1 subne r3, r3, #1 b LSYM(Lad_x) LSYM(Lad_s): mov r3, r1, lsl #1 mvns ip, r2, asr #24 do_it ne COND(mvn,s,ne) ip, r3, asr #24 beq LSYM(Lad_i) teq r2, r3 beq 1f @ Result is x + 0.0 = x or 0.0 + y = y. teq r2, #0 do_it eq moveq r0, r1 RET 1: teq r0, r1 @ Result is x - x = 0. do_it ne, t movne r0, #0 RETc(ne) @ Result is x + x = 2x. tst r2, #0xff000000 bne 2f movs r0, r0, lsl #1 do_it cs orrcs r0, r0, #0x80000000 RET 2: adds r2, r2, #(2 << 24) do_it cc, t addcc r0, r0, #(1 << 23) RETc(cc) and r3, r0, #0x80000000 @ Overflow: return INF. LSYM(Lad_o): orr r0, r3, #0x7f000000 orr r0, r0, #0x00800000 RET @ At least one of r0/r1 is INF/NAN. 
@ if r0 != INF/NAN: return r1 (which is INF/NAN) @ if r1 != INF/NAN: return r0 (which is INF/NAN) @ if r0 or r1 is NAN: return NAN @ if opposite sign: return NAN @ otherwise return r0 (which is INF or -INF) LSYM(Lad_i): mvns r2, r2, asr #24 do_it ne, et movne r0, r1 COND(mvn,s,eq) r3, r3, asr #24 movne r1, r0 movs r2, r0, lsl #9 do_it eq, te COND(mov,s,eq) r3, r1, lsl #9 teqeq r0, r1 orrne r0, r0, #0x00400000 @ quiet NAN RET CFI_END_FUNCTION FUNC_END aeabi_frsub FUNC_END aeabi_fadd FUNC_END addsf3 FUNC_END aeabi_fsub FUNC_END subsf3 ARM_FUNC_START floatunsisf ARM_FUNC_ALIAS aeabi_ui2f floatunsisf CFI_START_FUNCTION mov r3, #0 b 1f ARM_FUNC_START floatsisf ARM_FUNC_ALIAS aeabi_i2f floatsisf ands r3, r0, #0x80000000 do_it mi rsbmi r0, r0, #0 1: movs ip, r0 do_it eq RETc(eq) @ Add initial exponent to sign orr r3, r3, #((127 + 23) << 23) .ifnc ah, r0 mov ah, r0 .endif mov al, #0 b 2f CFI_END_FUNCTION FUNC_END aeabi_i2f FUNC_END floatsisf FUNC_END aeabi_ui2f FUNC_END floatunsisf ARM_FUNC_START floatundisf ARM_FUNC_ALIAS aeabi_ul2f floatundisf CFI_START_FUNCTION orrs r2, r0, r1 do_it eq RETc(eq) mov r3, #0 b 1f ARM_FUNC_START floatdisf ARM_FUNC_ALIAS aeabi_l2f floatdisf orrs r2, r0, r1 do_it eq RETc(eq) ands r3, ah, #0x80000000 @ sign bit in r3 bpl 1f #if defined(__thumb2__) negs al, al sbc ah, ah, ah, lsl #1 #else rsbs al, al, #0 rsc ah, ah, #0 #endif 1: movs ip, ah do_it eq, tt moveq ip, al moveq ah, al moveq al, #0 @ Add initial exponent to sign orr r3, r3, #((127 + 23 + 32) << 23) do_it eq subeq r3, r3, #(32 << 23) 2: sub r3, r3, #(1 << 23) #if !defined (__ARM_FEATURE_CLZ) mov r2, #23 cmp ip, #(1 << 16) do_it hs, t movhs ip, ip, lsr #16 subhs r2, r2, #16 cmp ip, #(1 << 8) do_it hs, t movhs ip, ip, lsr #8 subhs r2, r2, #8 cmp ip, #(1 << 4) do_it hs, t movhs ip, ip, lsr #4 subhs r2, r2, #4 cmp ip, #(1 << 2) do_it hs, e subhs r2, r2, #2 sublo r2, r2, ip, lsr #1 subs r2, r2, ip, lsr #3 #else clz r2, ip subs r2, r2, #8 #endif sub r3, r3, r2, lsl #23 blt 3f shiftop add r3 r3 ah lsl r2 ip shift1 lsl, ip, al, r2 rsb r2, r2, #32 cmp ip, #0x80000000 shiftop adc r0 r3 al lsr r2 r2 do_it eq biceq r0, r0, #1 RET 3: add r2, r2, #32 shift1 lsl, ip, ah, r2 rsb r2, r2, #32 orrs al, al, ip, lsl #1 shiftop adc r0 r3 ah lsr r2 r2 do_it eq biceq r0, r0, ip, lsr #31 RET CFI_END_FUNCTION FUNC_END floatdisf FUNC_END aeabi_l2f FUNC_END floatundisf FUNC_END aeabi_ul2f #endif /* L_addsubsf3 */ #if defined(L_arm_mulsf3) || defined(L_arm_muldivsf3) @ Define multiplication as weak in _arm_mulsf3.o so that it can be overridden @ by the global definition in _arm_muldivsf3.o. This allows a program only @ using multiplication to take the weak definition which does not contain the @ division code. Programs using only division or both division and @ multiplication will pull _arm_muldivsf3.o from which both the multiplication @ and division are taken thanks to the override. #ifdef L_arm_mulsf3 WEAK mulsf3 WEAK aeabi_fmul #endif ARM_FUNC_START mulsf3 ARM_FUNC_ALIAS aeabi_fmul mulsf3 CFI_START_FUNCTION @ Mask out exponents, trap any zero/denormal/INF/NAN. mov ip, #0xff ands r2, ip, r0, lsr #23 do_it ne, tt COND(and,s,ne) r3, ip, r1, lsr #23 teqne r2, ip teqne r3, ip beq LSYM(Lml_s) LSYM(Lml_x): @ Add exponents together add r2, r2, r3 @ Determine final sign. eor ip, r0, r1 @ Convert mantissa to unsigned integer. @ If power of two, branch to a separate path. @ Make up for final alignment. 
movs r0, r0, lsl #9 do_it ne COND(mov,s,ne) r1, r1, lsl #9 beq LSYM(Lml_1) mov r3, #0x08000000 orr r0, r3, r0, lsr #5 orr r1, r3, r1, lsr #5 @ The actual multiplication. @ This code works on architecture versions >= 4 umull r3, r1, r0, r1 @ Put final sign in r0. and r0, ip, #0x80000000 @ Adjust result upon the MSB position. cmp r1, #(1 << 23) do_it cc, tt movcc r1, r1, lsl #1 orrcc r1, r1, r3, lsr #31 movcc r3, r3, lsl #1 @ Add sign to result. orr r0, r0, r1 @ Apply exponent bias, check for under/overflow. sbc r2, r2, #127 cmp r2, #(254 - 1) bhi LSYM(Lml_u) @ Round the result, merge final exponent. cmp r3, #0x80000000 adc r0, r0, r2, lsl #23 do_it eq biceq r0, r0, #1 RET @ Multiplication by 0x1p*: let''s shortcut a lot of code. LSYM(Lml_1): teq r0, #0 and ip, ip, #0x80000000 do_it eq moveq r1, r1, lsl #9 orr r0, ip, r0, lsr #9 orr r0, r0, r1, lsr #9 subs r2, r2, #127 do_it gt, tt COND(rsb,s,gt) r3, r2, #255 orrgt r0, r0, r2, lsl #23 RETc(gt) @ Under/overflow: fix things up for the code below. orr r0, r0, #0x00800000 mov r3, #0 subs r2, r2, #1 LSYM(Lml_u): @ Overflow? bgt LSYM(Lml_o) @ Check if denormalized result is possible, otherwise return signed 0. cmn r2, #(24 + 1) do_it le, t bicle r0, r0, #0x7fffffff RETc(le) @ Shift value right, round, etc. rsb r2, r2, #0 movs r1, r0, lsl #1 shift1 lsr, r1, r1, r2 rsb r2, r2, #32 shift1 lsl, ip, r0, r2 movs r0, r1, rrx adc r0, r0, #0 orrs r3, r3, ip, lsl #1 do_it eq biceq r0, r0, ip, lsr #31 RET @ One or both arguments are denormalized. @ Scale them leftwards and preserve sign bit. LSYM(Lml_d): teq r2, #0 and ip, r0, #0x80000000 1: do_it eq, tt moveq r0, r0, lsl #1 tsteq r0, #0x00800000 subeq r2, r2, #1 beq 1b orr r0, r0, ip teq r3, #0 and ip, r1, #0x80000000 2: do_it eq, tt moveq r1, r1, lsl #1 tsteq r1, #0x00800000 subeq r3, r3, #1 beq 2b orr r1, r1, ip b LSYM(Lml_x) LSYM(Lml_s): @ Isolate the INF and NAN cases away and r3, ip, r1, lsr #23 teq r2, ip do_it ne teqne r3, ip beq 1f @ Here, one or more arguments are either denormalized or zero. bics ip, r0, #0x80000000 do_it ne COND(bic,s,ne) ip, r1, #0x80000000 bne LSYM(Lml_d) @ Result is 0, but determine sign anyway. LSYM(Lml_z): eor r0, r0, r1 bic r0, r0, #0x7fffffff RET 1: @ One or both args are INF or NAN. teq r0, #0x0 do_it ne, ett teqne r0, #0x80000000 moveq r0, r1 teqne r1, #0x0 teqne r1, #0x80000000 beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN teq r2, ip bne 1f movs r2, r0, lsl #9 bne LSYM(Lml_n) @ NAN * <anything> -> NAN 1: teq r3, ip bne LSYM(Lml_i) movs r3, r1, lsl #9 do_it ne movne r0, r1 bne LSYM(Lml_n) @ <anything> * NAN -> NAN @ Result is INF, but we need to determine its sign. LSYM(Lml_i): eor r0, r0, r1 @ Overflow: return INF (sign already in r0). LSYM(Lml_o): and r0, r0, #0x80000000 orr r0, r0, #0x7f000000 orr r0, r0, #0x00800000 RET @ Return a quiet NAN. LSYM(Lml_n): orr r0, r0, #0x7f000000 orr r0, r0, #0x00c00000 RET CFI_END_FUNCTION FUNC_END aeabi_fmul FUNC_END mulsf3 #ifdef L_arm_muldivsf3 ARM_FUNC_START divsf3 ARM_FUNC_ALIAS aeabi_fdiv divsf3 CFI_START_FUNCTION @ Mask out exponents, trap any zero/denormal/INF/NAN. mov ip, #0xff ands r2, ip, r0, lsr #23 do_it ne, tt COND(and,s,ne) r3, ip, r1, lsr #23 teqne r2, ip teqne r3, ip beq LSYM(Ldv_s) LSYM(Ldv_x): @ Subtract divisor exponent from dividend''s sub r2, r2, r3 @ Preserve final sign into ip. eor ip, r0, r1 @ Convert mantissa to unsigned integer. @ Dividend -> r3, divisor -> r1. 
movs r1, r1, lsl #9 mov r0, r0, lsl #9 beq LSYM(Ldv_1) mov r3, #0x10000000 orr r1, r3, r1, lsr #4 orr r3, r3, r0, lsr #4 @ Initialize r0 (result) with final sign bit. and r0, ip, #0x80000000 @ Ensure result will land to known bit position. @ Apply exponent bias accordingly. cmp r3, r1 do_it cc movcc r3, r3, lsl #1 adc r2, r2, #(127 - 2) @ The actual division loop. mov ip, #0x00800000 1: cmp r3, r1 do_it cs, t subcs r3, r3, r1 orrcs r0, r0, ip cmp r3, r1, lsr #1 do_it cs, t subcs r3, r3, r1, lsr #1 orrcs r0, r0, ip, lsr #1 cmp r3, r1, lsr #2 do_it cs, t subcs r3, r3, r1, lsr #2 orrcs r0, r0, ip, lsr #2 cmp r3, r1, lsr #3 do_it cs, t subcs r3, r3, r1, lsr #3 orrcs r0, r0, ip, lsr #3 movs r3, r3, lsl #4 do_it ne COND(mov,s,ne) ip, ip, lsr #4 bne 1b @ Check exponent for under/overflow. cmp r2, #(254 - 1) bhi LSYM(Lml_u) @ Round the result, merge final exponent. cmp r3, r1 adc r0, r0, r2, lsl #23 do_it eq biceq r0, r0, #1 RET @ Division by 0x1p*: let''s shortcut a lot of code. LSYM(Ldv_1): and ip, ip, #0x80000000 orr r0, ip, r0, lsr #9 adds r2, r2, #127 do_it gt, tt COND(rsb,s,gt) r3, r2, #255 orrgt r0, r0, r2, lsl #23 RETc(gt) orr r0, r0, #0x00800000 mov r3, #0 subs r2, r2, #1 b LSYM(Lml_u) @ One or both arguments are denormalized. @ Scale them leftwards and preserve sign bit. LSYM(Ldv_d): teq r2, #0 and ip, r0, #0x80000000 1: do_it eq, tt moveq r0, r0, lsl #1 tsteq r0, #0x00800000 subeq r2, r2, #1 beq 1b orr r0, r0, ip teq r3, #0 and ip, r1, #0x80000000 2: do_it eq, tt moveq r1, r1, lsl #1 tsteq r1, #0x00800000 subeq r3, r3, #1 beq 2b orr r1, r1, ip b LSYM(Ldv_x) @ One or both arguments are either INF, NAN, zero or denormalized. LSYM(Ldv_s): and r3, ip, r1, lsr #23 teq r2, ip bne 1f movs r2, r0, lsl #9 bne LSYM(Lml_n) @ NAN / <anything> -> NAN teq r3, ip bne LSYM(Lml_i) @ INF / <anything> -> INF mov r0, r1 b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN 1: teq r3, ip bne 2f movs r3, r1, lsl #9 beq LSYM(Lml_z) @ <anything> / INF -> 0 mov r0, r1 b LSYM(Lml_n) @ <anything> / NAN -> NAN 2: @ If both are nonzero, we need to normalize and resume above. bics ip, r0, #0x80000000 do_it ne COND(bic,s,ne) ip, r1, #0x80000000 bne LSYM(Ldv_d) @ One or both arguments are zero. bics r2, r0, #0x80000000 bne LSYM(Lml_i) @ <non_zero> / 0 -> INF bics r3, r1, #0x80000000 bne LSYM(Lml_z) @ 0 / <non_zero> -> 0 b LSYM(Lml_n) @ 0 / 0 -> NAN CFI_END_FUNCTION FUNC_END aeabi_fdiv FUNC_END divsf3 #endif /* L_muldivsf3 */ #endif /* L_arm_mulsf3 || L_arm_muldivsf3 */ #ifdef L_arm_cmpsf2 @ The return value in r0 is @ @ 0 if the operands are equal @ 1 if the first operand is greater than the second, or @ the operands are unordered and the operation is @ CMP, LT, LE, NE, or EQ. @ -1 if the first operand is less than the second, or @ the operands are unordered and the operation is GT @ or GE. @ @ The Z flag will be set iff the operands are equal. @ @ The following registers are clobbered by this function: @ ip, r0, r1, r2, r3 ARM_FUNC_START gtsf2 ARM_FUNC_ALIAS gesf2 gtsf2 CFI_START_FUNCTION mov ip, #-1 b 1f ARM_FUNC_START ltsf2 ARM_FUNC_ALIAS lesf2 ltsf2 mov ip, #1 b 1f ARM_FUNC_START cmpsf2 ARM_FUNC_ALIAS nesf2 cmpsf2 ARM_FUNC_ALIAS eqsf2 cmpsf2 mov ip, #1 @ how should we specify unordered here? 1: str ip, [sp, #-4]! .cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4. @ We're not adding CFI for ip as it's pushed into the stack only because @ it may be popped off later as a return value (i.e. we're not preserving @ it anyways). @ Trap any INF/NAN first. 
mov r2, r0, lsl #1 mov r3, r1, lsl #1 mvns ip, r2, asr #24 do_it ne COND(mvn,s,ne) ip, r3, asr #24 beq 3f .cfi_remember_state @ Save the current CFI state. This is done because the branch is conditional, @ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return. @ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch @ code will affect the branch code as well. To avoid this we'll restore @ the current state before executing the branch code. @ Compare values. @ Note that 0.0 is equal to -0.0. 2: add sp, sp, #4 .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset. orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag do_it ne teqne r0, r1 @ if not 0 compare sign do_it pl COND(sub,s,pl) r0, r2, r3 @ if same sign compare values, set r0 @ Result: do_it hi movhi r0, r1, asr #31 do_it lo mvnlo r0, r1, asr #31 do_it ne orrne r0, r0, #1 RET 3: @ Look for a NAN. @ Restore the previous CFI state (i.e. keep the CFI state as it was @ before the branch). .cfi_restore_state mvns ip, r2, asr #24 bne 4f movs ip, r0, lsl #9 bne 5f @ r0 is NAN 4: mvns ip, r3, asr #24 bne 2b movs ip, r1, lsl #9 beq 2b @ r1 is not NAN 5: ldr r0, [sp], #4 @ return unordered code. .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset. RET CFI_END_FUNCTION FUNC_END gesf2 FUNC_END gtsf2 FUNC_END lesf2 FUNC_END ltsf2 FUNC_END nesf2 FUNC_END eqsf2 FUNC_END cmpsf2 ARM_FUNC_START aeabi_cfrcmple CFI_START_FUNCTION mov ip, r0 mov r0, r1 mov r1, ip b 6f ARM_FUNC_START aeabi_cfcmpeq ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq @ The status-returning routines are required to preserve all @ registers except ip, lr, and cpsr. 6: do_push {r0, r1, r2, r3, lr} .cfi_adjust_cfa_offset 20 @ CFA is at sp + previousOffset + 20 .cfi_rel_offset r0, 0 @ Registers are saved from sp to sp + 16 .cfi_rel_offset r1, 4 .cfi_rel_offset r2, 8 .cfi_rel_offset r3, 12 .cfi_rel_offset lr, 16 ARM_CALL cmpsf2 @ Set the Z flag correctly, and the C flag unconditionally. cmp r0, #0 @ Clear the C flag if the return value was -1, indicating @ that the first operand was smaller than the second. do_it mi cmnmi r0, #0 RETLDM "r0, r1, r2, r3" CFI_END_FUNCTION FUNC_END aeabi_cfcmple FUNC_END aeabi_cfcmpeq FUNC_END aeabi_cfrcmple ARM_FUNC_START aeabi_fcmpeq CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cfcmple do_it eq, e moveq r0, #1 @ Equal to. movne r0, #0 @ Less than, greater than, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_fcmpeq ARM_FUNC_START aeabi_fcmplt CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cfcmple do_it cc, e movcc r0, #1 @ Less than. movcs r0, #0 @ Equal to, greater than, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_fcmplt ARM_FUNC_START aeabi_fcmple CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cfcmple do_it ls, e movls r0, #1 @ Less than or equal to. movhi r0, #0 @ Greater than or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_fcmple ARM_FUNC_START aeabi_fcmpge CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cfrcmple do_it ls, e movls r0, #1 @ Operand 2 is less than or equal to operand 1. 
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_fcmpge ARM_FUNC_START aeabi_fcmpgt CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cfrcmple do_it cc, e movcc r0, #1 @ Operand 2 is less than operand 1. movcs r0, #0 @ Operand 2 is greater than or equal to operand 1, @ or they are unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_fcmpgt #endif /* L_cmpsf2 */ #ifdef L_arm_unordsf2 ARM_FUNC_START unordsf2 ARM_FUNC_ALIAS aeabi_fcmpun unordsf2 CFI_START_FUNCTION mov r2, r0, lsl #1 mov r3, r1, lsl #1 mvns ip, r2, asr #24 bne 1f movs ip, r0, lsl #9 bne 3f @ r0 is NAN 1: mvns ip, r3, asr #24 bne 2f movs ip, r1, lsl #9 bne 3f @ r1 is NAN 2: mov r0, #0 @ arguments are ordered. RET 3: mov r0, #1 @ arguments are unordered. RET CFI_END_FUNCTION FUNC_END aeabi_fcmpun FUNC_END unordsf2 #endif /* L_unordsf2 */ #ifdef L_arm_fixsfsi ARM_FUNC_START fixsfsi ARM_FUNC_ALIAS aeabi_f2iz fixsfsi CFI_START_FUNCTION @ check exponent range. mov r2, r0, lsl #1 cmp r2, #(127 << 24) bcc 1f @ value is too small mov r3, #(127 + 31) subs r2, r3, r2, lsr #24 bls 2f @ value is too large @ scale value mov r3, r0, lsl #8 orr r3, r3, #0x80000000 tst r0, #0x80000000 @ the sign bit shift1 lsr, r0, r3, r2 do_it ne rsbne r0, r0, #0 RET 1: mov r0, #0 RET 2: cmp r2, #(127 + 31 - 0xff) bne 3f movs r2, r0, lsl #9 bne 4f @ r0 is NAN. 3: ands r0, r0, #0x80000000 @ the sign bit do_it eq moveq r0, #0x7fffffff @ the maximum signed positive si RET 4: mov r0, #0 @ What should we convert NAN to? RET CFI_END_FUNCTION FUNC_END aeabi_f2iz FUNC_END fixsfsi #endif /* L_fixsfsi */ #ifdef L_arm_fixunssfsi ARM_FUNC_START fixunssfsi ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi CFI_START_FUNCTION @ check exponent range. movs r2, r0, lsl #1 bcs 1f @ value is negative cmp r2, #(127 << 24) bcc 1f @ value is too small mov r3, #(127 + 31) subs r2, r3, r2, lsr #24 bmi 2f @ value is too large @ scale the value mov r3, r0, lsl #8 orr r3, r3, #0x80000000 shift1 lsr, r0, r3, r2 RET 1: mov r0, #0 RET 2: cmp r2, #(127 + 31 - 0xff) bne 3f movs r2, r0, lsl #9 bne 4f @ r0 is NAN. 3: mov r0, #0xffffffff @ maximum unsigned si RET 4: mov r0, #0 @ What should we convert NAN to? RET CFI_END_FUNCTION FUNC_END aeabi_f2uiz FUNC_END fixunssfsi #endif /* L_fixunssfsi */
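The fixsfsi/fixunssfsi routines above are easier to follow next to a high-level model. The following C sketch mirrors the fixsfsi path (exponent test, implicit-bit restore, shift, sign fixup); it is illustrative only, not part of libgcc, and the helper name and the uint32_t bit-pattern argument are assumptions made here.

#include <stdint.h>

/* Rough C model of the fixsfsi bit manipulation above (illustrative
   only; not part of libgcc).  'bits' is the IEEE-754 binary32 pattern. */
static int32_t model_fixsfsi(uint32_t bits)
{
    uint32_t exp2 = bits << 1;                  /* toss the sign bit */
    if (exp2 < (uint32_t)127 << 24)
        return 0;                               /* |x| < 1.0: too small */
    int32_t shift = (127 + 31) - (int32_t)(exp2 >> 24);
    if (shift <= 0) {                           /* too large, INF or NAN */
        if ((exp2 >> 24) == 0xff && (bits << 9) != 0)
            return 0;                           /* NAN -> 0, as above */
        return (bits & 0x80000000u) ? INT32_MIN : INT32_MAX;
    }
    uint32_t frac = (bits << 8) | 0x80000000u;  /* restore the implicit 1 */
    int32_t val = (int32_t)(frac >> shift);
    return (bits & 0x80000000u) ? -val : val;
}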
4ms/metamodule-plugin-sdk
2,413
plugin-libc/libgcc/config/arm/crti.S
# Copyright (C) 2001-2022 Free Software Foundation, Inc. # Written By Nick Clifton # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3, or (at your option) any # later version. # # This file is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. /* An executable stack is *not* required for these functions. */ #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif # This file just makes a stack frame for the contents of the .fini and # .init sections. Users may put any desired instructions in those # sections. #ifdef __ELF__ #define TYPE(x) .type x,function #else #define TYPE(x) #endif #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ # Note - this macro is complemented by the FUNC_END macro # in crtn.S. If you change this macro you must also change # that macro to match. .macro FUNC_START #ifdef __thumb__ .thumb push {r3, r4, r5, r6, r7, lr} #else .arm # Create a stack frame and save any call-preserved registers mov ip, sp stmdb sp!, {r3, r4, r5, r6, r7, r8, r9, sl, fp, ip, lr, pc} sub fp, ip, #4 #endif .endm .section ".init" .align 2 .global _init #ifdef __thumb__ .thumb_func #endif TYPE(_init) _init: FUNC_START .section ".fini" .align 2 .global _fini #ifdef __thumb__ .thumb_func #endif TYPE(_fini) _fini: FUNC_START # end of crti.S
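The FUNC_START prologue above deliberately has no matching epilogue here: crtn.S supplies it, and anything the toolchain places in .init between the two runs when _init is called. A minimal sketch of that mechanism, assuming a hypothetical hook name (modern toolchains normally use .init_array / __attribute__((constructor)) instead):

/* Illustrative only: append a call to the .init body so it executes
   inside _init, between the crti.S prologue above and the crtn.S
   epilogue.  The hook name is hypothetical. */
void my_init_hook(void) { /* runs during _init */ }

__asm__(".section .init\n"
        "\tbl my_init_hook\n"
        ".previous\n");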
4ms/metamodule-plugin-sdk
32,761
plugin-libc/libgcc/config/arm/ieee754-df.S
/* ieee754-df.S double-precision floating point support for ARM Copyright (C) 2003-2022 Free Software Foundation, Inc. Contributed by Nicolas Pitre (nico@fluxnic.net) This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* * Notes: * * The goal of this code is to be as fast as possible. This is * not meant to be easy to understand for the casual reader. * For slightly simpler code please see the single precision version * of this file. * * Only the default rounding mode is supported, for best performance. * Exceptions aren't supported yet, but that can be added quite easily * if necessary without impacting performance. * * In the CFI related comments, 'previousOffset' refers to the previous offset * from sp used to compute the CFA. */ .cfi_sections .debug_frame #ifndef __ARMEB__ #define xl r0 #define xh r1 #define yl r2 #define yh r3 #else #define xh r0 #define xl r1 #define yh r2 #define yl r3 #endif #ifdef L_arm_negdf2 ARM_FUNC_START negdf2 ARM_FUNC_ALIAS aeabi_dneg negdf2 CFI_START_FUNCTION @ flip sign bit eor xh, xh, #0x80000000 RET CFI_END_FUNCTION FUNC_END aeabi_dneg FUNC_END negdf2 #endif #ifdef L_arm_addsubdf3 ARM_FUNC_START aeabi_drsub CFI_START_FUNCTION eor xh, xh, #0x80000000 @ flip sign bit of first arg b 1f ARM_FUNC_START subdf3 ARM_FUNC_ALIAS aeabi_dsub subdf3 eor yh, yh, #0x80000000 @ flip sign bit of second arg #if defined(__INTERWORKING_STUBS__) b 1f @ Skip Thumb-code prologue #endif ARM_FUNC_START adddf3 ARM_FUNC_ALIAS aeabi_dadd adddf3 1: do_push {r4, r5, lr} @ sp -= 12 .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8 .cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 @ Look for zeroes, equal values, INF, or NAN. shift1 lsl, r4, xh, #1 shift1 lsl, r5, yh, #1 teq r4, r5 do_it eq teqeq xl, yl do_it ne, ttt COND(orr,s,ne) ip, r4, xl COND(orr,s,ne) ip, r5, yl COND(mvn,s,ne) ip, r4, asr #21 COND(mvn,s,ne) ip, r5, asr #21 beq LSYM(Lad_s) @ Compute exponent difference. Make largest exponent in r4, @ corresponding arg in xh-xl, and positive exponent difference in r5. shift1 lsr, r4, r4, #21 rsbs r5, r4, r5, lsr #21 do_it lt rsblt r5, r5, #0 ble 1f add r4, r4, r5 eor yl, xl, yl eor yh, xh, yh eor xl, yl, xl eor xh, yh, xh eor yl, xl, yl eor yh, xh, yh 1: @ If exponent difference is too large, return largest argument @ already in xh-xl. We need up to 54 bits to handle proper rounding @ of 0x1p54 - 1.1. cmp r5, #54 do_it hi RETLDM "r4, r5" hi @ Convert mantissa to signed integer. 
tst xh, #0x80000000 mov xh, xh, lsl #12 mov ip, #0x00100000 orr xh, ip, xh, lsr #12 beq 1f #if defined(__thumb2__) negs xl, xl sbc xh, xh, xh, lsl #1 #else rsbs xl, xl, #0 rsc xh, xh, #0 #endif 1: tst yh, #0x80000000 mov yh, yh, lsl #12 orr yh, ip, yh, lsr #12 beq 1f #if defined(__thumb2__) negs yl, yl sbc yh, yh, yh, lsl #1 #else rsbs yl, yl, #0 rsc yh, yh, #0 #endif 1: @ If exponent == difference, one or both args were denormalized. @ Since this is not a common case, rescale them out of line. teq r4, r5 beq LSYM(Lad_d) @ CFI note: we're lucky that the branches to Lad_* that appear after this @ function have a CFI state that's exactly the same as the one we're in at this @ point. Otherwise the CFI would change to a different state after the branch, @ which would be disastrous for backtracing. LSYM(Lad_x): @ Compensate for the exponent overlapping the mantissa MSB added later sub r4, r4, #1 @ Shift yh-yl right per r5, add to xh-xl, keep leftover bits in ip. rsbs lr, r5, #32 blt 1f shift1 lsl, ip, yl, lr shiftop adds xl xl yl lsr r5 yl adc xh, xh, #0 shiftop adds xl xl yh lsl lr yl shiftop adcs xh xh yh asr r5 yh b 2f 1: sub r5, r5, #32 add lr, lr, #32 cmp yl, #1 shift1 lsl, ip, yh, lr do_it cs orrcs ip, ip, #2 @ 2 not 1, to allow lsr #1 later shiftop adds xl xl yh asr r5 yh adcs xh, xh, yh, asr #31 2: @ We now have a result in xh-xl-ip. @ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above) and r5, xh, #0x80000000 bpl LSYM(Lad_p) #if defined(__thumb2__) mov lr, #0 negs ip, ip sbcs xl, lr, xl sbc xh, lr, xh #else rsbs ip, ip, #0 rscs xl, xl, #0 rsc xh, xh, #0 #endif @ Determine how to normalize the result. LSYM(Lad_p): cmp xh, #0x00100000 bcc LSYM(Lad_a) cmp xh, #0x00200000 bcc LSYM(Lad_e) @ Result needs to be shifted right. movs xh, xh, lsr #1 movs xl, xl, rrx mov ip, ip, rrx add r4, r4, #1 @ Make sure we did not bust our exponent. mov r2, r4, lsl #21 cmn r2, #(2 << 21) bcs LSYM(Lad_o) @ Our result is now properly aligned into xh-xl, remaining bits in ip. @ Round with MSB of ip. If halfway between two numbers, round towards @ LSB of xl = 0. @ Pack final result together. LSYM(Lad_e): cmp ip, #0x80000000 do_it eq COND(mov,s,eq) ip, xl, lsr #1 adcs xl, xl, #0 adc xh, xh, r4, lsl #20 orr xh, xh, r5 RETLDM "r4, r5" @ Result must be shifted left and exponent adjusted. LSYM(Lad_a): movs ip, ip, lsl #1 adcs xl, xl, xl adc xh, xh, xh subs r4, r4, #1 do_it hs cmphs xh, #0x00100000 bhs LSYM(Lad_e) @ No rounding necessary since ip will always be 0 at this point. LSYM(Lad_l): #if !defined (__ARM_FEATURE_CLZ) teq xh, #0 movne r3, #20 moveq r3, #52 moveq xh, xl moveq xl, #0 mov r2, xh cmp r2, #(1 << 16) movhs r2, r2, lsr #16 subhs r3, r3, #16 cmp r2, #(1 << 8) movhs r2, r2, lsr #8 subhs r3, r3, #8 cmp r2, #(1 << 4) movhs r2, r2, lsr #4 subhs r3, r3, #4 cmp r2, #(1 << 2) subhs r3, r3, #2 sublo r3, r3, r2, lsr #1 sub r3, r3, r2, lsr #3 #else teq xh, #0 do_it eq, t moveq xh, xl moveq xl, #0 clz r3, xh do_it eq addeq r3, r3, #32 sub r3, r3, #11 #endif @ determine how to shift the value. subs r2, r3, #32 bge 2f adds r2, r2, #12 ble 1f @ shift value left 21 to 31 bits, or actually right 11 to 1 bits @ since a register switch happened above. add ip, r2, #20 rsb r2, r2, #12 shift1 lsl, xl, xh, ip shift1 lsr, xh, xh, r2 b 3f @ actually shift value left 1 to 20 bits, which might also represent @ 32 to 52 bits if counting the register switch that happened earlier. 
1: add r2, r2, #20 2: do_it le rsble ip, r2, #32 shift1 lsl, xh, xh, r2 #if defined(__thumb2__) lsr ip, xl, ip itt le orrle xh, xh, ip lslle xl, xl, r2 #else orrle xh, xh, xl, lsr ip movle xl, xl, lsl r2 #endif @ adjust exponent accordingly. 3: subs r4, r4, r3 do_it ge, tt addge xh, xh, r4, lsl #20 orrge xh, xh, r5 RETLDM "r4, r5" ge @ Exponent too small, denormalize result. @ Find out proper shift value. mvn r4, r4 subs r4, r4, #31 bge 2f adds r4, r4, #12 bgt 1f @ shift result right of 1 to 20 bits, sign is in r5. add r4, r4, #20 rsb r2, r4, #32 shift1 lsr, xl, xl, r4 shiftop orr xl xl xh lsl r2 yh shiftop orr xh r5 xh lsr r4 yh RETLDM "r4, r5" @ shift result right of 21 to 31 bits, or left 11 to 1 bits after @ a register switch from xh to xl. 1: rsb r4, r4, #12 rsb r2, r4, #32 shift1 lsr, xl, xl, r2 shiftop orr xl xl xh lsl r4 yh mov xh, r5 RETLDM "r4, r5" @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch @ from xh to xl. 2: shift1 lsr, xl, xh, r4 mov xh, r5 RETLDM "r4, r5" @ Adjust exponents for denormalized arguments. @ Note that r4 must not remain equal to 0. LSYM(Lad_d): teq r4, #0 eor yh, yh, #0x00100000 do_it eq, te eoreq xh, xh, #0x00100000 addeq r4, r4, #1 subne r5, r5, #1 b LSYM(Lad_x) LSYM(Lad_s): mvns ip, r4, asr #21 do_it ne COND(mvn,s,ne) ip, r5, asr #21 beq LSYM(Lad_i) teq r4, r5 do_it eq teqeq xl, yl beq 1f @ Result is x + 0.0 = x or 0.0 + y = y. orrs ip, r4, xl do_it eq, t moveq xh, yh moveq xl, yl RETLDM "r4, r5" 1: teq xh, yh @ Result is x - x = 0. do_it ne, tt movne xh, #0 movne xl, #0 RETLDM "r4, r5" ne @ Result is x + x = 2x. movs ip, r4, lsr #21 bne 2f movs xl, xl, lsl #1 adcs xh, xh, xh do_it cs orrcs xh, xh, #0x80000000 RETLDM "r4, r5" 2: adds r4, r4, #(2 << 21) do_it cc, t addcc xh, xh, #(1 << 20) RETLDM "r4, r5" cc and r5, xh, #0x80000000 @ Overflow: return INF. LSYM(Lad_o): orr xh, r5, #0x7f000000 orr xh, xh, #0x00f00000 mov xl, #0 RETLDM "r4, r5" @ At least one of x or y is INF/NAN. @ if xh-xl != INF/NAN: return yh-yl (which is INF/NAN) @ if yh-yl != INF/NAN: return xh-xl (which is INF/NAN) @ if either is NAN: return NAN @ if opposite sign: return NAN @ otherwise return xh-xl (which is INF or -INF) LSYM(Lad_i): mvns ip, r4, asr #21 do_it ne, te movne xh, yh movne xl, yl COND(mvn,s,eq) ip, r5, asr #21 do_it ne, t movne yh, xh movne yl, xl orrs r4, xl, xh, lsl #12 do_it eq, te COND(orr,s,eq) r5, yl, yh, lsl #12 teqeq xh, yh orrne xh, xh, #0x00080000 @ quiet NAN RETLDM "r4, r5" CFI_END_FUNCTION FUNC_END aeabi_dsub FUNC_END subdf3 FUNC_END aeabi_dadd FUNC_END adddf3 ARM_FUNC_START floatunsidf ARM_FUNC_ALIAS aeabi_ui2d floatunsidf CFI_START_FUNCTION teq r0, #0 do_it eq, t moveq r1, #0 RETc(eq) do_push {r4, r5, lr} @ sp -= 12 .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8. .cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 mov r4, #0x400 @ initial exponent add r4, r4, #(52-1 - 1) mov r5, #0 @ sign bit is 0 .ifnc xl, r0 mov xl, r0 .endif mov xh, #0 b LSYM(Lad_l) CFI_END_FUNCTION FUNC_END aeabi_ui2d FUNC_END floatunsidf ARM_FUNC_START floatsidf ARM_FUNC_ALIAS aeabi_i2d floatsidf CFI_START_FUNCTION teq r0, #0 do_it eq, t moveq r1, #0 RETc(eq) do_push {r4, r5, lr} @ sp -= 12 .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8. 
.cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 mov r4, #0x400 @ initial exponent add r4, r4, #(52-1 - 1) ands r5, r0, #0x80000000 @ sign bit in r5 do_it mi rsbmi r0, r0, #0 @ absolute value .ifnc xl, r0 mov xl, r0 .endif mov xh, #0 b LSYM(Lad_l) CFI_END_FUNCTION FUNC_END aeabi_i2d FUNC_END floatsidf ARM_FUNC_START extendsfdf2 ARM_FUNC_ALIAS aeabi_f2d extendsfdf2 CFI_START_FUNCTION movs r2, r0, lsl #1 @ toss sign bit mov xh, r2, asr #3 @ stretch exponent mov xh, xh, rrx @ retrieve sign bit mov xl, r2, lsl #28 @ retrieve remaining bits do_it ne, ttt COND(and,s,ne) r3, r2, #0xff000000 @ isolate exponent teqne r3, #0xff000000 @ if not 0, check if INF or NAN eorne xh, xh, #0x38000000 @ fixup exponent otherwise. RETc(ne) @ and return it. bics r2, r2, #0xff000000 @ isolate mantissa do_it eq @ if 0, that is ZERO or INF, RETc(eq) @ we are done already. teq r3, #0xff000000 @ check for NAN do_it eq, t orreq xh, xh, #0x00080000 @ change to quiet NAN RETc(eq) @ and return it. @ value was denormalized. We can normalize it now. do_push {r4, r5, lr} .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8. .cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 mov r4, #0x380 @ setup corresponding exponent and r5, xh, #0x80000000 @ move sign bit in r5 bic xh, xh, #0x80000000 b LSYM(Lad_l) CFI_END_FUNCTION FUNC_END aeabi_f2d FUNC_END extendsfdf2 ARM_FUNC_START floatundidf ARM_FUNC_ALIAS aeabi_ul2d floatundidf CFI_START_FUNCTION .cfi_remember_state @ Save the current CFA state. orrs r2, r0, r1 do_it eq RETc(eq) do_push {r4, r5, lr} @ sp -= 12 .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8 .cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 mov r5, #0 b 2f ARM_FUNC_START floatdidf ARM_FUNC_ALIAS aeabi_l2d floatdidf .cfi_restore_state @ Restore the CFI state we saved above. If we didn't do this then the @ following instructions would have the CFI state that was set by the @ offset adjustments made in floatundidf. orrs r2, r0, r1 do_it eq RETc(eq) do_push {r4, r5, lr} @ sp -= 12 .cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12 .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8 .cfi_rel_offset r5, 4 .cfi_rel_offset lr, 8 ands r5, ah, #0x80000000 @ sign bit in r5 bpl 2f #if defined(__thumb2__) negs al, al sbc ah, ah, ah, lsl #1 #else rsbs al, al, #0 rsc ah, ah, #0 #endif 2: mov r4, #0x400 @ initial exponent add r4, r4, #(52-1 - 1) @ If FP word order does not match integer word order, swap the words. .ifnc xh, ah mov ip, al mov xh, ah mov xl, ip .endif movs ip, xh, lsr #22 beq LSYM(Lad_p) @ The value is too big. Scale it down a bit... mov r2, #3 movs ip, ip, lsr #3 do_it ne addne r2, r2, #3 movs ip, ip, lsr #3 do_it ne addne r2, r2, #3 add r2, r2, ip, lsr #3 rsb r3, r2, #32 shift1 lsl, ip, xl, r3 shift1 lsr, xl, xl, r2 shiftop orr xl xl xh lsl r3 lr shift1 lsr, xh, xh, r2 add r4, r4, r2 b LSYM(Lad_p) CFI_END_FUNCTION FUNC_END floatdidf FUNC_END aeabi_l2d FUNC_END floatundidf FUNC_END aeabi_ul2d #endif /* L_addsubdf3 */ #if defined(L_arm_muldf3) || defined(L_arm_muldivdf3) @ Define multiplication as weak in _arm_muldf3.o so that it can be overridden @ by the global definition in _arm_muldivdf3.o. This allows a program only @ using multiplication to take the weak definition which does not contain the @ division code. 
Programs using only division or both division and @ multiplication will pull _arm_muldivdf3.o from which both the multiplication @ and division are taken thanks to the override. #ifdef L_arm_muldf3 WEAK muldf3 WEAK aeabi_dmul #endif ARM_FUNC_START muldf3 ARM_FUNC_ALIAS aeabi_dmul muldf3 CFI_START_FUNCTION do_push {r4, r5, r6, lr} @ sp -= 16 .cfi_adjust_cfa_offset 16 @ CFA is now sp + previousOffset + 16 .cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 12. .cfi_rel_offset r5, 4 .cfi_rel_offset r6, 8 .cfi_rel_offset lr, 12 @ Mask out exponents, trap any zero/denormal/INF/NAN. mov ip, #0xff orr ip, ip, #0x700 ands r4, ip, xh, lsr #20 do_it ne, tte COND(and,s,ne) r5, ip, yh, lsr #20 teqne r4, ip teqne r5, ip bleq LSYM(Lml_s) @ Add exponents together add r4, r4, r5 @ Determine final sign. eor r6, xh, yh @ Convert mantissa to unsigned integer. @ If power of two, branch to a separate path. bic xh, xh, ip, lsl #21 bic yh, yh, ip, lsl #21 orrs r5, xl, xh, lsl #12 do_it ne COND(orr,s,ne) r5, yl, yh, lsl #12 orr xh, xh, #0x00100000 orr yh, yh, #0x00100000 beq LSYM(Lml_1) @ Here is the actual multiplication. @ This code works on architecture versions >= 4 umull ip, lr, xl, yl mov r5, #0 umlal lr, r5, xh, yl and yl, r6, #0x80000000 umlal lr, r5, xl, yh mov r6, #0 umlal r5, r6, xh, yh @ The LSBs in ip are only significant for the final rounding. @ Fold them into lr. teq ip, #0 do_it ne orrne lr, lr, #1 @ Adjust result upon the MSB position. sub r4, r4, #0xff cmp r6, #(1 << (20-11)) sbc r4, r4, #0x300 bcs 1f movs lr, lr, lsl #1 adcs r5, r5, r5 adc r6, r6, r6 1: @ Shift to final position, add sign to result. orr xh, yl, r6, lsl #11 orr xh, xh, r5, lsr #21 mov xl, r5, lsl #11 orr xl, xl, lr, lsr #21 mov lr, lr, lsl #11 @ Check exponent range for under/overflow. subs ip, r4, #(254 - 1) do_it hi cmphi ip, #0x700 bhi LSYM(Lml_u) @ Round the result, merge final exponent. cmp lr, #0x80000000 do_it eq COND(mov,s,eq) lr, xl, lsr #1 adcs xl, xl, #0 adc xh, xh, r4, lsl #20 RETLDM "r4, r5, r6" @ Multiplication by 0x1p*: let's shortcut a lot of code. LSYM(Lml_1): and r6, r6, #0x80000000 orr xh, r6, xh orr xl, xl, yl eor xh, xh, yh subs r4, r4, ip, lsr #1 do_it gt, tt COND(rsb,s,gt) r5, r4, ip orrgt xh, xh, r4, lsl #20 RETLDM "r4, r5, r6" gt @ Under/overflow: fix things up for the code below. orr xh, xh, #0x00100000 mov lr, #0 subs r4, r4, #1 LSYM(Lml_u): @ Overflow? bgt LSYM(Lml_o) @ Check if denormalized result is possible, otherwise return signed 0. cmn r4, #(53 + 1) do_it le, tt movle xl, #0 bicle xh, xh, #0x7fffffff RETLDM "r4, r5, r6" le @ Find out proper shift value. rsb r4, r4, #0 subs r4, r4, #32 bge 2f adds r4, r4, #12 bgt 1f @ shift result right of 1 to 20 bits, preserve sign bit, round, etc. add r4, r4, #20 rsb r5, r4, #32 shift1 lsl, r3, xl, r5 shift1 lsr, xl, xl, r4 shiftop orr xl xl xh lsl r5 r2 and r2, xh, #0x80000000 bic xh, xh, #0x80000000 adds xl, xl, r3, lsr #31 shiftop adc xh r2 xh lsr r4 r6 orrs lr, lr, r3, lsl #1 do_it eq biceq xl, xl, r3, lsr #31 RETLDM "r4, r5, r6" @ shift result right of 21 to 31 bits, or left 11 to 1 bits after @ a register switch from xh to xl. Then round. 1: rsb r4, r4, #12 rsb r5, r4, #32 shift1 lsl, r3, xl, r4 shift1 lsr, xl, xl, r5 shiftop orr xl xl xh lsl r4 r2 bic xh, xh, #0x7fffffff adds xl, xl, r3, lsr #31 adc xh, xh, #0 orrs lr, lr, r3, lsl #1 do_it eq biceq xl, xl, r3, lsr #31 RETLDM "r4, r5, r6" @ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch @ from xh to xl. Leftover bits are in r3-r6-lr for rounding. 
2: rsb r5, r4, #32 shiftop orr lr lr xl lsl r5 r2 shift1 lsr, r3, xl, r4 shiftop orr r3 r3 xh lsl r5 r2 shift1 lsr, xl, xh, r4 bic xh, xh, #0x7fffffff shiftop bic xl xl xh lsr r4 r2 add xl, xl, r3, lsr #31 orrs lr, lr, r3, lsl #1 do_it eq biceq xl, xl, r3, lsr #31 RETLDM "r4, r5, r6" @ One or both arguments are denormalized. @ Scale them leftwards and preserve sign bit. LSYM(Lml_d): teq r4, #0 bne 2f and r6, xh, #0x80000000 1: movs xl, xl, lsl #1 adc xh, xh, xh tst xh, #0x00100000 do_it eq subeq r4, r4, #1 beq 1b orr xh, xh, r6 teq r5, #0 do_it ne RETc(ne) 2: and r6, yh, #0x80000000 3: movs yl, yl, lsl #1 adc yh, yh, yh tst yh, #0x00100000 do_it eq subeq r5, r5, #1 beq 3b orr yh, yh, r6 RET LSYM(Lml_s): @ Isolate the INF and NAN cases away teq r4, ip and r5, ip, yh, lsr #20 do_it ne teqne r5, ip beq 1f @ Here, one or more arguments are either denormalized or zero. orrs r6, xl, xh, lsl #1 do_it ne COND(orr,s,ne) r6, yl, yh, lsl #1 bne LSYM(Lml_d) @ Result is 0, but determine sign anyway. LSYM(Lml_z): eor xh, xh, yh and xh, xh, #0x80000000 mov xl, #0 RETLDM "r4, r5, r6" 1: @ One or both args are INF or NAN. orrs r6, xl, xh, lsl #1 do_it eq, te moveq xl, yl moveq xh, yh COND(orr,s,ne) r6, yl, yh, lsl #1 beq LSYM(Lml_n) @ 0 * INF or INF * 0 -> NAN teq r4, ip bne 1f orrs r6, xl, xh, lsl #12 bne LSYM(Lml_n) @ NAN * <anything> -> NAN 1: teq r5, ip bne LSYM(Lml_i) orrs r6, yl, yh, lsl #12 do_it ne, t movne xl, yl movne xh, yh bne LSYM(Lml_n) @ <anything> * NAN -> NAN @ Result is INF, but we need to determine its sign. LSYM(Lml_i): eor xh, xh, yh @ Overflow: return INF (sign already in xh). LSYM(Lml_o): and xh, xh, #0x80000000 orr xh, xh, #0x7f000000 orr xh, xh, #0x00f00000 mov xl, #0 RETLDM "r4, r5, r6" @ Return a quiet NAN. LSYM(Lml_n): orr xh, xh, #0x7f000000 orr xh, xh, #0x00f80000 RETLDM "r4, r5, r6" CFI_END_FUNCTION FUNC_END aeabi_dmul FUNC_END muldf3 #ifdef L_arm_muldivdf3 ARM_FUNC_START divdf3 ARM_FUNC_ALIAS aeabi_ddiv divdf3 CFI_START_FUNCTION do_push {r4, r5, r6, lr} .cfi_adjust_cfa_offset 16 .cfi_rel_offset r4, 0 .cfi_rel_offset r5, 4 .cfi_rel_offset r6, 8 .cfi_rel_offset lr, 12 @ Mask out exponents, trap any zero/denormal/INF/NAN. mov ip, #0xff orr ip, ip, #0x700 ands r4, ip, xh, lsr #20 do_it ne, tte COND(and,s,ne) r5, ip, yh, lsr #20 teqne r4, ip teqne r5, ip bleq LSYM(Ldv_s) @ Subtract divisor exponent from dividend's. sub r4, r4, r5 @ Preserve final sign into lr. eor lr, xh, yh @ Convert mantissa to unsigned integer. @ Dividend -> r5-r6, divisor -> yh-yl. orrs r5, yl, yh, lsl #12 mov xh, xh, lsl #12 beq LSYM(Ldv_1) mov yh, yh, lsl #12 mov r5, #0x10000000 orr yh, r5, yh, lsr #4 orr yh, yh, yl, lsr #24 mov yl, yl, lsl #8 orr r5, r5, xh, lsr #4 orr r5, r5, xl, lsr #24 mov r6, xl, lsl #8 @ Initialize xh with final sign bit. and xh, lr, #0x80000000 @ Ensure result will land in a known bit position. @ Apply exponent bias accordingly. cmp r5, yh do_it eq cmpeq r6, yl adc r4, r4, #(255 - 2) add r4, r4, #0x300 bcs 1f movs yh, yh, lsr #1 mov yl, yl, rrx 1: @ Perform first subtraction to align result to a nibble. subs r6, r6, yl sbc r5, r5, yh movs yh, yh, lsr #1 mov yl, yl, rrx mov xl, #0x00100000 mov ip, #0x00080000 @ The actual division loop. 
1: subs lr, r6, yl sbcs lr, r5, yh do_it cs, tt subcs r6, r6, yl movcs r5, lr orrcs xl, xl, ip movs yh, yh, lsr #1 mov yl, yl, rrx subs lr, r6, yl sbcs lr, r5, yh do_it cs, tt subcs r6, r6, yl movcs r5, lr orrcs xl, xl, ip, lsr #1 movs yh, yh, lsr #1 mov yl, yl, rrx subs lr, r6, yl sbcs lr, r5, yh do_it cs, tt subcs r6, r6, yl movcs r5, lr orrcs xl, xl, ip, lsr #2 movs yh, yh, lsr #1 mov yl, yl, rrx subs lr, r6, yl sbcs lr, r5, yh do_it cs, tt subcs r6, r6, yl movcs r5, lr orrcs xl, xl, ip, lsr #3 orrs lr, r5, r6 beq 2f mov r5, r5, lsl #4 orr r5, r5, r6, lsr #28 mov r6, r6, lsl #4 mov yh, yh, lsl #3 orr yh, yh, yl, lsr #29 mov yl, yl, lsl #3 movs ip, ip, lsr #4 bne 1b @ We are done with a word of the result. @ Loop again for the low word if this pass was for the high word. tst xh, #0x00100000 bne 3f orr xh, xh, xl mov xl, #0 mov ip, #0x80000000 b 1b 2: @ Be sure result starts in the high word. tst xh, #0x00100000 do_it eq, t orreq xh, xh, xl moveq xl, #0 3: @ Check exponent range for under/overflow. subs ip, r4, #(254 - 1) do_it hi cmphi ip, #0x700 bhi LSYM(Lml_u) @ Round the result, merge final exponent. subs ip, r5, yh do_it eq, t COND(sub,s,eq) ip, r6, yl COND(mov,s,eq) ip, xl, lsr #1 adcs xl, xl, #0 adc xh, xh, r4, lsl #20 RETLDM "r4, r5, r6" @ Division by 0x1p*: shortcut a lot of code. LSYM(Ldv_1): and lr, lr, #0x80000000 orr xh, lr, xh, lsr #12 adds r4, r4, ip, lsr #1 do_it gt, tt COND(rsb,s,gt) r5, r4, ip orrgt xh, xh, r4, lsl #20 RETLDM "r4, r5, r6" gt orr xh, xh, #0x00100000 mov lr, #0 subs r4, r4, #1 b LSYM(Lml_u) @ Result might need to be denormalized: put remainder bits @ in lr for rounding considerations. LSYM(Ldv_u): orr lr, r5, r6 b LSYM(Lml_u) @ One or both arguments are either INF, NAN or zero. LSYM(Ldv_s): and r5, ip, yh, lsr #20 teq r4, ip do_it eq teqeq r5, ip beq LSYM(Lml_n) @ INF/NAN / INF/NAN -> NAN teq r4, ip bne 1f orrs r4, xl, xh, lsl #12 bne LSYM(Lml_n) @ NAN / <anything> -> NAN teq r5, ip bne LSYM(Lml_i) @ INF / <anything> -> INF mov xl, yl mov xh, yh b LSYM(Lml_n) @ INF / (INF or NAN) -> NAN 1: teq r5, ip bne 2f orrs r5, yl, yh, lsl #12 beq LSYM(Lml_z) @ <anything> / INF -> 0 mov xl, yl mov xh, yh b LSYM(Lml_n) @ <anything> / NAN -> NAN 2: @ If both are nonzero, we need to normalize and resume above. orrs r6, xl, xh, lsl #1 do_it ne COND(orr,s,ne) r6, yl, yh, lsl #1 bne LSYM(Lml_d) @ One or both arguments are 0. orrs r4, xl, xh, lsl #1 bne LSYM(Lml_i) @ <non_zero> / 0 -> INF orrs r5, yl, yh, lsl #1 bne LSYM(Lml_z) @ 0 / <non_zero> -> 0 b LSYM(Lml_n) @ 0 / 0 -> NAN CFI_END_FUNCTION FUNC_END aeabi_ddiv FUNC_END divdf3 #endif /* L_muldivdf3 */ #endif /* L_arm_muldf3 || L_arm_muldivdf3 */ #ifdef L_arm_cmpdf2 @ Note: only r0 (return value) and ip are clobbered here. ARM_FUNC_START gtdf2 ARM_FUNC_ALIAS gedf2 gtdf2 CFI_START_FUNCTION mov ip, #-1 b 1f ARM_FUNC_START ltdf2 ARM_FUNC_ALIAS ledf2 ltdf2 mov ip, #1 b 1f ARM_FUNC_START cmpdf2 ARM_FUNC_ALIAS nedf2 cmpdf2 ARM_FUNC_ALIAS eqdf2 cmpdf2 mov ip, #1 @ how should we specify unordered here? 1: str ip, [sp, #-4]! .cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4. @ We're not adding CFI for ip as it's pushed into the stack @ only because it may be popped off later as a return value @ (i.e. we're not preserving it anyways). @ Trap any INF/NAN first. mov ip, xh, lsl #1 mvns ip, ip, asr #21 mov ip, yh, lsl #1 do_it ne COND(mvn,s,ne) ip, ip, asr #21 beq 3f .cfi_remember_state @ Save the current CFI state. 
This is done because the branch @ is conditional, and if we don't take it we'll issue a @ .cfi_adjust_cfa_offset and return. If we do take it, @ however, the .cfi_adjust_cfa_offset from the non-branch code @ will affect the branch code as well. To avoid this we'll @ restore the current state before executing the branch code. @ Test for equality. Note that 0.0 is equal to -0.0. 2: add sp, sp, #4 .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset. orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0 do_it eq, e COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0 teqne xh, yh @ or xh == yh do_it eq, tt teqeq xl, yl @ and xl == yl moveq r0, #0 @ then equal. RETc(eq) @ Clear C flag cmn r0, #0 @ Compare sign, teq xh, yh @ Compare values if same sign do_it pl cmppl xh, yh do_it eq cmpeq xl, yl @ Result: do_it cs, e movcs r0, yh, asr #31 mvncc r0, yh, asr #31 orr r0, r0, #1 RET 3: @ Look for a NAN. @ Restore the previous CFI state (i.e. keep the CFI state as it was @ before the branch). .cfi_restore_state mov ip, xh, lsl #1 mvns ip, ip, asr #21 bne 4f orrs ip, xl, xh, lsl #12 bne 5f @ x is NAN 4: mov ip, yh, lsl #1 mvns ip, ip, asr #21 bne 2b orrs ip, yl, yh, lsl #12 beq 2b @ y is not NAN 5: ldr r0, [sp], #4 @ unordered return code .cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset. RET CFI_END_FUNCTION FUNC_END gedf2 FUNC_END gtdf2 FUNC_END ledf2 FUNC_END ltdf2 FUNC_END nedf2 FUNC_END eqdf2 FUNC_END cmpdf2 ARM_FUNC_START aeabi_cdrcmple CFI_START_FUNCTION mov ip, r0 mov r0, r2 mov r2, ip mov ip, r1 mov r1, r3 mov r3, ip b 6f ARM_FUNC_START aeabi_cdcmpeq ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq @ The status-returning routines are required to preserve all @ registers except ip, lr, and cpsr. 6: do_push {r0, lr} .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8. .cfi_rel_offset r0, 0 @ Previous r0 is saved at sp. .cfi_rel_offset lr, 4 @ Previous lr is saved at sp + 4. ARM_CALL cmpdf2 @ Set the Z flag correctly, and the C flag unconditionally. cmp r0, #0 @ Clear the C flag if the return value was -1, indicating @ that the first operand was smaller than the second. do_it mi cmnmi r0, #0 RETLDM "r0" CFI_END_FUNCTION FUNC_END aeabi_cdcmple FUNC_END aeabi_cdcmpeq FUNC_END aeabi_cdrcmple ARM_FUNC_START aeabi_dcmpeq CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cdcmple do_it eq, e moveq r0, #1 @ Equal to. movne r0, #0 @ Less than, greater than, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_dcmpeq ARM_FUNC_START aeabi_dcmplt CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cdcmple do_it cc, e movcc r0, #1 @ Less than. movcs r0, #0 @ Equal to, greater than, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_dcmplt ARM_FUNC_START aeabi_dcmple CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cdcmple do_it ls, e movls r0, #1 @ Less than or equal to. movhi r0, #0 @ Greater than or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_dcmple ARM_FUNC_START aeabi_dcmpge CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cdrcmple do_it ls, e movls r0, #1 @ Operand 2 is less than or equal to operand 1. 
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_dcmpge ARM_FUNC_START aeabi_dcmpgt CFI_START_FUNCTION str lr, [sp, #-8]! @ sp -= 8 .cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8 .cfi_rel_offset lr, 0 @ lr is at sp ARM_CALL aeabi_cdrcmple do_it cc, e movcc r0, #1 @ Operand 2 is less than operand 1. movcs r0, #0 @ Operand 2 is greater than or equal to operand 1, @ or they are unordered. RETLDM CFI_END_FUNCTION FUNC_END aeabi_dcmpgt #endif /* L_cmpdf2 */ #ifdef L_arm_unorddf2 ARM_FUNC_START unorddf2 ARM_FUNC_ALIAS aeabi_dcmpun unorddf2 .cfi_startproc mov ip, xh, lsl #1 mvns ip, ip, asr #21 bne 1f orrs ip, xl, xh, lsl #12 bne 3f @ x is NAN 1: mov ip, yh, lsl #1 mvns ip, ip, asr #21 bne 2f orrs ip, yl, yh, lsl #12 bne 3f @ y is NAN 2: mov r0, #0 @ arguments are ordered. RET 3: mov r0, #1 @ arguments are unordered. RET .cfi_endproc FUNC_END aeabi_dcmpun FUNC_END unorddf2 #endif /* L_unorddf2 */ #ifdef L_arm_fixdfsi ARM_FUNC_START fixdfsi ARM_FUNC_ALIAS aeabi_d2iz fixdfsi CFI_START_FUNCTION @ check exponent range. mov r2, xh, lsl #1 adds r2, r2, #(1 << 21) bcs 2f @ value is INF or NAN bpl 1f @ value is too small mov r3, #(0xfffffc00 + 31) subs r2, r3, r2, asr #21 bls 3f @ value is too large @ scale value mov r3, xh, lsl #11 orr r3, r3, #0x80000000 orr r3, r3, xl, lsr #21 tst xh, #0x80000000 @ the sign bit shift1 lsr, r0, r3, r2 do_it ne rsbne r0, r0, #0 RET 1: mov r0, #0 RET 2: orrs xl, xl, xh, lsl #12 bne 4f @ x is NAN. 3: ands r0, xh, #0x80000000 @ the sign bit do_it eq moveq r0, #0x7fffffff @ maximum signed positive si RET 4: mov r0, #0 @ How should we convert NAN? RET CFI_END_FUNCTION FUNC_END aeabi_d2iz FUNC_END fixdfsi #endif /* L_fixdfsi */ #ifdef L_arm_fixunsdfsi ARM_FUNC_START fixunsdfsi ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi CFI_START_FUNCTION @ check exponent range. movs r2, xh, lsl #1 bcs 1f @ value is negative adds r2, r2, #(1 << 21) bcs 2f @ value is INF or NAN bpl 1f @ value is too small mov r3, #(0xfffffc00 + 31) subs r2, r3, r2, asr #21 bmi 3f @ value is too large @ scale value mov r3, xh, lsl #11 orr r3, r3, #0x80000000 orr r3, r3, xl, lsr #21 shift1 lsr, r0, r3, r2 RET 1: mov r0, #0 RET 2: orrs xl, xl, xh, lsl #12 bne 4f @ value is NAN. 3: mov r0, #0xffffffff @ maximum unsigned si RET 4: mov r0, #0 @ How should we convert NAN? RET CFI_END_FUNCTION FUNC_END aeabi_d2uiz FUNC_END fixunsdfsi #endif /* L_fixunsdfsi */ #ifdef L_arm_truncdfsf2 ARM_FUNC_START truncdfsf2 ARM_FUNC_ALIAS aeabi_d2f truncdfsf2 CFI_START_FUNCTION @ check exponent range. mov r2, xh, lsl #1 subs r3, r2, #((1023 - 127) << 21) do_it cs, t COND(sub,s,cs) ip, r3, #(1 << 21) COND(rsb,s,cs) ip, ip, #(254 << 21) bls 2f @ value is out of range 1: @ shift and round mantissa and ip, xh, #0x80000000 mov r2, xl, lsl #3 orr xl, ip, xl, lsr #29 cmp r2, #0x80000000 adc r0, xl, r3, lsl #2 do_it eq biceq r0, r0, #1 RET 2: @ either overflow or underflow tst xh, #0x40000000 bne 3f @ overflow @ check if denormalized value is possible adds r2, r3, #(23 << 21) do_it lt, t andlt r0, xh, #0x80000000 @ too small, return signed 0. RETc(lt) @ denormalize value so we can resume with the code above afterwards. orr xh, xh, #0x00100000 mov r2, r2, lsr #21 rsb r2, r2, #24 rsb ip, r2, #32 #if defined(__thumb2__) lsls r3, xl, ip #else movs r3, xl, lsl ip #endif shift1 lsr, xl, xl, r2 do_it ne orrne xl, xl, #1 @ fold r3 for rounding considerations. 
mov r3, xh, lsl #11 mov r3, r3, lsr #11 shiftop orr xl xl r3 lsl ip ip shift1 lsr, r3, r3, r2 mov r3, r3, lsl #1 b 1b 3: @ check for NAN mvns r3, r2, asr #21 bne 5f @ simple overflow orrs r3, xl, xh, lsl #12 do_it ne, tt movne r0, #0x7f000000 orrne r0, r0, #0x00c00000 RETc(ne) @ return NAN 5: @ return INF with sign and r0, xh, #0x80000000 orr r0, r0, #0x7f000000 orr r0, r0, #0x00800000 RET CFI_END_FUNCTION FUNC_END aeabi_d2f FUNC_END truncdfsf2 #endif /* L_truncdfsf2 */
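A recurring idiom throughout the file above is the final round-to-nearest-even step: compare the discarded low bits against 0x80000000 so the carry flag holds the rounding bit, add it with adc, and clear the LSB with biceq on an exact tie. A rough C equivalent of that step (the helper name is ours, not libgcc's):

#include <stdint.h>

/* Model of the "cmp #0x80000000 / adc / biceq #1" rounding idiom used
   by the routines above.  'rest' holds the bits shifted out below the
   result's LSB, so its MSB is the rounding bit.  Illustrative only. */
static uint32_t round_nearest_even(uint32_t value, uint32_t rest)
{
    value += rest >> 31;          /* adc: add 1 when the rounding bit is set */
    if (rest == 0x80000000u)      /* exactly halfway between two values ... */
        value &= ~1u;             /* ... so force the LSB even (biceq #1) */
    return value;
}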
4ms/metamodule-plugin-sdk
11,019
plugin-libc/libgcc/config/arm/libunwind.S
/* Support functions for the unwinder. Copyright (C) 2003-2022 Free Software Foundation, Inc. Contributed by Paul Brook This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* An executable stack is *not* required for these functions. */ #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ #ifndef __symbian__ #include "lib1funcs.S" .macro UNPREFIX name .global SYM (\name) EQUIV SYM (\name), SYM (__\name) .endm #if (__ARM_ARCH == 4) /* Some coprocessors require armv5t. We know this code will never be run on other cpus. Tell gas to allow armv5t, but only mark the objects as armv4. */ .arch armv5t #ifdef __ARM_ARCH_4T__ .object_arch armv4t #else .object_arch armv4 #endif #endif #if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1 /* r0 points to a 16-word block. Upload these values to the actual core state. */ FUNC_START restore_core_regs movs r1, r0 adds r1, r1, #52 ldmia r1!, {r3, r4, r5} subs r3, r3, #4 mov ip, r3 str r5, [r3] mov lr, r4 /* Restore r8-r11. */ movs r1, r0 adds r1, r1, #32 ldmia r1!, {r2, r3, r4, r5} mov r8, r2 mov r9, r3 mov sl, r4 mov fp, r5 movs r1, r0 adds r1, r1, #8 ldmia r1!, {r2, r3, r4, r5, r6, r7} ldr r1, [r0, #4] ldr r0, [r0] mov sp, ip pop {pc} FUNC_END restore_core_regs UNPREFIX restore_core_regs /* ARMV6M does not have coprocessors, so these should never be used. */ FUNC_START gnu_Unwind_Restore_VFP RET /* Store VFP registers d0-d15 to the address in r0. */ FUNC_START gnu_Unwind_Save_VFP RET /* Load VFP registers d0-d15 from the address in r0. Use this to load from FSTMD format. */ FUNC_START gnu_Unwind_Restore_VFP_D RET /* Store VFP registers d0-d15 to the address in r0. Use this to store in FLDMD format. */ FUNC_START gnu_Unwind_Save_VFP_D RET /* Load VFP registers d16-d31 from the address in r0. Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */ FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31 RET /* Store VFP registers d16-d31 to the address in r0. Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */ FUNC_START gnu_Unwind_Save_VFP_D_16_to_31 RET FUNC_START gnu_Unwind_Restore_WMMXD RET FUNC_START gnu_Unwind_Save_WMMXD RET FUNC_START gnu_Unwind_Restore_WMMXC RET FUNC_START gnu_Unwind_Save_WMMXC RET .macro UNWIND_WRAPPER name nargs FUNC_START \name /* Create a phase2_vrs structure. */ /* Save r0 in the PC slot so we can use it as a scratch register. 
*/ push {r0} add r0, sp, #4 push {r0, lr} /* Push original SP and LR. */ /* Make space for r8-r12. */ sub sp, sp, #20 /* Save low registers. */ push {r0, r1, r2, r3, r4, r5, r6, r7} /* Save high registers. */ add r0, sp, #32 mov r1, r8 mov r2, r9 mov r3, sl mov r4, fp mov r5, ip stmia r0!, {r1, r2, r3, r4, r5} /* Restore original low register values. */ add r0, sp, #4 ldmia r0!, {r1, r2, r3, r4, r5} /* Restore original r0. */ ldr r0, [sp, #60] str r0, [sp] /* Demand-save flags, plus an extra word for alignment. */ movs r3, #0 push {r2, r3} /* Point r1 at the block. Pass r[0..nargs) unchanged. */ add r\nargs, sp, #4 bl SYM (__gnu\name) ldr r3, [sp, #64] add sp, sp, #72 bx r3 FUNC_END \name UNPREFIX \name .endm #else /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */ /* r0 points to a 16-word block. Upload these values to the actual core state. */ ARM_FUNC_START restore_core_regs /* We must use sp as the base register when restoring sp. Push the last 3 registers onto the top of the current stack to achieve this. */ add r1, r0, #52 ldmia r1, {r3, r4, r5} /* {sp, lr, pc}. */ #if defined(__thumb2__) /* Thumb-2 doesn't allow sp in a load-multiple instruction, so push the target address onto the target stack. This is safe as we're always returning to somewhere further up the call stack. */ mov ip, r3 mov lr, r4 str r5, [ip, #-4]! #elif defined(__INTERWORKING__) /* Restore pc into ip. */ mov r2, r5 stmfd sp!, {r2, r3, r4} #else stmfd sp!, {r3, r4, r5} #endif /* Don't bother restoring ip. */ ldmia r0, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp} #if defined(__thumb2__) /* Pop the return address off the target stack. */ mov sp, ip pop {pc} #elif defined(__INTERWORKING__) /* Pop the three registers we pushed earlier. */ ldmfd sp, {ip, sp, lr} bx ip #else ldmfd sp, {sp, lr, pc} #endif FUNC_END restore_core_regs UNPREFIX restore_core_regs /* Load VFP registers d0-d15 from the address in r0. Use this to load from FSTMX format. */ ARM_FUNC_START gnu_Unwind_Restore_VFP /* Use the generic coprocessor form so that gas doesn't complain on soft-float targets. */ ldc p11,cr0,[r0],{0x21} /* fldmiax r0, {d0-d15} */ RET /* Store VFP registers d0-d15 to the address in r0. Use this to store in FSTMX format. */ ARM_FUNC_START gnu_Unwind_Save_VFP /* Use the generic coprocessor form so that gas doesn't complain on soft-float targets. */ stc p11,cr0,[r0],{0x21} /* fstmiax r0, {d0-d15} */ RET /* Load VFP registers d0-d15 from the address in r0. Use this to load from FSTMD format. */ ARM_FUNC_START gnu_Unwind_Restore_VFP_D ldc p11,cr0,[r0],{0x20} /* fldmiad r0, {d0-d15} */ RET /* Store VFP registers d0-d15 to the address in r0. Use this to store in FLDMD format. */ ARM_FUNC_START gnu_Unwind_Save_VFP_D stc p11,cr0,[r0],{0x20} /* fstmiad r0, {d0-d15} */ RET /* Load VFP registers d16-d31 from the address in r0. Use this to load from FSTMD (=VSTM) format. Needs VFPv3. */ ARM_FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31 ldcl p11,cr0,[r0],{0x20} /* vldm r0, {d16-d31} */ RET /* Store VFP registers d16-d31 to the address in r0. Use this to store in FLDMD (=VLDM) format. Needs VFPv3. */ ARM_FUNC_START gnu_Unwind_Save_VFP_D_16_to_31 stcl p11,cr0,[r0],{0x20} /* vstm r0, {d16-d31} */ RET ARM_FUNC_START gnu_Unwind_Restore_WMMXD /* Use the generic coprocessor form so that gas doesn't complain on non-iWMMXt targets. 
*/ ldcl p1, cr0, [r0], #8 /* wldrd wr0, [r0], #8 */ ldcl p1, cr1, [r0], #8 /* wldrd wr1, [r0], #8 */ ldcl p1, cr2, [r0], #8 /* wldrd wr2, [r0], #8 */ ldcl p1, cr3, [r0], #8 /* wldrd wr3, [r0], #8 */ ldcl p1, cr4, [r0], #8 /* wldrd wr4, [r0], #8 */ ldcl p1, cr5, [r0], #8 /* wldrd wr5, [r0], #8 */ ldcl p1, cr6, [r0], #8 /* wldrd wr6, [r0], #8 */ ldcl p1, cr7, [r0], #8 /* wldrd wr7, [r0], #8 */ ldcl p1, cr8, [r0], #8 /* wldrd wr8, [r0], #8 */ ldcl p1, cr9, [r0], #8 /* wldrd wr9, [r0], #8 */ ldcl p1, cr10, [r0], #8 /* wldrd wr10, [r0], #8 */ ldcl p1, cr11, [r0], #8 /* wldrd wr11, [r0], #8 */ ldcl p1, cr12, [r0], #8 /* wldrd wr12, [r0], #8 */ ldcl p1, cr13, [r0], #8 /* wldrd wr13, [r0], #8 */ ldcl p1, cr14, [r0], #8 /* wldrd wr14, [r0], #8 */ ldcl p1, cr15, [r0], #8 /* wldrd wr15, [r0], #8 */ RET ARM_FUNC_START gnu_Unwind_Save_WMMXD /* Use the generic coprocessor form so that gas doesn't complain on non-iWMMXt targets. */ stcl p1, cr0, [r0], #8 /* wstrd wr0, [r0], #8 */ stcl p1, cr1, [r0], #8 /* wstrd wr1, [r0], #8 */ stcl p1, cr2, [r0], #8 /* wstrd wr2, [r0], #8 */ stcl p1, cr3, [r0], #8 /* wstrd wr3, [r0], #8 */ stcl p1, cr4, [r0], #8 /* wstrd wr4, [r0], #8 */ stcl p1, cr5, [r0], #8 /* wstrd wr5, [r0], #8 */ stcl p1, cr6, [r0], #8 /* wstrd wr6, [r0], #8 */ stcl p1, cr7, [r0], #8 /* wstrd wr7, [r0], #8 */ stcl p1, cr8, [r0], #8 /* wstrd wr8, [r0], #8 */ stcl p1, cr9, [r0], #8 /* wstrd wr9, [r0], #8 */ stcl p1, cr10, [r0], #8 /* wstrd wr10, [r0], #8 */ stcl p1, cr11, [r0], #8 /* wstrd wr11, [r0], #8 */ stcl p1, cr12, [r0], #8 /* wstrd wr12, [r0], #8 */ stcl p1, cr13, [r0], #8 /* wstrd wr13, [r0], #8 */ stcl p1, cr14, [r0], #8 /* wstrd wr14, [r0], #8 */ stcl p1, cr15, [r0], #8 /* wstrd wr15, [r0], #8 */ RET ARM_FUNC_START gnu_Unwind_Restore_WMMXC /* Use the generic coprocessor form so that gas doesn't complain on non-iWMMXt targets. */ ldc2 p1, cr8, [r0], #4 /* wldrw wcgr0, [r0], #4 */ ldc2 p1, cr9, [r0], #4 /* wldrw wcgr1, [r0], #4 */ ldc2 p1, cr10, [r0], #4 /* wldrw wcgr2, [r0], #4 */ ldc2 p1, cr11, [r0], #4 /* wldrw wcgr3, [r0], #4 */ RET ARM_FUNC_START gnu_Unwind_Save_WMMXC /* Use the generic coprocessor form so that gas doesn't complain on non-iWMMXt targets. */ stc2 p1, cr8, [r0], #4 /* wstrw wcgr0, [r0], #4 */ stc2 p1, cr9, [r0], #4 /* wstrw wcgr1, [r0], #4 */ stc2 p1, cr10, [r0], #4 /* wstrw wcgr2, [r0], #4 */ stc2 p1, cr11, [r0], #4 /* wstrw wcgr3, [r0], #4 */ RET /* Wrappers to save core registers, then call the real routine. */ .macro UNWIND_WRAPPER name nargs ARM_FUNC_START \name /* Create a phase2_vrs structure. */ /* Split reg push in two to ensure the correct value for sp. */ #if defined(__thumb2__) mov ip, sp push {lr} /* PC is ignored. */ push {ip, lr} /* Push original SP and LR. */ #else stmfd sp!, {sp, lr, pc} #endif stmfd sp!, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp, ip} /* Demand-save flags, plus an extra word for alignment. */ mov r3, #0 stmfd sp!, {r2, r3} /* Point r1 at the block. Pass r[0..nargs) unchanged. */ add r\nargs, sp, #4 #if defined(__thumb__) && !defined(__thumb2__) /* Switch back to thumb mode to avoid interworking hassle. 
*/ adr ip, .L1_\name orr ip, ip, #1 bx ip .thumb .L1_\name: bl SYM (__gnu\name) __PLT__ ldr r3, [sp, #64] add sp, #72 bx r3 #else bl SYM (__gnu\name) __PLT__ ldr lr, [sp, #64] add sp, sp, #72 RET #endif FUNC_END \name UNPREFIX \name .endm #endif /* __ARM_ARCH_ISA_ARM || __ARM_ARCH_ISA_THUMB != 1 */ UNWIND_WRAPPER _Unwind_RaiseException 1 UNWIND_WRAPPER _Unwind_Resume 1 UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1 UNWIND_WRAPPER _Unwind_ForcedUnwind 3 UNWIND_WRAPPER _Unwind_Backtrace 2 #endif /* ndef __symbian__ */
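Both restore_core_regs variants above consume the same 16-word block: r0-r12 come first, then {sp, lr, pc} at offset 52, which is why those three words are staged through the target stack before sp is switched. A sketch of the assumed layout (field names are illustrative; the unwinder itself just indexes a 16-element array):

#include <stdint.h>

/* Assumed shape of the 16-word block handed to restore_core_regs
   above.  Field names are illustrative, not the unwinder's own. */
struct core_regs_block {
    uint32_t r[13];   /* r0-r12, restored with a single ldmia */
    uint32_t sp;      /* offset 52: {sp, lr, pc} are loaded last, */
    uint32_t lr;      /* via the target stack, so that sp itself */
    uint32_t pc;      /* can be switched before the final pop/bx */
};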
4ms/metamodule-plugin-sdk
46,938
plugin-libc/libgcc/config/arm/lib1funcs.S
@ libgcc routines for ARM cpu. @ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) /* Copyright (C) 1995-2022 Free Software Foundation, Inc. This file is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This file is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* Everything in this file should now use unified syntax. */ .syntax unified /* An executable stack is *not* required for these functions. */ #if defined(__ELF__) && defined(__linux__) .section .note.GNU-stack,"",%progbits .previous #endif /* __ELF__ and __linux__ */ #ifdef __ARM_EABI__ /* Some attributes that are common to all routines in this file. */ /* Tag_ABI_align_needed: This code does not require 8-byte alignment from the caller. */ /* .eabi_attribute 24, 0 -- default setting. */ /* Tag_ABI_align_preserved: This code preserves 8-byte alignment in any callee. */ .eabi_attribute 25, 1 #endif /* __ARM_EABI__ */ /* ------------------------------------------------------------------------ */ /* We need to know what prefix to add to function names. */ #ifndef __USER_LABEL_PREFIX__ #error __USER_LABEL_PREFIX__ not defined #endif /* ANSI concatenation macros. */ #define CONCAT1(a, b) CONCAT2(a, b) #define CONCAT2(a, b) a ## b /* Use the right prefix for global labels. */ #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x) #ifdef __ELF__ #ifdef __thumb__ #define __PLT__ /* Not supported in Thumb assembler (for now). */ #elif defined __vxworks && !defined __PIC__ #define __PLT__ /* Not supported by the kernel loader. */ #else #define __PLT__ (PLT) #endif #define TYPE(x) .type SYM(x),function #define SIZE(x) .size SYM(x), . - SYM(x) #define LSYM(x) .x #else #define __PLT__ #define TYPE(x) #define SIZE(x) #define LSYM(x) x #endif /* Function end macros. Variants for interworking. */ /* There are times when we might prefer Thumb1 code even if ARM code is permitted, for example, the code might be smaller, or there might be interworking problems with switching to ARM state if interworking is disabled. */ #if (defined(__thumb__) \ && !defined(__thumb2__) \ && (!defined(__THUMB_INTERWORK__) \ || defined (__OPTIMIZE_SIZE__) \ || !__ARM_ARCH_ISA_ARM)) # define __prefer_thumb__ #endif #if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1 #define NOT_ISA_TARGET_32BIT 1 #endif /* How to return from a function call depends on the architecture variant. */ #if (__ARM_ARCH > 4) || defined(__ARM_ARCH_4T__) # define RET bx lr # define RETc(x) bx##x lr /* Special precautions for interworking on armv4t. */ # if (__ARM_ARCH == 4) /* Always use bx, not ldr pc. */ # if (defined(__thumb__) || defined(__THUMB_INTERWORK__)) # define __INTERWORKING__ # endif /* __THUMB__ || __THUMB_INTERWORK__ */ /* Include thumb stub before arm mode code. 
*/ # if defined(__thumb__) && !defined(__THUMB_INTERWORK__) # define __INTERWORKING_STUBS__ # endif /* __thumb__ && !__THUMB_INTERWORK__ */ #endif /* __ARM_ARCH == 4 */ #else # define RET mov pc, lr # define RETc(x) mov##x pc, lr #endif .macro cfi_pop advance, reg, cfa_offset #ifdef __ELF__ .pushsection .debug_frame .byte 0x4 /* DW_CFA_advance_loc4 */ .4byte \advance .byte (0xc0 | \reg) /* DW_CFA_restore */ .byte 0xe /* DW_CFA_def_cfa_offset */ .uleb128 \cfa_offset .popsection #endif .endm .macro cfi_push advance, reg, offset, cfa_offset #ifdef __ELF__ .pushsection .debug_frame .byte 0x4 /* DW_CFA_advance_loc4 */ .4byte \advance .byte (0x80 | \reg) /* DW_CFA_offset */ .uleb128 (\offset / -4) .byte 0xe /* DW_CFA_def_cfa_offset */ .uleb128 \cfa_offset .popsection #endif .endm .macro cfi_start start_label, end_label #ifdef __ELF__ .pushsection .debug_frame LSYM(Lstart_frame): .4byte LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE LSYM(Lstart_cie): .4byte 0xffffffff @ CIE Identifier Tag .byte 0x1 @ CIE Version .ascii "\0" @ CIE Augmentation .uleb128 0x1 @ CIE Code Alignment Factor .sleb128 -4 @ CIE Data Alignment Factor .byte 0xe @ CIE RA Column .byte 0xc @ DW_CFA_def_cfa .uleb128 0xd .uleb128 0x0 .align 2 LSYM(Lend_cie): .4byte LSYM(Lend_fde)-LSYM(Lstart_fde) @ FDE Length LSYM(Lstart_fde): .4byte LSYM(Lstart_frame) @ FDE CIE offset .4byte \start_label @ FDE initial location .4byte \end_label-\start_label @ FDE address range .popsection #endif .endm .macro cfi_end end_label #ifdef __ELF__ .pushsection .debug_frame .align 2 LSYM(Lend_fde): .popsection \end_label: #endif .endm /* Don't pass dirn, it's there just to get token pasting right. */ .macro RETLDM regs=, cond=, unwind=, dirn=ia #if defined (__INTERWORKING__) .ifc "\regs","" ldr\cond lr, [sp], #8 .else # if defined(__thumb2__) pop\cond {\regs, lr} # else ldm\cond\dirn sp!, {\regs, lr} # endif .endif .ifnc "\unwind", "" /* Mark LR as restored. */ 97: cfi_pop 97b - \unwind, 0xe, 0x0 .endif bx\cond lr #else /* Caller is responsible for providing IT instruction. */ .ifc "\regs","" ldr\cond pc, [sp], #8 .else # if defined(__thumb2__) pop\cond {\regs, pc} # else ldm\cond\dirn sp!, {\regs, pc} # endif .endif #endif .endm /* The Unified assembly syntax allows the same code to be assembled for both ARM and Thumb-2. However this is only supported by recent gas, so define a set of macros to allow ARM code on older assemblers. */ #if defined(__thumb2__) .macro do_it cond, suffix="" it\suffix \cond .endm .macro shift1 op, arg0, arg1, arg2 \op \arg0, \arg1, \arg2 .endm #define do_push push #define do_pop pop /* Perform an arithmetic operation with a variable shift operand. This requires two instructions and a scratch register on Thumb-2. */ .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp \shiftop \tmp, \src2, \shiftreg \name \dest, \src1, \tmp .endm #else .macro do_it cond, suffix="" .endm .macro shift1 op, arg0, arg1, arg2 mov \arg0, \arg1, \op \arg2 .endm #define do_push stmfd sp!, #define do_pop ldmfd sp!, .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp \name \dest, \src1, \src2, \shiftop \shiftreg .endm #endif #define COND(op1, op2, cond) op1 ## op2 ## cond #ifdef __ARM_EABI__ .macro ARM_LDIV0 name signed cmp r0, #0 .ifc \signed, unsigned movne r0, #0xffffffff .else movgt r0, #0x7fffffff movlt r0, #0x80000000 .endif b SYM (__aeabi_idiv0) __PLT__ .endm #else .macro ARM_LDIV0 name signed str lr, [sp, #-8]! 98: cfi_push 98b - __\name, 0xe, -0x8, 0x8 bl SYM (__div0) __PLT__ mov r0, #0 @ About as wrong as it could be. 
RETLDM unwind=98b .endm #endif #ifdef __ARM_EABI__ .macro THUMB_LDIV0 name signed #ifdef NOT_ISA_TARGET_32BIT push {r0, lr} movs r0, #0 bl SYM(__aeabi_idiv0) @ We know we are not on armv4t, so pop pc is safe. pop {r1, pc} #elif defined(__thumb2__) .syntax unified .ifc \signed, unsigned cbz r0, 1f mov r0, #0xffffffff 1: .else cmp r0, #0 do_it gt movgt r0, #0x7fffffff do_it lt movlt r0, #0x80000000 .endif b.w SYM(__aeabi_idiv0) __PLT__ #else .align 2 bx pc nop .arm cmp r0, #0 .ifc \signed, unsigned movne r0, #0xffffffff .else movgt r0, #0x7fffffff movlt r0, #0x80000000 .endif b SYM(__aeabi_idiv0) __PLT__ .thumb #endif .endm #else .macro THUMB_LDIV0 name signed push { r1, lr } 98: cfi_push 98b - __\name, 0xe, -0x4, 0x8 bl SYM (__div0) movs r0, #0 @ About as wrong as it could be. #if defined (__INTERWORKING__) pop { r1, r2 } bx r2 #else pop { r1, pc } #endif .endm #endif .macro FUNC_END name SIZE (__\name) .endm .macro DIV_FUNC_END name signed cfi_start __\name, LSYM(Lend_div0) LSYM(Ldiv0): #ifdef __thumb__ THUMB_LDIV0 \name \signed #else ARM_LDIV0 \name \signed #endif cfi_end LSYM(Lend_div0) FUNC_END \name .endm .macro THUMB_FUNC_START name .globl SYM (\name) TYPE (\name) .thumb_func SYM (\name): .endm /* Function start macros. Variants for ARM and Thumb. */ #ifdef __thumb__ #define THUMB_FUNC .thumb_func #define THUMB_CODE .force_thumb # if defined(__thumb2__) #define THUMB_SYNTAX # else #define THUMB_SYNTAX # endif #else #define THUMB_FUNC #define THUMB_CODE #define THUMB_SYNTAX #endif .macro FUNC_START name .text .globl SYM (__\name) TYPE (__\name) .align 0 THUMB_CODE THUMB_FUNC THUMB_SYNTAX SYM (__\name): .endm .macro ARM_SYM_START name TYPE (\name) .align 0 SYM (\name): .endm .macro SYM_END name SIZE (\name) .endm /* Special function that will always be coded in ARM assembly, even if in Thumb-only compilation. */ #if defined(__thumb2__) /* For Thumb-2 we build everything in thumb mode. */ .macro ARM_FUNC_START name FUNC_START \name .syntax unified .endm #define EQUIV .thumb_set .macro ARM_CALL name bl __\name .endm #elif defined(__INTERWORKING_STUBS__) .macro ARM_FUNC_START name FUNC_START \name bx pc nop .arm /* A hook to tell gdb that we've switched to ARM mode. Also used to call directly from other local arm routines. */ _L__\name: .endm #define EQUIV .thumb_set /* Branch directly to a function declared with ARM_FUNC_START. Must be called in arm mode. */ .macro ARM_CALL name bl _L__\name .endm #else /* !(__INTERWORKING_STUBS__ || __thumb2__) */ #ifdef NOT_ISA_TARGET_32BIT #define EQUIV .thumb_set #else .macro ARM_FUNC_START name .text .globl SYM (__\name) TYPE (__\name) .align 0 .arm SYM (__\name): .endm #define EQUIV .set .macro ARM_CALL name bl __\name .endm #endif #endif .macro FUNC_ALIAS new old .globl SYM (__\new) #if defined (__thumb__) .thumb_set SYM (__\new), SYM (__\old) #else .set SYM (__\new), SYM (__\old) #endif .endm #ifndef NOT_ISA_TARGET_32BIT .macro ARM_FUNC_ALIAS new old .globl SYM (__\new) EQUIV SYM (__\new), SYM (__\old) #if defined(__INTERWORKING_STUBS__) .set SYM (_L__\new), SYM (_L__\old) #endif .endm #endif #ifdef __ARMEB__ #define xxh r0 #define xxl r1 #define yyh r2 #define yyl r3 #else #define xxh r1 #define xxl r0 #define yyh r3 #define yyl r2 #endif #ifdef __ARM_EABI__ .macro WEAK name .weak SYM (__\name) .endm #endif #ifdef __thumb__ /* Register aliases. */ work .req r4 @ XXXX is this safe ? 
dividend .req r0 divisor .req r1 overdone .req r2 result .req r2 curbit .req r3 #endif #if 0 ip .req r12 sp .req r13 lr .req r14 pc .req r15 #endif /* ------------------------------------------------------------------------ */ /* Bodies of the division and modulo routines. */ /* ------------------------------------------------------------------------ */ .macro ARM_DIV_BODY dividend, divisor, result, curbit #if defined (__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__) #if defined (__thumb2__) clz \curbit, \dividend clz \result, \divisor sub \curbit, \result, \curbit rsb \curbit, \curbit, #31 adr \result, 1f add \curbit, \result, \curbit, lsl #4 mov \result, #0 mov pc, \curbit .p2align 3 1: .set shift, 32 .rept 32 .set shift, shift - 1 cmp.w \dividend, \divisor, lsl #shift nop.n adc.w \result, \result, \result it cs subcs.w \dividend, \dividend, \divisor, lsl #shift .endr #else clz \curbit, \dividend clz \result, \divisor sub \curbit, \result, \curbit rsbs \curbit, \curbit, #31 addne \curbit, \curbit, \curbit, lsl #1 mov \result, #0 addne pc, pc, \curbit, lsl #2 nop .set shift, 32 .rept 32 .set shift, shift - 1 cmp \dividend, \divisor, lsl #shift adc \result, \result, \result subcs \dividend, \dividend, \divisor, lsl #shift .endr #endif #else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */ #if defined (__ARM_FEATURE_CLZ) clz \curbit, \divisor clz \result, \dividend sub \result, \curbit, \result mov \curbit, #1 mov \divisor, \divisor, lsl \result mov \curbit, \curbit, lsl \result mov \result, #0 #else /* !defined (__ARM_FEATURE_CLZ) */ @ Initially shift the divisor left 3 bits if possible, @ set curbit accordingly. This allows for curbit to be located @ at the left end of each 4-bit nibbles in the division loop @ to save one loop in most cases. tst \divisor, #0xe0000000 moveq \divisor, \divisor, lsl #3 moveq \curbit, #8 movne \curbit, #1 @ Unless the divisor is very big, shift it up in multiples of @ four bits, since this is the amount of unwinding in the main @ division loop. Continue shifting until the divisor is @ larger than the dividend. 1: cmp \divisor, #0x10000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #4 movlo \curbit, \curbit, lsl #4 blo 1b @ For very big divisors, we must shift it a bit at a time, or @ we will be in danger of overflowing. 1: cmp \divisor, #0x80000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #1 movlo \curbit, \curbit, lsl #1 blo 1b mov \result, #0 #endif /* !defined (__ARM_FEATURE_CLZ) */ @ Division loop 1: cmp \dividend, \divisor do_it hs, t subhs \dividend, \dividend, \divisor orrhs \result, \result, \curbit cmp \dividend, \divisor, lsr #1 do_it hs, t subhs \dividend, \dividend, \divisor, lsr #1 orrhs \result, \result, \curbit, lsr #1 cmp \dividend, \divisor, lsr #2 do_it hs, t subhs \dividend, \dividend, \divisor, lsr #2 orrhs \result, \result, \curbit, lsr #2 cmp \dividend, \divisor, lsr #3 do_it hs, t subhs \dividend, \dividend, \divisor, lsr #3 orrhs \result, \result, \curbit, lsr #3 cmp \dividend, #0 @ Early termination? do_it ne, t movnes \curbit, \curbit, lsr #4 @ No, any more bits to do? 
movne \divisor, \divisor, lsr #4 bne 1b #endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */ .endm /* ------------------------------------------------------------------------ */ .macro ARM_DIV2_ORDER divisor, order #if defined (__ARM_FEATURE_CLZ) clz \order, \divisor rsb \order, \order, #31 #else cmp \divisor, #(1 << 16) movhs \divisor, \divisor, lsr #16 movhs \order, #16 movlo \order, #0 cmp \divisor, #(1 << 8) movhs \divisor, \divisor, lsr #8 addhs \order, \order, #8 cmp \divisor, #(1 << 4) movhs \divisor, \divisor, lsr #4 addhs \order, \order, #4 cmp \divisor, #(1 << 2) addhi \order, \order, #3 addls \order, \order, \divisor, lsr #1 #endif .endm /* ------------------------------------------------------------------------ */ .macro ARM_MOD_BODY dividend, divisor, order, spare #if defined(__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__) clz \order, \divisor clz \spare, \dividend sub \order, \order, \spare rsbs \order, \order, #31 addne pc, pc, \order, lsl #3 nop .set shift, 32 .rept 32 .set shift, shift - 1 cmp \dividend, \divisor, lsl #shift subcs \dividend, \dividend, \divisor, lsl #shift .endr #else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */ #if defined (__ARM_FEATURE_CLZ) clz \order, \divisor clz \spare, \dividend sub \order, \order, \spare mov \divisor, \divisor, lsl \order #else /* !defined (__ARM_FEATURE_CLZ) */ mov \order, #0 @ Unless the divisor is very big, shift it up in multiples of @ four bits, since this is the amount of unwinding in the main @ division loop. Continue shifting until the divisor is @ larger than the dividend. 1: cmp \divisor, #0x10000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #4 addlo \order, \order, #4 blo 1b @ For very big divisors, we must shift it a bit at a time, or @ we will be in danger of overflowing. 1: cmp \divisor, #0x80000000 cmplo \divisor, \dividend movlo \divisor, \divisor, lsl #1 addlo \order, \order, #1 blo 1b #endif /* !defined (__ARM_FEATURE_CLZ) */ @ Perform all needed substractions to keep only the reminder. @ Do comparisons in batch of 4 first. subs \order, \order, #3 @ yes, 3 is intended here blt 2f 1: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor cmp \dividend, \divisor, lsr #1 subhs \dividend, \dividend, \divisor, lsr #1 cmp \dividend, \divisor, lsr #2 subhs \dividend, \dividend, \divisor, lsr #2 cmp \dividend, \divisor, lsr #3 subhs \dividend, \dividend, \divisor, lsr #3 cmp \dividend, #1 mov \divisor, \divisor, lsr #4 subges \order, \order, #4 bge 1b tst \order, #3 teqne \dividend, #0 beq 5f @ Either 1, 2 or 3 comparison/substractions are left. 2: cmn \order, #2 blt 4f beq 3f cmp \dividend, \divisor subhs \dividend, \dividend, \divisor mov \divisor, \divisor, lsr #1 3: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor mov \divisor, \divisor, lsr #1 4: cmp \dividend, \divisor subhs \dividend, \dividend, \divisor 5: #endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */ .endm /* ------------------------------------------------------------------------ */ .macro THUMB_DIV_MOD_BODY modulo @ Load the constant 0x10000000 into our work register. movs work, #1 lsls work, #28 LSYM(Loop1): @ Unless the divisor is very big, shift it up in multiples of @ four bits, since this is the amount of unwinding in the main @ division loop. Continue shifting until the divisor is @ larger than the dividend. 
cmp divisor, work bhs LSYM(Lbignum) cmp divisor, dividend bhs LSYM(Lbignum) lsls divisor, #4 lsls curbit, #4 b LSYM(Loop1) LSYM(Lbignum): @ Set work to 0x80000000 lsls work, #3 LSYM(Loop2): @ For very big divisors, we must shift it a bit at a time, or @ we will be in danger of overflowing. cmp divisor, work bhs LSYM(Loop3) cmp divisor, dividend bhs LSYM(Loop3) lsls divisor, #1 lsls curbit, #1 b LSYM(Loop2) LSYM(Loop3): @ Test for possible subtractions ... .if \modulo @ ... On the final pass, this may subtract too much from the dividend, @ so keep track of which subtractions are done, we can fix them up @ afterwards. movs overdone, #0 cmp dividend, divisor blo LSYM(Lover1) subs dividend, dividend, divisor LSYM(Lover1): lsrs work, divisor, #1 cmp dividend, work blo LSYM(Lover2) subs dividend, dividend, work mov ip, curbit movs work, #1 rors curbit, work orrs overdone, curbit mov curbit, ip LSYM(Lover2): lsrs work, divisor, #2 cmp dividend, work blo LSYM(Lover3) subs dividend, dividend, work mov ip, curbit movs work, #2 rors curbit, work orrs overdone, curbit mov curbit, ip LSYM(Lover3): lsrs work, divisor, #3 cmp dividend, work blo LSYM(Lover4) subs dividend, dividend, work mov ip, curbit movs work, #3 rors curbit, work orrs overdone, curbit mov curbit, ip LSYM(Lover4): mov ip, curbit .else @ ... and note which bits are done in the result. On the final pass, @ this may subtract too much from the dividend, but the result will be ok, @ since the "bit" will have been shifted out at the bottom. cmp dividend, divisor blo LSYM(Lover1) subs dividend, dividend, divisor orrs result, result, curbit LSYM(Lover1): lsrs work, divisor, #1 cmp dividend, work blo LSYM(Lover2) subs dividend, dividend, work lsrs work, curbit, #1 orrs result, work LSYM(Lover2): lsrs work, divisor, #2 cmp dividend, work blo LSYM(Lover3) subs dividend, dividend, work lsrs work, curbit, #2 orrs result, work LSYM(Lover3): lsrs work, divisor, #3 cmp dividend, work blo LSYM(Lover4) subs dividend, dividend, work lsrs work, curbit, #3 orrs result, work LSYM(Lover4): .endif cmp dividend, #0 @ Early termination? beq LSYM(Lover5) lsrs curbit, #4 @ No, any more bits to do? beq LSYM(Lover5) lsrs divisor, #4 b LSYM(Loop3) LSYM(Lover5): .if \modulo @ Any subtractions that we should not have done will be recorded in @ the top three bits of "overdone". Exactly which were not needed @ are governed by the position of the bit, stored in ip. movs work, #0xe lsls work, #28 ands overdone, work beq LSYM(Lgot_result) @ If we terminated early, because dividend became zero, then the @ bit in ip will not be in the bottom nibble, and we should not @ perform the additions below. We must test for this though @ (rather relying upon the TSTs to prevent the additions) since @ the bit in ip could be in the top two bits which might then match @ with one of the smaller RORs. mov curbit, ip movs work, #0x7 tst curbit, work beq LSYM(Lgot_result) mov curbit, ip movs work, #3 rors curbit, work tst overdone, curbit beq LSYM(Lover6) lsrs work, divisor, #3 adds dividend, work LSYM(Lover6): mov curbit, ip movs work, #2 rors curbit, work tst overdone, curbit beq LSYM(Lover7) lsrs work, divisor, #2 adds dividend, work LSYM(Lover7): mov curbit, ip movs work, #1 rors curbit, work tst overdone, curbit beq LSYM(Lgot_result) lsrs work, divisor, #1 adds dividend, work .endif LSYM(Lgot_result): .endm /* If performance is preferred, the following functions are provided. 
*/ #if defined(__prefer_thumb__) && !defined(__OPTIMIZE_SIZE__) /* Branch to div(n), and jump to label if curbit is lo than divisior. */ .macro BranchToDiv n, label lsrs curbit, dividend, \n cmp curbit, divisor blo \label .endm /* Body of div(n). Shift the divisor in n bits and compare the divisor and dividend. Update the dividend as the substruction result. */ .macro DoDiv n lsrs curbit, dividend, \n cmp curbit, divisor bcc 1f lsls curbit, divisor, \n subs dividend, dividend, curbit 1: adcs result, result .endm /* The body of division with positive divisor. Unless the divisor is very big, shift it up in multiples of four bits, since this is the amount of unwinding in the main division loop. Continue shifting until the divisor is larger than the dividend. */ .macro THUMB1_Div_Positive movs result, #0 BranchToDiv #1, LSYM(Lthumb1_div1) BranchToDiv #4, LSYM(Lthumb1_div4) BranchToDiv #8, LSYM(Lthumb1_div8) BranchToDiv #12, LSYM(Lthumb1_div12) BranchToDiv #16, LSYM(Lthumb1_div16) LSYM(Lthumb1_div_large_positive): movs result, #0xff lsls divisor, divisor, #8 rev result, result lsrs curbit, dividend, #16 cmp curbit, divisor blo 1f asrs result, #8 lsls divisor, divisor, #8 beq LSYM(Ldivbyzero_waypoint) 1: lsrs curbit, dividend, #12 cmp curbit, divisor blo LSYM(Lthumb1_div12) b LSYM(Lthumb1_div16) LSYM(Lthumb1_div_loop): lsrs divisor, divisor, #8 LSYM(Lthumb1_div16): Dodiv #15 Dodiv #14 Dodiv #13 Dodiv #12 LSYM(Lthumb1_div12): Dodiv #11 Dodiv #10 Dodiv #9 Dodiv #8 bcs LSYM(Lthumb1_div_loop) LSYM(Lthumb1_div8): Dodiv #7 Dodiv #6 Dodiv #5 LSYM(Lthumb1_div5): Dodiv #4 LSYM(Lthumb1_div4): Dodiv #3 LSYM(Lthumb1_div3): Dodiv #2 LSYM(Lthumb1_div2): Dodiv #1 LSYM(Lthumb1_div1): subs divisor, dividend, divisor bcs 1f cpy divisor, dividend 1: adcs result, result cpy dividend, result RET LSYM(Ldivbyzero_waypoint): b LSYM(Ldiv0) .endm /* The body of division with negative divisor. Similar with THUMB1_Div_Positive except that the shift steps are in multiples of six bits. */ .macro THUMB1_Div_Negative lsrs result, divisor, #31 beq 1f negs divisor, divisor 1: asrs curbit, dividend, #32 bcc 2f negs dividend, dividend 2: eors curbit, result movs result, #0 cpy ip, curbit BranchToDiv #4, LSYM(Lthumb1_div_negative4) BranchToDiv #8, LSYM(Lthumb1_div_negative8) LSYM(Lthumb1_div_large): movs result, #0xfc lsls divisor, divisor, #6 rev result, result lsrs curbit, dividend, #8 cmp curbit, divisor blo LSYM(Lthumb1_div_negative8) lsls divisor, divisor, #6 asrs result, result, #6 cmp curbit, divisor blo LSYM(Lthumb1_div_negative8) lsls divisor, divisor, #6 asrs result, result, #6 cmp curbit, divisor blo LSYM(Lthumb1_div_negative8) lsls divisor, divisor, #6 beq LSYM(Ldivbyzero_negative) asrs result, result, #6 b LSYM(Lthumb1_div_negative8) LSYM(Lthumb1_div_negative_loop): lsrs divisor, divisor, #6 LSYM(Lthumb1_div_negative8): DoDiv #7 DoDiv #6 DoDiv #5 DoDiv #4 LSYM(Lthumb1_div_negative4): DoDiv #3 DoDiv #2 bcs LSYM(Lthumb1_div_negative_loop) DoDiv #1 subs divisor, dividend, divisor bcs 1f cpy divisor, dividend 1: cpy curbit, ip adcs result, result asrs curbit, curbit, #1 cpy dividend, result bcc 2f negs dividend, dividend cmp curbit, #0 2: bpl 3f negs divisor, divisor 3: RET LSYM(Ldivbyzero_negative): cpy curbit, ip asrs curbit, curbit, #1 bcc LSYM(Ldiv0) negs dividend, dividend .endm #endif /* ARM Thumb version. 
*/ /* ------------------------------------------------------------------------ */ /* Start of the Real Functions */ /* ------------------------------------------------------------------------ */ #ifdef L_udivsi3 #if defined(__prefer_thumb__) FUNC_START udivsi3 FUNC_ALIAS aeabi_uidiv udivsi3 #if defined(__OPTIMIZE_SIZE__) cmp divisor, #0 beq LSYM(Ldiv0) LSYM(udivsi3_skip_div0_test): movs curbit, #1 movs result, #0 push { work } cmp dividend, divisor blo LSYM(Lgot_result) THUMB_DIV_MOD_BODY 0 movs r0, result pop { work } RET /* Implementation of aeabi_uidiv for ARMv6m. This version is only used in ARMv6-M when we need an efficient implementation. */ #else LSYM(udivsi3_skip_div0_test): THUMB1_Div_Positive #endif /* __OPTIMIZE_SIZE__ */ #elif defined(__ARM_ARCH_EXT_IDIV__) ARM_FUNC_START udivsi3 ARM_FUNC_ALIAS aeabi_uidiv udivsi3 cmp r1, #0 beq LSYM(Ldiv0) udiv r0, r0, r1 RET #else /* ARM version/Thumb-2. */ ARM_FUNC_START udivsi3 ARM_FUNC_ALIAS aeabi_uidiv udivsi3 /* Note: if called via udivsi3_skip_div0_test, this will unnecessarily check for division-by-zero a second time. */ LSYM(udivsi3_skip_div0_test): subs r2, r1, #1 do_it eq RETc(eq) bcc LSYM(Ldiv0) cmp r0, r1 bls 11f tst r1, r2 beq 12f ARM_DIV_BODY r0, r1, r2, r3 mov r0, r2 RET 11: do_it eq, e moveq r0, #1 movne r0, #0 RET 12: ARM_DIV2_ORDER r1, r2 mov r0, r0, lsr r2 RET #endif /* ARM version */ DIV_FUNC_END udivsi3 unsigned #if defined(__prefer_thumb__) FUNC_START aeabi_uidivmod cmp r1, #0 beq LSYM(Ldiv0) # if defined(__OPTIMIZE_SIZE__) push {r0, r1, lr} bl LSYM(udivsi3_skip_div0_test) POP {r1, r2, r3} muls r2, r0 subs r1, r1, r2 bx r3 # else /* Both the quotient and remainder are calculated simultaneously in THUMB1_Div_Positive. There is no need to calculate the remainder again here. */ b LSYM(udivsi3_skip_div0_test) RET # endif /* __OPTIMIZE_SIZE__ */ #elif defined(__ARM_ARCH_EXT_IDIV__) ARM_FUNC_START aeabi_uidivmod cmp r1, #0 beq LSYM(Ldiv0) mov r2, r0 udiv r0, r0, r1 mls r1, r0, r1, r2 RET #else ARM_FUNC_START aeabi_uidivmod cmp r1, #0 beq LSYM(Ldiv0) stmfd sp!, { r0, r1, lr } bl LSYM(udivsi3_skip_div0_test) ldmfd sp!, { r1, r2, lr } mul r3, r2, r0 sub r1, r1, r3 RET #endif FUNC_END aeabi_uidivmod #endif /* L_udivsi3 */ /* ------------------------------------------------------------------------ */ #ifdef L_umodsi3 #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1 ARM_FUNC_START umodsi3 cmp r1, #0 beq LSYM(Ldiv0) udiv r2, r0, r1 mls r0, r1, r2, r0 RET #elif defined(__thumb__) FUNC_START umodsi3 cmp divisor, #0 beq LSYM(Ldiv0) movs curbit, #1 cmp dividend, divisor bhs LSYM(Lover10) RET LSYM(Lover10): push { work } THUMB_DIV_MOD_BODY 1 pop { work } RET #else /* ARM version. */ FUNC_START umodsi3 subs r2, r1, #1 @ compare divisor with 1 bcc LSYM(Ldiv0) cmpne r0, r1 @ compare dividend with divisor moveq r0, #0 tsthi r1, r2 @ see if divisor is power of 2 andeq r0, r0, r2 RETc(ls) ARM_MOD_BODY r0, r1, r2, r3 RET #endif /* ARM version. */ DIV_FUNC_END umodsi3 unsigned #endif /* L_umodsi3 */ /* ------------------------------------------------------------------------ */ #ifdef L_divsi3 #if defined(__prefer_thumb__) FUNC_START divsi3 FUNC_ALIAS aeabi_idiv divsi3 #if defined(__OPTIMIZE_SIZE__) cmp divisor, #0 beq LSYM(Ldiv0) LSYM(divsi3_skip_div0_test): push { work } movs work, dividend eors work, divisor @ Save the sign of the result. mov ip, work movs curbit, #1 movs result, #0 cmp divisor, #0 bpl LSYM(Lover10) negs divisor, divisor @ Loops below use unsigned. 
LSYM(Lover10): cmp dividend, #0 bpl LSYM(Lover11) negs dividend, dividend LSYM(Lover11): cmp dividend, divisor blo LSYM(Lgot_result) THUMB_DIV_MOD_BODY 0 movs r0, result mov work, ip cmp work, #0 bpl LSYM(Lover12) negs r0, r0 LSYM(Lover12): pop { work } RET /* Implementation of aeabi_idiv for ARMv6m. This version is only used in ARMv6-M when we need an efficient implementation. */ #else LSYM(divsi3_skip_div0_test): cpy curbit, dividend orrs curbit, divisor bmi LSYM(Lthumb1_div_negative) LSYM(Lthumb1_div_positive): THUMB1_Div_Positive LSYM(Lthumb1_div_negative): THUMB1_Div_Negative #endif /* __OPTIMIZE_SIZE__ */ #elif defined(__ARM_ARCH_EXT_IDIV__) ARM_FUNC_START divsi3 ARM_FUNC_ALIAS aeabi_idiv divsi3 cmp r1, #0 beq LSYM(Ldiv0) sdiv r0, r0, r1 RET #else /* ARM/Thumb-2 version. */ ARM_FUNC_START divsi3 ARM_FUNC_ALIAS aeabi_idiv divsi3 cmp r1, #0 beq LSYM(Ldiv0) LSYM(divsi3_skip_div0_test): eor ip, r0, r1 @ save the sign of the result. do_it mi rsbmi r1, r1, #0 @ loops below use unsigned. subs r2, r1, #1 @ division by 1 or -1 ? beq 10f movs r3, r0 do_it mi rsbmi r3, r0, #0 @ positive dividend value cmp r3, r1 bls 11f tst r1, r2 @ divisor is power of 2 ? beq 12f ARM_DIV_BODY r3, r1, r0, r2 cmp ip, #0 do_it mi rsbmi r0, r0, #0 RET 10: teq ip, r0 @ same sign ? do_it mi rsbmi r0, r0, #0 RET 11: do_it lo movlo r0, #0 do_it eq,t moveq r0, ip, asr #31 orreq r0, r0, #1 RET 12: ARM_DIV2_ORDER r1, r2 cmp ip, #0 mov r0, r3, lsr r2 do_it mi rsbmi r0, r0, #0 RET #endif /* ARM version */ DIV_FUNC_END divsi3 signed #if defined(__prefer_thumb__) FUNC_START aeabi_idivmod cmp r1, #0 beq LSYM(Ldiv0) # if defined(__OPTIMIZE_SIZE__) push {r0, r1, lr} bl LSYM(divsi3_skip_div0_test) POP {r1, r2, r3} muls r2, r0 subs r1, r1, r2 bx r3 # else /* Both the quotient and remainder are calculated simultaneously in THUMB1_Div_Positive and THUMB1_Div_Negative. There is no need to calculate the remainder again here. */ b LSYM(divsi3_skip_div0_test) RET # endif /* __OPTIMIZE_SIZE__ */ #elif defined(__ARM_ARCH_EXT_IDIV__) ARM_FUNC_START aeabi_idivmod cmp r1, #0 beq LSYM(Ldiv0) mov r2, r0 sdiv r0, r0, r1 mls r1, r0, r1, r2 RET #else ARM_FUNC_START aeabi_idivmod cmp r1, #0 beq LSYM(Ldiv0) stmfd sp!, { r0, r1, lr } bl LSYM(divsi3_skip_div0_test) ldmfd sp!, { r1, r2, lr } mul r3, r2, r0 sub r1, r1, r3 RET #endif FUNC_END aeabi_idivmod #endif /* L_divsi3 */ /* ------------------------------------------------------------------------ */ #ifdef L_modsi3 #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1 ARM_FUNC_START modsi3 cmp r1, #0 beq LSYM(Ldiv0) sdiv r2, r0, r1 mls r0, r1, r2, r0 RET #elif defined(__thumb__) FUNC_START modsi3 movs curbit, #1 cmp divisor, #0 beq LSYM(Ldiv0) bpl LSYM(Lover10) negs divisor, divisor @ Loops below use unsigned. LSYM(Lover10): push { work } @ Need to save the sign of the dividend, unfortunately, we need @ work later on. Must do this after saving the original value of @ the work register, because we will pop this value off first. push { dividend } cmp dividend, #0 bpl LSYM(Lover11) negs dividend, dividend LSYM(Lover11): cmp dividend, divisor blo LSYM(Lgot_result) THUMB_DIV_MOD_BODY 1 pop { work } cmp work, #0 bpl LSYM(Lover12) negs dividend, dividend LSYM(Lover12): pop { work } RET #else /* ARM version. */ FUNC_START modsi3 cmp r1, #0 beq LSYM(Ldiv0) rsbmi r1, r1, #0 @ loops below use unsigned. 
movs ip, r0 @ preserve sign of dividend rsbmi r0, r0, #0 @ if negative make positive subs r2, r1, #1 @ compare divisor with 1 cmpne r0, r1 @ compare dividend with divisor moveq r0, #0 tsthi r1, r2 @ see if divisor is power of 2 andeq r0, r0, r2 bls 10f ARM_MOD_BODY r0, r1, r2, r3 10: cmp ip, #0 rsbmi r0, r0, #0 RET #endif /* ARM version */ DIV_FUNC_END modsi3 signed #endif /* L_modsi3 */ /* ------------------------------------------------------------------------ */ #ifdef L_dvmd_tls #ifdef __ARM_EABI__ WEAK aeabi_idiv0 WEAK aeabi_ldiv0 FUNC_START aeabi_idiv0 FUNC_START aeabi_ldiv0 RET FUNC_END aeabi_ldiv0 FUNC_END aeabi_idiv0 #else FUNC_START div0 RET FUNC_END div0 #endif #endif /* L_divmodsi_tools */ /* ------------------------------------------------------------------------ */ #ifdef L_dvmd_lnx @ GNU/Linux division-by zero handler. Used in place of L_dvmd_tls /* Constant taken from <asm/signal.h>. */ #define SIGFPE 8 #ifdef __ARM_EABI__ cfi_start __aeabi_ldiv0, LSYM(Lend_aeabi_ldiv0) WEAK aeabi_idiv0 WEAK aeabi_ldiv0 ARM_FUNC_START aeabi_idiv0 ARM_FUNC_START aeabi_ldiv0 do_push {r1, lr} 98: cfi_push 98b - __aeabi_ldiv0, 0xe, -0x4, 0x8 #else cfi_start __div0, LSYM(Lend_div0) ARM_FUNC_START div0 do_push {r1, lr} 98: cfi_push 98b - __div0, 0xe, -0x4, 0x8 #endif mov r0, #SIGFPE bl SYM(raise) __PLT__ RETLDM r1 unwind=98b #ifdef __ARM_EABI__ cfi_end LSYM(Lend_aeabi_ldiv0) FUNC_END aeabi_ldiv0 FUNC_END aeabi_idiv0 #else cfi_end LSYM(Lend_div0) FUNC_END div0 #endif #endif /* L_dvmd_lnx */ #ifdef L_clear_cache #if defined __ARM_EABI__ && defined __linux__ @ EABI GNU/Linux call to cacheflush syscall. ARM_FUNC_START clear_cache do_push {r7} #if __ARM_ARCH >= 7 || defined(__ARM_ARCH_6T2__) movw r7, #2 movt r7, #0xf #else mov r7, #0xf0000 add r7, r7, #2 #endif mov r2, #0 swi 0 do_pop {r7} RET FUNC_END clear_cache #else #error "This is only for ARM EABI GNU/Linux" #endif #endif /* L_clear_cache */ #ifdef L_speculation_barrier FUNC_START speculation_barrier #if __ARM_ARCH >= 7 isb dsb sy #elif defined __ARM_EABI__ && defined __linux__ /* We don't have a speculation barrier directly for this platform/architecture variant. But we can use a kernel clear_cache service routine which will emit such instructions if run on a later version of the architecture. We don't really want to flush the cache, but we must give it a valid address, so just clear pc..pc+1. */ #if defined __thumb__ && !defined __thumb2__ push {r7} movs r7, #0xf lsls r7, #16 adds r7, #2 adr r0, . + 4 adds r1, r0, #1 movs r2, #0 svc 0 pop {r7} #else do_push {r7} #ifdef __ARM_ARCH_6T2__ movw r7, #2 movt r7, #0xf #else mov r7, #0xf0000 add r7, r7, #2 #endif add r0, pc, #0 /* ADR. */ add r1, r0, #1 mov r2, #0 svc 0 do_pop {r7} #endif /* Thumb1 only */ #else #warning "No speculation barrier defined for this platform" #endif RET FUNC_END speculation_barrier #endif /* ------------------------------------------------------------------------ */ /* Dword shift operations. */ /* All the following Dword shift variants rely on the fact that shft xxx, Reg is in fact done as shft xxx, (Reg & 255) so for Reg value in (32...63) and (-1...-31) we will get zero (in the case of logical shifts) or the sign (for asr). */ #ifdef __ARMEB__ #define al r1 #define ah r0 #else #define al r0 #define ah r1 #endif /* Prevent __aeabi double-word shifts from being produced on SymbianOS. 
*/ #ifndef __symbian__ #ifdef L_lshrdi3 FUNC_START lshrdi3 FUNC_ALIAS aeabi_llsr lshrdi3 #ifdef __thumb__ lsrs al, r2 movs r3, ah lsrs ah, r2 mov ip, r3 subs r2, #32 lsrs r3, r2 orrs al, r3 negs r2, r2 mov r3, ip lsls r3, r2 orrs al, r3 RET #else subs r3, r2, #32 rsb ip, r2, #32 movmi al, al, lsr r2 movpl al, ah, lsr r3 orrmi al, al, ah, lsl ip mov ah, ah, lsr r2 RET #endif FUNC_END aeabi_llsr FUNC_END lshrdi3 #endif #ifdef L_ashrdi3 FUNC_START ashrdi3 FUNC_ALIAS aeabi_lasr ashrdi3 #ifdef __thumb__ lsrs al, r2 movs r3, ah asrs ah, r2 subs r2, #32 @ If r2 is negative at this point the following step would OR @ the sign bit into all of AL. That's not what we want... bmi 1f mov ip, r3 asrs r3, r2 orrs al, r3 mov r3, ip 1: negs r2, r2 lsls r3, r2 orrs al, r3 RET #else subs r3, r2, #32 rsb ip, r2, #32 movmi al, al, lsr r2 movpl al, ah, asr r3 orrmi al, al, ah, lsl ip mov ah, ah, asr r2 RET #endif FUNC_END aeabi_lasr FUNC_END ashrdi3 #endif #ifdef L_ashldi3 FUNC_START ashldi3 FUNC_ALIAS aeabi_llsl ashldi3 #ifdef __thumb__ lsls ah, r2 movs r3, al lsls al, r2 mov ip, r3 subs r2, #32 lsls r3, r2 orrs ah, r3 negs r2, r2 mov r3, ip lsrs r3, r2 orrs ah, r3 RET #else subs r3, r2, #32 rsb ip, r2, #32 movmi ah, ah, lsl r2 movpl ah, al, lsl r3 orrmi ah, ah, al, lsr ip mov al, al, lsl r2 RET #endif FUNC_END aeabi_llsl FUNC_END ashldi3 #endif #endif /* __symbian__ */ #ifdef L_clzsi2 #ifdef NOT_ISA_TARGET_32BIT FUNC_START clzsi2 movs r1, #28 movs r3, #1 lsls r3, r3, #16 cmp r0, r3 /* 0x10000 */ bcc 2f lsrs r0, r0, #16 subs r1, r1, #16 2: lsrs r3, r3, #8 cmp r0, r3 /* #0x100 */ bcc 2f lsrs r0, r0, #8 subs r1, r1, #8 2: lsrs r3, r3, #4 cmp r0, r3 /* #0x10 */ bcc 2f lsrs r0, r0, #4 subs r1, r1, #4 2: adr r2, 1f ldrb r0, [r2, r0] adds r0, r0, r1 bx lr .align 2 1: .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 FUNC_END clzsi2 #else ARM_FUNC_START clzsi2 # if defined (__ARM_FEATURE_CLZ) clz r0, r0 RET # else mov r1, #28 cmp r0, #0x10000 do_it cs, t movcs r0, r0, lsr #16 subcs r1, r1, #16 cmp r0, #0x100 do_it cs, t movcs r0, r0, lsr #8 subcs r1, r1, #8 cmp r0, #0x10 do_it cs, t movcs r0, r0, lsr #4 subcs r1, r1, #4 adr r2, 1f ldrb r0, [r2, r0] add r0, r0, r1 RET .align 2 1: .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 # endif /* !defined (__ARM_FEATURE_CLZ) */ FUNC_END clzsi2 #endif #endif /* L_clzsi2 */ #ifdef L_clzdi2 #if !defined (__ARM_FEATURE_CLZ) # ifdef NOT_ISA_TARGET_32BIT FUNC_START clzdi2 push {r4, lr} cmp xxh, #0 bne 1f # ifdef __ARMEB__ movs r0, xxl bl __clzsi2 adds r0, r0, #32 b 2f 1: bl __clzsi2 # else bl __clzsi2 adds r0, r0, #32 b 2f 1: movs r0, xxh bl __clzsi2 # endif 2: pop {r4, pc} # else /* NOT_ISA_TARGET_32BIT */ ARM_FUNC_START clzdi2 do_push {r4, lr} cmp xxh, #0 bne 1f # ifdef __ARMEB__ mov r0, xxl bl __clzsi2 add r0, r0, #32 b 2f 1: bl __clzsi2 # else bl __clzsi2 add r0, r0, #32 b 2f 1: mov r0, xxh bl __clzsi2 # endif 2: RETLDM r4 FUNC_END clzdi2 # endif /* NOT_ISA_TARGET_32BIT */ #else /* defined (__ARM_FEATURE_CLZ) */ ARM_FUNC_START clzdi2 cmp xxh, #0 do_it eq, et clzeq r0, xxl clzne r0, xxh addeq r0, r0, #32 RET FUNC_END clzdi2 #endif #endif /* L_clzdi2 */ #ifdef L_ctzsi2 #ifdef NOT_ISA_TARGET_32BIT FUNC_START ctzsi2 negs r1, r0 ands r0, r0, r1 movs r1, #28 movs r3, #1 lsls r3, r3, #16 cmp r0, r3 /* 0x10000 */ bcc 2f lsrs r0, r0, #16 subs r1, r1, #16 2: lsrs r3, r3, #8 cmp r0, r3 /* #0x100 */ bcc 2f lsrs r0, r0, #8 subs r1, r1, #8 2: lsrs r3, r3, #4 cmp r0, r3 /* #0x10 */ bcc 2f lsrs r0, r0, #4 subs r1, r1, #4 2: adr r2, 1f ldrb r0, [r2, r0] subs r0, r0, r1 bx lr .align 2 1: 
.byte 27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31 FUNC_END ctzsi2 #else ARM_FUNC_START ctzsi2 rsb r1, r0, #0 and r0, r0, r1 # if defined (__ARM_FEATURE_CLZ) clz r0, r0 rsb r0, r0, #31 RET # else mov r1, #28 cmp r0, #0x10000 do_it cs, t movcs r0, r0, lsr #16 subcs r1, r1, #16 cmp r0, #0x100 do_it cs, t movcs r0, r0, lsr #8 subcs r1, r1, #8 cmp r0, #0x10 do_it cs, t movcs r0, r0, lsr #4 subcs r1, r1, #4 adr r2, 1f ldrb r0, [r2, r0] sub r0, r0, r1 RET .align 2 1: .byte 27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31 # endif /* !defined (__ARM_FEATURE_CLZ) */ FUNC_END ctzsi2 #endif #endif /* L_clzsi2 */ /* ------------------------------------------------------------------------ */ /* These next two sections are here despite the fact that they contain Thumb assembler because their presence allows interworked code to be linked even when the GCC library is this one. */ /* Do not build the interworking functions when the target architecture does not support Thumb instructions. (This can be a multilib option). */ #if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\ || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \ || __ARM_ARCH >= 6 #if defined L_call_via_rX /* These labels & instructions are used by the Arm/Thumb interworking code. The address of function to be called is loaded into a register and then one of these labels is called via a BL instruction. This puts the return address into the link register with the bottom bit set, and the code here switches to the correct mode before executing the function. */ .text .align 0 .force_thumb .macro call_via register THUMB_FUNC_START _call_via_\register bx \register nop SIZE (_call_via_\register) .endm call_via r0 call_via r1 call_via r2 call_via r3 call_via r4 call_via r5 call_via r6 call_via r7 call_via r8 call_via r9 call_via sl call_via fp call_via ip call_via sp call_via lr #endif /* L_call_via_rX */ /* Don't bother with the old interworking routines for Thumb-2. */ /* ??? Maybe only omit these on "m" variants. */ #if !defined(__thumb2__) && __ARM_ARCH_ISA_ARM #if defined L_interwork_call_via_rX /* These labels & instructions are used by the Arm/Thumb interworking code, when the target address is in an unknown instruction set. The address of function to be called is loaded into a register and then one of these labels is called via a BL instruction. This puts the return address into the link register with the bottom bit set, and the code here switches to the correct mode before executing the function. Unfortunately the target code cannot be relied upon to return via a BX instruction, so instead we have to store the resturn address on the stack and allow the called function to return here instead. Upon return we recover the real return address and use a BX to get back to Thumb mode. There are three variations of this code. The first, _interwork_call_via_rN(), will push the return address onto the stack and pop it in _arm_return(). It should only be used if all arguments are passed in registers. The second, _interwork_r7_call_via_rN(), instead stores the return address at [r7, #-4]. It is the caller's responsibility to ensure that this address is valid and contains no useful data. The third, _interwork_r11_call_via_rN(), works in the same way but uses r11 instead of r7. It is useful if the caller does not really need a frame pointer. 
*/ .text .align 0 .code 32 .globl _arm_return LSYM(Lstart_arm_return): cfi_start LSYM(Lstart_arm_return) LSYM(Lend_arm_return) cfi_push 0, 0xe, -0x8, 0x8 nop @ This nop is for the benefit of debuggers, so that @ backtraces will use the correct unwind information. _arm_return: RETLDM unwind=LSYM(Lstart_arm_return) cfi_end LSYM(Lend_arm_return) .globl _arm_return_r7 _arm_return_r7: ldr lr, [r7, #-4] bx lr .globl _arm_return_r11 _arm_return_r11: ldr lr, [r11, #-4] bx lr .macro interwork_with_frame frame, register, name, return .code 16 THUMB_FUNC_START \name bx pc nop .code 32 tst \register, #1 streq lr, [\frame, #-4] adreq lr, _arm_return_\frame bx \register SIZE (\name) .endm .macro interwork register .code 16 THUMB_FUNC_START _interwork_call_via_\register bx pc nop .code 32 .globl LSYM(Lchange_\register) LSYM(Lchange_\register): tst \register, #1 streq lr, [sp, #-8]! adreq lr, _arm_return bx \register SIZE (_interwork_call_via_\register) interwork_with_frame r7,\register,_interwork_r7_call_via_\register interwork_with_frame r11,\register,_interwork_r11_call_via_\register .endm interwork r0 interwork r1 interwork r2 interwork r3 interwork r4 interwork r5 interwork r6 interwork r7 interwork r8 interwork r9 interwork sl interwork fp interwork ip interwork sp /* The LR case has to be handled a little differently... */ .code 16 THUMB_FUNC_START _interwork_call_via_lr bx pc nop .code 32 .globl .Lchange_lr .Lchange_lr: tst lr, #1 stmeqdb r13!, {lr, pc} mov ip, lr adreq lr, _arm_return bx ip SIZE (_interwork_call_via_lr) #endif /* L_interwork_call_via_rX */ #endif /* !__thumb2__ */ /* Functions to support compact pic switch tables in thumb1 state. All these routines take an index into the table in r0. The table is at LR & ~1 (but this must be rounded up in the case of 32-bit entires). They are only permitted to clobber r12 and r14 and r0 must be preserved on exit. */ #ifdef L_thumb1_case_sqi .text .align 0 .force_thumb .syntax unified THUMB_FUNC_START __gnu_thumb1_case_sqi push {r1} mov r1, lr lsrs r1, r1, #1 lsls r1, r1, #1 ldrsb r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r1} bx lr SIZE (__gnu_thumb1_case_sqi) #endif #ifdef L_thumb1_case_uqi .text .align 0 .force_thumb .syntax unified THUMB_FUNC_START __gnu_thumb1_case_uqi push {r1} mov r1, lr lsrs r1, r1, #1 lsls r1, r1, #1 ldrb r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r1} bx lr SIZE (__gnu_thumb1_case_uqi) #endif #ifdef L_thumb1_case_shi .text .align 0 .force_thumb .syntax unified THUMB_FUNC_START __gnu_thumb1_case_shi push {r0, r1} mov r1, lr lsrs r1, r1, #1 lsls r0, r0, #1 lsls r1, r1, #1 ldrsh r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r0, r1} bx lr SIZE (__gnu_thumb1_case_shi) #endif #ifdef L_thumb1_case_uhi .text .align 0 .force_thumb .syntax unified THUMB_FUNC_START __gnu_thumb1_case_uhi push {r0, r1} mov r1, lr lsrs r1, r1, #1 lsls r0, r0, #1 lsls r1, r1, #1 ldrh r1, [r1, r0] lsls r1, r1, #1 add lr, lr, r1 pop {r0, r1} bx lr SIZE (__gnu_thumb1_case_uhi) #endif #ifdef L_thumb1_case_si .text .align 0 .force_thumb .syntax unified THUMB_FUNC_START __gnu_thumb1_case_si push {r0, r1} mov r1, lr adds.n r1, r1, #2 /* Align to word. */ lsrs r1, r1, #2 lsls r0, r0, #2 lsls r1, r1, #2 ldr r0, [r1, r0] adds r0, r0, r1 mov lr, r0 pop {r0, r1} mov pc, lr /* We know we were called from thumb code. */ SIZE (__gnu_thumb1_case_si) #endif #endif /* Arch supports thumb. 
*/ .macro CFI_START_FUNCTION .cfi_startproc .cfi_remember_state .endm .macro CFI_END_FUNCTION .cfi_restore_state .cfi_endproc .endm #ifndef __symbian__ /* The condition here must match the one in gcc/config/arm/elf.h and libgcc/config/arm/t-elf. */ #ifndef NOT_ISA_TARGET_32BIT #include "ieee754-df.S" #include "ieee754-sf.S" #include "bpabi.S" #else /* NOT_ISA_TARGET_32BIT */ #include "bpabi-v6m.S" #endif /* NOT_ISA_TARGET_32BIT */ #endif /* !__symbian__ */
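The restoring-division loops above (ARM_DIV_BODY and THUMB_DIV_MOD_BODY) are easier to follow in C. Below is a minimal sketch of the __ARM_FEATURE_CLZ variant, not the libgcc source: line the divisor up under the dividend's highest set bit, then peel off one quotient bit per step. The assembly unrolls the loop 32 times and computes an entry point into the unrolled body instead of iterating; GCC's __builtin_clz stands in for the CLZ instruction, and the name udiv_model is illustrative.

#include <assert.h>
#include <stdint.h>

/* Illustrative C model of the shift-and-subtract loop in ARM_DIV_BODY's
   __ARM_FEATURE_CLZ path (a sketch, not the libgcc source).  */
static uint32_t
udiv_model (uint32_t dividend, uint32_t divisor)
{
  assert (divisor != 0);        /* libgcc branches to __aeabi_idiv0 here */
  if (dividend == 0)
    return 0;

  /* How far the divisor can be shifted left while its top bit stays at
     or below the dividend's top bit (clz \divisor - clz \dividend).  */
  int shift = __builtin_clz (divisor) - __builtin_clz (dividend);

  uint32_t result = 0;
  for (; shift >= 0; shift--)
    {
      result <<= 1;                         /* adc \result, \result, \result */
      if (dividend >= (divisor << shift))   /* cmp \dividend, \divisor, lsl #shift */
        {
          dividend -= divisor << shift;     /* subcs \dividend, ... */
          result |= 1;
        }
    }
  return result;    /* the remainder is what is left in `dividend` */
}

For example, udiv_model (100, 7) yields 14 with 2 left in dividend; __aeabi_uidivmod hands back the same pair, quotient in r0 and remainder in r1.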
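Likewise, the table-driven __clzsi2 fallback used when the core has no CLZ instruction is a three-step binary narrowing followed by a nibble lookup. A hedged C model (clzsi2_model is an illustrative name):

#include <stdint.h>

/* Illustrative C model of the no-CLZ __clzsi2 path above: three
   compare-and-shift steps narrow the argument to a nibble, which then
   indexes the same 16-byte table the assembly emits after label `1:`.
   Like the assembly, it returns 32 for a zero argument.  */
static int
clzsi2_model (uint32_t x)
{
  static const uint8_t nibble_clz[16] =
    { 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 };

  int n = 28;
  if (x >= 0x10000) { x >>= 16; n -= 16; }   /* cmp r0, #0x10000 */
  if (x >= 0x100)   { x >>= 8;  n -= 8;  }   /* cmp r0, #0x100   */
  if (x >= 0x10)    { x >>= 4;  n -= 4;  }   /* cmp r0, #0x10    */
  return nibble_clz[x] + n;                  /* ldrb r0, [r2, r0] */
}

__ctzsi2 reuses the same narrowing after isolating the lowest set bit with x & -x, which is why its lookup table holds 27..31 and the final step subtracts rather than adds.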
4ms/metamodule-plugin-sdk
1,698
plugin-libc/libgcc/config/pru/mpyll.S
/* Copyright (C) 2014-2022 Free Software Foundation, Inc.
   Contributed by Dimitar Dimitrov <dimitar@dinux.eu>

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.

   (al + C * ah) * (bl + C * bh)
     = al * bl
     + C * ah * bl
     + C * al * bh
     + C * C * ah * bh   -> discard, overflow

   Where C = (1 << 32).  */

#include "pru-asm.h"

	.section .text.__pruabi_mpyll, "ax"

	.global	SYM(__pruabi_mpyll)
	FUNC(__pruabi_mpyll)
SYM(__pruabi_mpyll):
	/* + C * ah * bl */
	mov	r28, r15
	mov	r29, r16
	nop
	xin	0, r26, 8

	/* + C * al * bh */
	mov	r28, r14
	mov	r29, r17
	mov	r15, r26	/* "Lose" ah, record only reslo.  */
	xin	0, r26, 8

	/* + al * bl */
	/* mov	r28, r14  -> No need, already loaded.  */
	mov	r29, r16
	add	r15, r15, r26
	xin	0, r26, 8

	mov	r14, r26
	add	r15, r15, r27

	ret
	ENDFUNC(__pruabi_mpyll)
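The identity in the file comment maps directly onto three 32x32 -> 64-bit multiplies. Below is a hedged C model (mpyll_model is an illustrative name, not part of libgcc): the PRU code fetches each product from the MAC unit with XIN into r26/r27 (reslo/reshi), while here a plain widening multiply stands in for it.

#include <stdint.h>

/* Illustrative C model of the scheme above: build a 64x64 -> 64-bit
   product from three 32x32 -> 64-bit multiplies, discarding
   C * C * ah * bh, which lies entirely above bit 63.  */
static uint64_t
mpyll_model (uint64_t a, uint64_t b)
{
  uint32_t al = (uint32_t) a, ah = (uint32_t) (a >> 32);
  uint32_t bl = (uint32_t) b, bh = (uint32_t) (b >> 32);

  uint64_t low = (uint64_t) al * bl;        /* al * bl                 */
  uint32_t hi  = (uint32_t) (low >> 32);    /* reshi (r27) of al * bl  */
  hi += (uint32_t) ((uint64_t) ah * bl);    /* + reslo of C * ah * bl  */
  hi += (uint32_t) ((uint64_t) al * bh);    /* + reslo of C * al * bh  */

  return ((uint64_t) hi << 32) | (uint32_t) low;
}

This mirrors the register flow above: r15 accumulates the low halves of the two cross products, then adds the high half (r27) of al * bl, while r14 keeps the low half of al * bl as the result's low word.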
4ms/metamodule-plugin-sdk
1,477
plugin-libc/libgcc/config/xtensa/crtn.S
# End of .init and .fini sections.
#   Copyright (C) 2003-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes sure that the .fini and .init sections do in
# fact return.  Users may put any desired instructions in those sections.
# This file is the last thing linked into any executable.

#include "xtensa-config.h"

	.section .init
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	retw
#else
	l32i	a0, sp, 0
	addi	sp, sp, 32
	ret
#endif

	.section .fini
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	retw
#else
	l32i	a0, sp, 0
	addi	sp, sp, 32
	ret
#endif
4ms/metamodule-plugin-sdk
40,815
plugin-libc/libgcc/config/xtensa/ieee754-sf.S
/* IEEE-754 single-precision functions for Xtensa Copyright (C) 2006-2022 Free Software Foundation, Inc. Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifdef __XTENSA_EB__ #define xh a2 #define xl a3 #define yh a4 #define yl a5 #else #define xh a3 #define xl a2 #define yh a5 #define yl a4 #endif /* Warning! The branch displacements for some Xtensa branch instructions are quite small, and this code has been carefully laid out to keep branch targets in range. If you change anything, be sure to check that the assembler is not relaxing anything to branch over a jump. */ #ifdef L_negsf2 .align 4 .global __negsf2 .type __negsf2, @function __negsf2: leaf_entry sp, 16 movi a4, 0x80000000 xor a2, a2, a4 leaf_return #endif /* L_negsf2 */ #ifdef L_addsubsf3 .literal_position /* Addition */ __addsf3_aux: /* Handle NaNs and Infinities. (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Ladd_xnan_or_inf: /* If y is neither Infinity nor NaN, return x. */ bnall a3, a6, .Ladd_return_nan_or_inf /* If x is a NaN, return it. Otherwise, return y. */ slli a7, a2, 9 bnez a7, .Ladd_return_nan .Ladd_ynan_or_inf: /* Return y. */ mov a2, a3 .Ladd_return_nan_or_inf: slli a7, a2, 9 bnez a7, .Ladd_return_nan leaf_return .Ladd_return_nan: movi a6, 0x400000 /* make it a quiet NaN */ or a2, a2, a6 leaf_return .Ladd_opposite_signs: /* Operand signs differ. Do a subtraction. */ slli a7, a6, 8 xor a3, a3, a7 j .Lsub_same_sign .align 4 .global __addsf3 .type __addsf3, @function __addsf3: leaf_entry sp, 16 movi a6, 0x7f800000 /* Check if the two operands have the same sign. */ xor a7, a2, a3 bltz a7, .Ladd_opposite_signs .Ladd_same_sign: /* Check if either exponent == 0x7f8 (i.e., NaN or Infinity). */ ball a2, a6, .Ladd_xnan_or_inf ball a3, a6, .Ladd_ynan_or_inf /* Compare the exponents. The smaller operand will be shifted right by the exponent difference and added to the larger one. */ extui a7, a2, 23, 9 extui a8, a3, 23, 9 bltu a7, a8, .Ladd_shiftx .Ladd_shifty: /* Check if the smaller (or equal) exponent is zero. */ bnone a3, a6, .Ladd_yexpzero /* Replace y sign/exponent with 0x008. */ or a3, a3, a6 slli a3, a3, 8 srli a3, a3, 8 .Ladd_yexpdiff: /* Compute the exponent difference. */ sub a10, a7, a8 /* Exponent difference > 32 -- just return the bigger value. */ bgeui a10, 32, 1f /* Shift y right by the exponent difference. Any bits that are shifted out of y are saved in a9 for rounding the result. */ ssr a10 movi a9, 0 src a9, a3, a9 srl a3, a3 /* Do the addition. */ add a2, a2, a3 /* Check if the add overflowed into the exponent. 
*/ extui a10, a2, 23, 9 beq a10, a7, .Ladd_round mov a8, a7 j .Ladd_carry .Ladd_yexpzero: /* y is a subnormal value. Replace its sign/exponent with zero, i.e., no implicit "1.0", and increment the apparent exponent because subnormals behave as if they had the minimum (nonzero) exponent. Test for the case when both exponents are zero. */ slli a3, a3, 9 srli a3, a3, 9 bnone a2, a6, .Ladd_bothexpzero addi a8, a8, 1 j .Ladd_yexpdiff .Ladd_bothexpzero: /* Both exponents are zero. Handle this as a special case. There is no need to shift or round, and the normal code for handling a carry into the exponent field will not work because it assumes there is an implicit "1.0" that needs to be added. */ add a2, a2, a3 1: leaf_return .Ladd_xexpzero: /* Same as "yexpzero" except skip handling the case when both exponents are zero. */ slli a2, a2, 9 srli a2, a2, 9 addi a7, a7, 1 j .Ladd_xexpdiff .Ladd_shiftx: /* Same thing as the "shifty" code, but with x and y swapped. Also, because the exponent difference is always nonzero in this version, the shift sequence can use SLL and skip loading a constant zero. */ bnone a2, a6, .Ladd_xexpzero or a2, a2, a6 slli a2, a2, 8 srli a2, a2, 8 .Ladd_xexpdiff: sub a10, a8, a7 bgeui a10, 32, .Ladd_returny ssr a10 sll a9, a2 srl a2, a2 add a2, a2, a3 /* Check if the add overflowed into the exponent. */ extui a10, a2, 23, 9 bne a10, a8, .Ladd_carry .Ladd_round: /* Round up if the leftover fraction is >= 1/2. */ bgez a9, 1f addi a2, a2, 1 /* Check if the leftover fraction is exactly 1/2. */ slli a9, a9, 1 beqz a9, .Ladd_exactlyhalf 1: leaf_return .Ladd_returny: mov a2, a3 leaf_return .Ladd_carry: /* The addition has overflowed into the exponent field, so the value needs to be renormalized. The mantissa of the result can be recovered by subtracting the original exponent and adding 0x800000 (which is the explicit "1.0" for the mantissa of the non-shifted operand -- the "1.0" for the shifted operand was already added). The mantissa can then be shifted right by one bit. The explicit "1.0" of the shifted mantissa then needs to be replaced by the exponent, incremented by one to account for the normalizing shift. It is faster to combine these operations: do the shift first and combine the additions and subtractions. If x is the original exponent, the result is: shifted mantissa - (x << 22) + (1 << 22) + (x << 23) or: shifted mantissa + ((x + 1) << 22) Note that the exponent is incremented here by leaving the explicit "1.0" of the mantissa in the exponent field. */ /* Shift x right by one bit. Save the lsb. */ mov a10, a2 srli a2, a2, 1 /* See explanation above. The original exponent is in a8. */ addi a8, a8, 1 slli a8, a8, 22 add a2, a2, a8 /* Return an Infinity if the exponent overflowed. */ ball a2, a6, .Ladd_infinity /* Same thing as the "round" code except the msb of the leftover fraction is bit 0 of a10, with the rest of the fraction in a9. */ bbci.l a10, 0, 1f addi a2, a2, 1 beqz a9, .Ladd_exactlyhalf 1: leaf_return .Ladd_infinity: /* Clear the mantissa. */ srli a2, a2, 23 slli a2, a2, 23 /* The sign bit may have been lost in a carry-out. Put it back. */ slli a8, a8, 1 or a2, a2, a8 leaf_return .Ladd_exactlyhalf: /* Round down to the nearest even value. */ srli a2, a2, 1 slli a2, a2, 1 leaf_return /* Subtraction */ __subsf3_aux: /* Handle NaNs and Infinities. (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Lsub_xnan_or_inf: /* If y is neither Infinity nor NaN, return x. 
*/ bnall a3, a6, .Lsub_return_nan_or_inf /* Both x and y are either NaN or Inf, so the result is NaN. */ .Lsub_return_nan: movi a4, 0x400000 /* make it a quiet NaN */ or a2, a2, a4 leaf_return .Lsub_ynan_or_inf: /* Negate y and return it. */ slli a7, a6, 8 xor a2, a3, a7 .Lsub_return_nan_or_inf: slli a7, a2, 9 bnez a7, .Lsub_return_nan leaf_return .Lsub_opposite_signs: /* Operand signs differ. Do an addition. */ slli a7, a6, 8 xor a3, a3, a7 j .Ladd_same_sign .align 4 .global __subsf3 .type __subsf3, @function __subsf3: leaf_entry sp, 16 movi a6, 0x7f800000 /* Check if the two operands have the same sign. */ xor a7, a2, a3 bltz a7, .Lsub_opposite_signs .Lsub_same_sign: /* Check if either exponent == 0x7f8 (i.e., NaN or Infinity). */ ball a2, a6, .Lsub_xnan_or_inf ball a3, a6, .Lsub_ynan_or_inf /* Compare the operands. In contrast to addition, the entire value matters here. */ extui a7, a2, 23, 8 extui a8, a3, 23, 8 bltu a2, a3, .Lsub_xsmaller .Lsub_ysmaller: /* Check if the smaller (or equal) exponent is zero. */ bnone a3, a6, .Lsub_yexpzero /* Replace y sign/exponent with 0x008. */ or a3, a3, a6 slli a3, a3, 8 srli a3, a3, 8 .Lsub_yexpdiff: /* Compute the exponent difference. */ sub a10, a7, a8 /* Exponent difference > 32 -- just return the bigger value. */ bgeui a10, 32, 1f /* Shift y right by the exponent difference. Any bits that are shifted out of y are saved in a9 for rounding the result. */ ssr a10 movi a9, 0 src a9, a3, a9 srl a3, a3 sub a2, a2, a3 /* Subtract the leftover bits in a9 from zero and propagate any borrow from a2. */ neg a9, a9 addi a10, a2, -1 movnez a2, a10, a9 /* Check if the subtract underflowed into the exponent. */ extui a10, a2, 23, 8 beq a10, a7, .Lsub_round j .Lsub_borrow .Lsub_yexpzero: /* Return zero if the inputs are equal. (For the non-subnormal case, subtracting the "1.0" will cause a borrow from the exponent and this case can be detected when handling the borrow.) */ beq a2, a3, .Lsub_return_zero /* y is a subnormal value. Replace its sign/exponent with zero, i.e., no implicit "1.0". Unless x is also a subnormal, increment y's apparent exponent because subnormals behave as if they had the minimum (nonzero) exponent. */ slli a3, a3, 9 srli a3, a3, 9 bnone a2, a6, .Lsub_yexpdiff addi a8, a8, 1 j .Lsub_yexpdiff .Lsub_returny: /* Negate and return y. */ slli a7, a6, 8 xor a2, a3, a7 1: leaf_return .Lsub_xsmaller: /* Same thing as the "ysmaller" code, but with x and y swapped and with y negated. */ bnone a2, a6, .Lsub_xexpzero or a2, a2, a6 slli a2, a2, 8 srli a2, a2, 8 .Lsub_xexpdiff: sub a10, a8, a7 bgeui a10, 32, .Lsub_returny ssr a10 movi a9, 0 src a9, a2, a9 srl a2, a2 /* Negate y. */ slli a11, a6, 8 xor a3, a3, a11 sub a2, a3, a2 neg a9, a9 addi a10, a2, -1 movnez a2, a10, a9 /* Check if the subtract underflowed into the exponent. */ extui a10, a2, 23, 8 bne a10, a8, .Lsub_borrow .Lsub_round: /* Round up if the leftover fraction is >= 1/2. */ bgez a9, 1f addi a2, a2, 1 /* Check if the leftover fraction is exactly 1/2. */ slli a9, a9, 1 beqz a9, .Lsub_exactlyhalf 1: leaf_return .Lsub_xexpzero: /* Same as "yexpzero". */ beq a2, a3, .Lsub_return_zero slli a2, a2, 9 srli a2, a2, 9 bnone a3, a6, .Lsub_xexpdiff addi a7, a7, 1 j .Lsub_xexpdiff .Lsub_return_zero: movi a2, 0 leaf_return .Lsub_borrow: /* The subtraction has underflowed into the exponent field, so the value needs to be renormalized. Shift the mantissa left as needed to remove any leading zeros and adjust the exponent accordingly. 
If the exponent is not large enough to remove all the leading zeros, the result will be a subnormal value. */ slli a8, a2, 9 beqz a8, .Lsub_xzero do_nsau a6, a8, a7, a11 srli a8, a8, 9 bge a6, a10, .Lsub_subnormal addi a6, a6, 1 .Lsub_normalize_shift: /* Shift the mantissa (a8/a9) left by a6. */ ssl a6 src a8, a8, a9 sll a9, a9 /* Combine the shifted mantissa with the sign and exponent, decrementing the exponent by a6. (The exponent has already been decremented by one due to the borrow from the subtraction, but adding the mantissa will increment the exponent by one.) */ srli a2, a2, 23 sub a2, a2, a6 slli a2, a2, 23 add a2, a2, a8 j .Lsub_round .Lsub_exactlyhalf: /* Round down to the nearest even value. */ srli a2, a2, 1 slli a2, a2, 1 leaf_return .Lsub_xzero: /* If there was a borrow from the exponent, and the mantissa and guard digits are all zero, then the inputs were equal and the result should be zero. */ beqz a9, .Lsub_return_zero /* Only the guard digit is nonzero. Shift by min(24, a10). */ addi a11, a10, -24 movi a6, 24 movltz a6, a10, a11 j .Lsub_normalize_shift .Lsub_subnormal: /* The exponent is too small to shift away all the leading zeros. Set a6 to the current exponent (which has already been decremented by the borrow) so that the exponent of the result will be zero. Do not add 1 to a6 in this case, because: (1) adding the mantissa will not increment the exponent, so there is no need to subtract anything extra from the exponent to compensate, and (2) the effective exponent of a subnormal is 1 not 0 so the shift amount must be 1 smaller than normal. */ mov a6, a10 j .Lsub_normalize_shift #endif /* L_addsubsf3 */ #ifdef L_mulsf3 /* Multiplication */ #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 #define XCHAL_NO_MUL 1 #endif .literal_position __mulsf3_aux: /* Handle unusual cases (zeros, subnormals, NaNs and Infinities). (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Lmul_xexpzero: /* Clear the sign bit of x. */ slli a2, a2, 1 srli a2, a2, 1 /* If x is zero, return zero. */ beqz a2, .Lmul_return_zero /* Normalize x. Adjust the exponent in a8. */ do_nsau a10, a2, a11, a12 addi a10, a10, -8 ssl a10 sll a2, a2 movi a8, 1 sub a8, a8, a10 j .Lmul_xnormalized .Lmul_yexpzero: /* Clear the sign bit of y. */ slli a3, a3, 1 srli a3, a3, 1 /* If y is zero, return zero. */ beqz a3, .Lmul_return_zero /* Normalize y. Adjust the exponent in a9. */ do_nsau a10, a3, a11, a12 addi a10, a10, -8 ssl a10 sll a3, a3 movi a9, 1 sub a9, a9, a10 j .Lmul_ynormalized .Lmul_return_zero: /* Return zero with the appropriate sign bit. */ srli a2, a7, 31 slli a2, a2, 31 j .Lmul_done .Lmul_xnan_or_inf: /* If y is zero, return NaN. */ slli a8, a3, 1 beqz a8, .Lmul_return_nan /* If y is NaN, return y. */ bnall a3, a6, .Lmul_returnx slli a8, a3, 9 beqz a8, .Lmul_returnx .Lmul_returny: mov a2, a3 .Lmul_returnx: slli a8, a2, 9 bnez a8, .Lmul_return_nan /* Set the sign bit and return. */ extui a7, a7, 31, 1 slli a2, a2, 1 ssai 1 src a2, a7, a2 j .Lmul_done .Lmul_ynan_or_inf: /* If x is zero, return NaN. 
*/ slli a8, a2, 1 bnez a8, .Lmul_returny mov a2, a3 .Lmul_return_nan: movi a4, 0x400000 /* make it a quiet NaN */ or a2, a2, a4 j .Lmul_done .align 4 .global __mulsf3 .type __mulsf3, @function __mulsf3: #if __XTENSA_CALL0_ABI__ leaf_entry sp, 32 addi sp, sp, -32 s32i a12, sp, 16 s32i a13, sp, 20 s32i a14, sp, 24 s32i a15, sp, 28 #elif XCHAL_NO_MUL /* This is not really a leaf function; allocate enough stack space to allow CALL12s to a helper function. */ leaf_entry sp, 64 #else leaf_entry sp, 32 #endif movi a6, 0x7f800000 /* Get the sign of the result. */ xor a7, a2, a3 /* Check for NaN and infinity. */ ball a2, a6, .Lmul_xnan_or_inf ball a3, a6, .Lmul_ynan_or_inf /* Extract the exponents. */ extui a8, a2, 23, 8 extui a9, a3, 23, 8 beqz a8, .Lmul_xexpzero .Lmul_xnormalized: beqz a9, .Lmul_yexpzero .Lmul_ynormalized: /* Add the exponents. */ add a8, a8, a9 /* Replace sign/exponent fields with explicit "1.0". */ movi a10, 0xffffff or a2, a2, a6 and a2, a2, a10 or a3, a3, a6 and a3, a3, a10 /* Multiply 32x32 to 64 bits. The result ends up in a2/a6. */ #if XCHAL_HAVE_MUL32_HIGH mull a6, a2, a3 muluh a2, a2, a3 #else /* Break the inputs into 16-bit chunks and compute 4 32-bit partial products. These partial products are: 0 xl * yl 1 xl * yh 2 xh * yl 3 xh * yh If using the Mul16 or Mul32 multiplier options, these input chunks must be stored in separate registers. For Mac16, the UMUL.AA.* opcodes can specify that the inputs come from either half of the registers, so there is no need to shift them out ahead of time. If there is no multiply hardware, the 16-bit chunks can be extracted when setting up the arguments to the separate multiply function. */ #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL /* Calling a separate multiply function will clobber a0 and requires use of a8 as a temporary, so save those values now. (The function uses a custom ABI so nothing else needs to be saved.) */ s32i a0, sp, 0 s32i a8, sp, 4 #endif #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 #define a2h a4 #define a3h a5 /* Get the high halves of the inputs into registers. */ srli a2h, a2, 16 srli a3h, a3, 16 #define a2l a2 #define a3l a3 #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 /* Clear the high halves of the inputs. This does not matter for MUL16 because the high bits are ignored. */ extui a2, a2, 0, 16 extui a3, a3, 0, 16 #endif #endif /* MUL16 || MUL32 */ #if XCHAL_HAVE_MUL16 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mul16u dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MUL32 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mull dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MAC16 /* The preprocessor insists on inserting a space when concatenating after a period in the definition of do_mul below. These macros are a workaround using underscores instead of periods when doing the concatenation. 
*/ #define umul_aa_ll umul.aa.ll #define umul_aa_lh umul.aa.lh #define umul_aa_hl umul.aa.hl #define umul_aa_hh umul.aa.hh #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ umul_aa_ ## xhalf ## yhalf xreg, yreg; \ rsr dst, ACCLO #else /* no multiply hardware */ #define set_arg_l(dst, src) \ extui dst, src, 0, 16 #define set_arg_h(dst, src) \ srli dst, src, 16 #if __XTENSA_CALL0_ABI__ #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a13, xreg); \ set_arg_ ## yhalf (a14, yreg); \ call0 .Lmul_mulsi3; \ mov dst, a12 #else #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a14, xreg); \ set_arg_ ## yhalf (a15, yreg); \ call12 .Lmul_mulsi3; \ mov dst, a14 #endif /* __XTENSA_CALL0_ABI__ */ #endif /* no multiply hardware */ /* Add pp1 and pp2 into a6 with carry-out in a9. */ do_mul(a6, a2, l, a3, h) /* pp 1 */ do_mul(a11, a2, h, a3, l) /* pp 2 */ movi a9, 0 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Shift the high half of a9/a6 into position in a9. Note that this value can be safely incremented without any carry-outs. */ ssai 16 src a9, a9, a6 /* Compute the low word into a6. */ do_mul(a11, a2, l, a3, l) /* pp 0 */ sll a6, a6 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Compute the high word into a2. */ do_mul(a2, a2, h, a3, h) /* pp 3 */ add a2, a2, a9 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL /* Restore values saved on the stack during the multiplication. */ l32i a0, sp, 0 l32i a8, sp, 4 #endif #endif /* ! XCHAL_HAVE_MUL32_HIGH */ /* Shift left by 9 bits, unless there was a carry-out from the multiply, in which case, shift by 8 bits and increment the exponent. */ movi a4, 9 srli a5, a2, 24 - 9 beqz a5, 1f addi a4, a4, -1 addi a8, a8, 1 1: ssl a4 src a2, a2, a6 sll a6, a6 /* Subtract the extra bias from the exponent sum (plus one to account for the explicit "1.0" of the mantissa that will be added to the exponent in the final result). */ movi a4, 0x80 sub a8, a8, a4 /* Check for over/underflow. The value in a8 is one less than the final exponent, so values in the range 0..fd are OK here. */ movi a4, 0xfe bgeu a8, a4, .Lmul_overflow .Lmul_round: /* Round. */ bgez a6, .Lmul_rounded addi a2, a2, 1 slli a6, a6, 1 beqz a6, .Lmul_exactlyhalf .Lmul_rounded: /* Add the exponent to the mantissa. */ slli a8, a8, 23 add a2, a2, a8 .Lmul_addsign: /* Add the sign bit. */ srli a7, a7, 31 slli a7, a7, 31 or a2, a2, a7 .Lmul_done: #if __XTENSA_CALL0_ABI__ l32i a12, sp, 16 l32i a13, sp, 20 l32i a14, sp, 24 l32i a15, sp, 28 addi sp, sp, 32 #endif leaf_return .Lmul_exactlyhalf: /* Round down to the nearest even value. */ srli a2, a2, 1 slli a2, a2, 1 j .Lmul_rounded .Lmul_overflow: bltz a8, .Lmul_underflow /* Return +/- Infinity. */ movi a8, 0xff slli a2, a8, 23 j .Lmul_addsign .Lmul_underflow: /* Create a subnormal value, where the exponent field contains zero, but the effective exponent is 1. The value of a8 is one less than the actual exponent, so just negate it to get the shift amount. */ neg a8, a8 mov a9, a6 ssr a8 bgeui a8, 32, .Lmul_flush_to_zero /* Shift a2 right. Any bits that are shifted out of a2 are saved in a6 (combined with the shifted-out bits currently in a6) for rounding the result. */ sll a6, a2 srl a2, a2 /* Set the exponent to zero. */ movi a8, 0 /* Pack any nonzero bits shifted out into a6. */ beqz a9, .Lmul_round movi a9, 1 or a6, a6, a9 j .Lmul_round .Lmul_flush_to_zero: /* Return zero with the appropriate sign bit. 
*/ srli a2, a7, 31 slli a2, a2, 31 j .Lmul_done #if XCHAL_NO_MUL /* For Xtensa processors with no multiply hardware, this simplified version of _mulsi3 is used for multiplying 16-bit chunks of the floating-point mantissas. When using CALL0, this function uses a custom ABI: the inputs are passed in a13 and a14, the result is returned in a12, and a8 and a15 are clobbered. */ .align 4 .Lmul_mulsi3: leaf_entry sp, 16 .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 movi \dst, 0 1: add \tmp1, \src2, \dst extui \tmp2, \src1, 0, 1 movnez \dst, \tmp1, \tmp2 do_addx2 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 1, 1 movnez \dst, \tmp1, \tmp2 do_addx4 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 2, 1 movnez \dst, \tmp1, \tmp2 do_addx8 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 3, 1 movnez \dst, \tmp1, \tmp2 srli \src1, \src1, 4 slli \src2, \src2, 4 bnez \src1, 1b .endm #if __XTENSA_CALL0_ABI__ mul_mulsi3_body a12, a13, a14, a15, a8 #else /* The result will be written into a2, so save that argument in a4. */ mov a4, a2 mul_mulsi3_body a2, a4, a3, a5, a6 #endif leaf_return #endif /* XCHAL_NO_MUL */ #endif /* L_mulsf3 */ #ifdef L_divsf3 /* Division */ #if XCHAL_HAVE_FP_DIV .align 4 .global __divsf3 .type __divsf3, @function __divsf3: leaf_entry sp, 16 wfr f1, a2 /* dividend */ wfr f2, a3 /* divisor */ div0.s f3, f2 nexp01.s f4, f2 const.s f5, 1 maddn.s f5, f4, f3 mov.s f6, f3 mov.s f7, f2 nexp01.s f2, f1 maddn.s f6, f5, f6 const.s f5, 1 const.s f0, 0 neg.s f8, f2 maddn.s f5, f4, f6 maddn.s f0, f8, f3 mkdadj.s f7, f1 maddn.s f6, f5, f6 maddn.s f8, f4, f0 const.s f3, 1 maddn.s f3, f4, f6 maddn.s f0, f8, f6 neg.s f2, f2 maddn.s f6, f3, f6 maddn.s f2, f4, f0 addexpm.s f0, f7 addexp.s f6, f7 divn.s f0, f2, f6 rfr a2, f0 leaf_return #else .literal_position __divsf3_aux: /* Handle unusual cases (zeros, subnormals, NaNs and Infinities). (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Ldiv_yexpzero: /* Clear the sign bit of y. */ slli a3, a3, 1 srli a3, a3, 1 /* Check for division by zero. */ beqz a3, .Ldiv_yzero /* Normalize y. Adjust the exponent in a9. */ do_nsau a10, a3, a4, a5 addi a10, a10, -8 ssl a10 sll a3, a3 movi a9, 1 sub a9, a9, a10 j .Ldiv_ynormalized .Ldiv_yzero: /* y is zero. Return NaN if x is also zero; otherwise, infinity. */ slli a4, a2, 1 srli a4, a4, 1 srli a2, a7, 31 slli a2, a2, 31 or a2, a2, a6 bnez a4, 1f movi a4, 0x400000 /* make it a quiet NaN */ or a2, a2, a4 1: leaf_return .Ldiv_xexpzero: /* Clear the sign bit of x. */ slli a2, a2, 1 srli a2, a2, 1 /* If x is zero, return zero. */ beqz a2, .Ldiv_return_zero /* Normalize x. Adjust the exponent in a8. */ do_nsau a10, a2, a4, a5 addi a10, a10, -8 ssl a10 sll a2, a2 movi a8, 1 sub a8, a8, a10 j .Ldiv_xnormalized .Ldiv_return_zero: /* Return zero with the appropriate sign bit. */ srli a2, a7, 31 slli a2, a2, 31 leaf_return .Ldiv_xnan_or_inf: /* Set the sign bit of the result. */ srli a7, a3, 31 slli a7, a7, 31 xor a2, a2, a7 /* If y is NaN or Inf, return NaN. */ ball a3, a6, .Ldiv_return_nan slli a7, a2, 9 bnez a7, .Ldiv_return_nan leaf_return .Ldiv_ynan_or_inf: /* If y is Infinity, return zero. */ slli a8, a3, 9 beqz a8, .Ldiv_return_zero /* y is NaN; return it. */ mov a2, a3 .Ldiv_return_nan: movi a4, 0x400000 /* make it a quiet NaN */ or a2, a2, a4 leaf_return .align 4 .global __divsf3 .type __divsf3, @function __divsf3: leaf_entry sp, 16 movi a6, 0x7f800000 /* Get the sign of the result. */ xor a7, a2, a3 /* Check for NaN and infinity. 
*/ ball a2, a6, .Ldiv_xnan_or_inf ball a3, a6, .Ldiv_ynan_or_inf /* Extract the exponents. */ extui a8, a2, 23, 8 extui a9, a3, 23, 8 beqz a9, .Ldiv_yexpzero .Ldiv_ynormalized: beqz a8, .Ldiv_xexpzero .Ldiv_xnormalized: /* Subtract the exponents. */ sub a8, a8, a9 /* Replace sign/exponent fields with explicit "1.0". */ movi a10, 0xffffff or a2, a2, a6 and a2, a2, a10 or a3, a3, a6 and a3, a3, a10 /* The first digit of the mantissa division must be a one. Shift x (and adjust the exponent) as needed to make this true. */ bltu a3, a2, 1f slli a2, a2, 1 addi a8, a8, -1 1: /* Do the first subtraction and shift. */ sub a2, a2, a3 slli a2, a2, 1 /* Put the quotient into a10. */ movi a10, 1 /* Divide one bit at a time for 23 bits. */ movi a9, 23 #if XCHAL_HAVE_LOOPS loop a9, .Ldiv_loopend #endif .Ldiv_loop: /* Shift the quotient << 1. */ slli a10, a10, 1 /* Is this digit a 0 or 1? */ bltu a2, a3, 1f /* Output a 1 and subtract. */ addi a10, a10, 1 sub a2, a2, a3 /* Shift the dividend << 1. */ 1: slli a2, a2, 1 #if !XCHAL_HAVE_LOOPS addi a9, a9, -1 bnez a9, .Ldiv_loop #endif .Ldiv_loopend: /* Add the exponent bias (less one to account for the explicit "1.0" of the mantissa that will be added to the exponent in the final result). */ addi a8, a8, 0x7e /* Check for over/underflow. The value in a8 is one less than the final exponent, so values in the range 0..fd are OK here. */ movi a4, 0xfe bgeu a8, a4, .Ldiv_overflow .Ldiv_round: /* Round. The remainder (<< 1) is in a2. */ bltu a2, a3, .Ldiv_rounded addi a10, a10, 1 beq a2, a3, .Ldiv_exactlyhalf .Ldiv_rounded: /* Add the exponent to the mantissa. */ slli a8, a8, 23 add a2, a10, a8 .Ldiv_addsign: /* Add the sign bit. */ srli a7, a7, 31 slli a7, a7, 31 or a2, a2, a7 leaf_return .Ldiv_overflow: bltz a8, .Ldiv_underflow /* Return +/- Infinity. */ addi a8, a4, 1 /* 0xff */ slli a2, a8, 23 j .Ldiv_addsign .Ldiv_exactlyhalf: /* Remainder is exactly half the divisor. Round even. */ srli a10, a10, 1 slli a10, a10, 1 j .Ldiv_rounded .Ldiv_underflow: /* Create a subnormal value, where the exponent field contains zero, but the effective exponent is 1. The value of a8 is one less than the actual exponent, so just negate it to get the shift amount. */ neg a8, a8 ssr a8 bgeui a8, 32, .Ldiv_flush_to_zero /* Shift a10 right. Any bits that are shifted out of a10 are saved in a6 for rounding the result. */ sll a6, a10 srl a10, a10 /* Set the exponent to zero. */ movi a8, 0 /* Pack any nonzero remainder (in a2) into a6. */ beqz a2, 1f movi a9, 1 or a6, a6, a9 /* Round a10 based on the bits shifted out into a6. */ 1: bgez a6, .Ldiv_rounded addi a10, a10, 1 slli a6, a6, 1 bnez a6, .Ldiv_rounded srli a10, a10, 1 slli a10, a10, 1 j .Ldiv_rounded .Ldiv_flush_to_zero: /* Return zero with the appropriate sign bit. */ srli a2, a7, 31 slli a2, a2, 31 leaf_return #endif /* XCHAL_HAVE_FP_DIV */ #endif /* L_divsf3 */ #ifdef L_cmpsf2 /* Equal and Not Equal */ .align 4 .global __eqsf2 .global __nesf2 .set __nesf2, __eqsf2 .type __eqsf2, @function __eqsf2: leaf_entry sp, 16 bne a2, a3, 4f /* The values are equal but NaN != NaN. Check the exponent. */ movi a6, 0x7f800000 ball a2, a6, 3f /* Equal. */ movi a2, 0 leaf_return /* Not equal. */ 2: movi a2, 1 leaf_return /* Check if the mantissas are nonzero. */ 3: slli a7, a2, 9 j 5f /* Check if x and y are zero with different signs. */ 4: or a7, a2, a3 slli a7, a7, 1 /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa or x when exponent(x) = 0x7f8 and x == y. 
*/ 5: movi a2, 0 movi a3, 1 movnez a2, a3, a7 leaf_return /* Greater Than */ .align 4 .global __gtsf2 .type __gtsf2, @function __gtsf2: leaf_entry sp, 16 movi a6, 0x7f800000 ball a2, a6, 2f 1: bnall a3, a6, .Lle_cmp /* Check if y is a NaN. */ slli a7, a3, 9 beqz a7, .Lle_cmp movi a2, 0 leaf_return /* Check if x is a NaN. */ 2: slli a7, a2, 9 beqz a7, 1b movi a2, 0 leaf_return /* Less Than or Equal */ .align 4 .global __lesf2 .type __lesf2, @function __lesf2: leaf_entry sp, 16 movi a6, 0x7f800000 ball a2, a6, 2f 1: bnall a3, a6, .Lle_cmp /* Check if y is a NaN. */ slli a7, a3, 9 beqz a7, .Lle_cmp movi a2, 1 leaf_return /* Check if x is a NaN. */ 2: slli a7, a2, 9 beqz a7, 1b movi a2, 1 leaf_return .Lle_cmp: /* Check if x and y have different signs. */ xor a7, a2, a3 bltz a7, .Lle_diff_signs /* Check if x is negative. */ bltz a2, .Lle_xneg /* Check if x <= y. */ bltu a3, a2, 5f 4: movi a2, 0 leaf_return .Lle_xneg: /* Check if y <= x. */ bgeu a2, a3, 4b 5: movi a2, 1 leaf_return .Lle_diff_signs: bltz a2, 4b /* Check if both x and y are zero. */ or a7, a2, a3 slli a7, a7, 1 movi a2, 1 movi a3, 0 moveqz a2, a3, a7 leaf_return /* Greater Than or Equal */ .align 4 .global __gesf2 .type __gesf2, @function __gesf2: leaf_entry sp, 16 movi a6, 0x7f800000 ball a2, a6, 2f 1: bnall a3, a6, .Llt_cmp /* Check if y is a NaN. */ slli a7, a3, 9 beqz a7, .Llt_cmp movi a2, -1 leaf_return /* Check if x is a NaN. */ 2: slli a7, a2, 9 beqz a7, 1b movi a2, -1 leaf_return /* Less Than */ .align 4 .global __ltsf2 .type __ltsf2, @function __ltsf2: leaf_entry sp, 16 movi a6, 0x7f800000 ball a2, a6, 2f 1: bnall a3, a6, .Llt_cmp /* Check if y is a NaN. */ slli a7, a3, 9 beqz a7, .Llt_cmp movi a2, 0 leaf_return /* Check if x is a NaN. */ 2: slli a7, a2, 9 beqz a7, 1b movi a2, 0 leaf_return .Llt_cmp: /* Check if x and y have different signs. */ xor a7, a2, a3 bltz a7, .Llt_diff_signs /* Check if x is negative. */ bltz a2, .Llt_xneg /* Check if x < y. */ bgeu a2, a3, 5f 4: movi a2, -1 leaf_return .Llt_xneg: /* Check if y < x. */ bltu a3, a2, 4b 5: movi a2, 0 leaf_return .Llt_diff_signs: bgez a2, 5b /* Check if both x and y are nonzero. */ or a7, a2, a3 slli a7, a7, 1 movi a2, 0 movi a3, -1 movnez a2, a3, a7 leaf_return /* Unordered */ .align 4 .global __unordsf2 .type __unordsf2, @function __unordsf2: leaf_entry sp, 16 movi a6, 0x7f800000 ball a2, a6, 3f 1: ball a3, a6, 4f 2: movi a2, 0 leaf_return 3: slli a7, a2, 9 beqz a7, 1b movi a2, 1 leaf_return 4: slli a7, a3, 9 beqz a7, 2b movi a2, 1 leaf_return #endif /* L_cmpsf2 */ #ifdef L_fixsfsi .align 4 .global __fixsfsi .type __fixsfsi, @function __fixsfsi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7f800000 ball a2, a6, .Lfixsfsi_nan_or_inf /* Extract the exponent and check if 0 < (exp - 0x7e) < 32. */ extui a4, a2, 23, 8 addi a4, a4, -0x7e bgei a4, 32, .Lfixsfsi_maxint blti a4, 1, .Lfixsfsi_zero /* Add explicit "1.0" and shift << 8. */ or a7, a2, a6 slli a5, a7, 8 /* Shift back to the right, based on the exponent. */ ssl a4 /* shift by 32 - a4 */ srl a5, a5 /* Negate the result if sign != 0. */ neg a2, a5 movgez a2, a5, a7 leaf_return .Lfixsfsi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, a2, 9 beqz a4, .Lfixsfsi_maxint /* Translate NaN to +maxint. 
*/ movi a2, 0 .Lfixsfsi_maxint: slli a4, a6, 8 /* 0x80000000 */ addi a5, a4, -1 /* 0x7fffffff */ movgez a4, a5, a2 mov a2, a4 leaf_return .Lfixsfsi_zero: movi a2, 0 leaf_return #endif /* L_fixsfsi */ #ifdef L_fixsfdi .align 4 .global __fixsfdi .type __fixsfdi, @function __fixsfdi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7f800000 ball a2, a6, .Lfixsfdi_nan_or_inf /* Extract the exponent and check if 0 < (exp - 0x7e) < 64. */ extui a4, a2, 23, 8 addi a4, a4, -0x7e bgei a4, 64, .Lfixsfdi_maxint blti a4, 1, .Lfixsfdi_zero /* Add explicit "1.0" and shift << 8. */ or a7, a2, a6 slli xh, a7, 8 /* Shift back to the right, based on the exponent. */ ssl a4 /* shift by 64 - a4 */ bgei a4, 32, .Lfixsfdi_smallshift srl xl, xh movi xh, 0 .Lfixsfdi_shifted: /* Negate the result if sign != 0. */ bgez a7, 1f neg xl, xl neg xh, xh beqz xl, 1f addi xh, xh, -1 1: leaf_return .Lfixsfdi_smallshift: movi xl, 0 sll xl, xh srl xh, xh j .Lfixsfdi_shifted .Lfixsfdi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, a2, 9 beqz a4, .Lfixsfdi_maxint /* Translate NaN to +maxint. */ movi a2, 0 .Lfixsfdi_maxint: slli a7, a6, 8 /* 0x80000000 */ bgez a2, 1f mov xh, a7 movi xl, 0 leaf_return 1: addi xh, a7, -1 /* 0x7fffffff */ movi xl, -1 leaf_return .Lfixsfdi_zero: movi xh, 0 movi xl, 0 leaf_return #endif /* L_fixsfdi */ #ifdef L_fixunssfsi .align 4 .global __fixunssfsi .type __fixunssfsi, @function __fixunssfsi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7f800000 ball a2, a6, .Lfixunssfsi_nan_or_inf /* Extract the exponent and check if 0 <= (exp - 0x7f) < 32. */ extui a4, a2, 23, 8 addi a4, a4, -0x7f bgei a4, 32, .Lfixunssfsi_maxint bltz a4, .Lfixunssfsi_zero /* Add explicit "1.0" and shift << 8. */ or a7, a2, a6 slli a5, a7, 8 /* Shift back to the right, based on the exponent. */ addi a4, a4, 1 beqi a4, 32, .Lfixunssfsi_bigexp ssl a4 /* shift by 32 - a4 */ srl a5, a5 /* Negate the result if sign != 0. */ neg a2, a5 movgez a2, a5, a7 leaf_return .Lfixunssfsi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, a2, 9 beqz a4, .Lfixunssfsi_maxint /* Translate NaN to 0xffffffff. */ movi a2, -1 leaf_return .Lfixunssfsi_maxint: slli a4, a6, 8 /* 0x80000000 */ movi a5, -1 /* 0xffffffff */ movgez a4, a5, a2 mov a2, a4 leaf_return .Lfixunssfsi_zero: movi a2, 0 leaf_return .Lfixunssfsi_bigexp: /* Handle unsigned maximum exponent case. */ bltz a2, 1f mov a2, a5 /* no shift needed */ leaf_return /* Return 0x80000000 if negative. */ 1: slli a2, a6, 8 leaf_return #endif /* L_fixunssfsi */ #ifdef L_fixunssfdi .align 4 .global __fixunssfdi .type __fixunssfdi, @function __fixunssfdi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7f800000 ball a2, a6, .Lfixunssfdi_nan_or_inf /* Extract the exponent and check if 0 <= (exp - 0x7f) < 64. */ extui a4, a2, 23, 8 addi a4, a4, -0x7f bgei a4, 64, .Lfixunssfdi_maxint bltz a4, .Lfixunssfdi_zero /* Add explicit "1.0" and shift << 8. */ or a7, a2, a6 slli xh, a7, 8 /* Shift back to the right, based on the exponent. */ addi a4, a4, 1 beqi a4, 64, .Lfixunssfdi_bigexp ssl a4 /* shift by 64 - a4 */ bgei a4, 32, .Lfixunssfdi_smallshift srl xl, xh movi xh, 0 .Lfixunssfdi_shifted: /* Negate the result if sign != 0. */ bgez a7, 1f neg xl, xl neg xh, xh beqz xl, 1f addi xh, xh, -1 1: leaf_return .Lfixunssfdi_smallshift: movi xl, 0 src xl, xh, xl srl xh, xh j .Lfixunssfdi_shifted .Lfixunssfdi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, a2, 9 beqz a4, .Lfixunssfdi_maxint /* Translate NaN to 0xffffffff.... 
*/ 1: movi xh, -1 movi xl, -1 leaf_return .Lfixunssfdi_maxint: bgez a2, 1b 2: slli xh, a6, 8 /* 0x80000000 */ movi xl, 0 leaf_return .Lfixunssfdi_zero: movi xh, 0 movi xl, 0 leaf_return .Lfixunssfdi_bigexp: /* Handle unsigned maximum exponent case. */ bltz a7, 2b movi xl, 0 leaf_return /* no shift needed */ #endif /* L_fixunssfdi */ #ifdef L_floatsisf .align 4 .global __floatunsisf .type __floatunsisf, @function __floatunsisf: leaf_entry sp, 16 beqz a2, .Lfloatsisf_return /* Set the sign to zero and jump to the floatsisf code. */ movi a7, 0 j .Lfloatsisf_normalize .align 4 .global __floatsisf .type __floatsisf, @function __floatsisf: leaf_entry sp, 16 /* Check for zero. */ beqz a2, .Lfloatsisf_return /* Save the sign. */ extui a7, a2, 31, 1 /* Get the absolute value. */ #if XCHAL_HAVE_ABS abs a2, a2 #else neg a4, a2 movltz a2, a4, a2 #endif .Lfloatsisf_normalize: /* Normalize with the first 1 bit in the msb. */ do_nsau a4, a2, a5, a6 ssl a4 sll a5, a2 /* Shift the mantissa into position, with rounding bits in a6. */ srli a2, a5, 8 slli a6, a5, (32 - 8) /* Set the exponent. */ movi a5, 0x9d /* 0x7e + 31 */ sub a5, a5, a4 slli a5, a5, 23 add a2, a2, a5 /* Add the sign. */ slli a7, a7, 31 or a2, a2, a7 /* Round up if the leftover fraction is >= 1/2. */ bgez a6, .Lfloatsisf_return addi a2, a2, 1 /* Overflow to the exponent is OK. */ /* Check if the leftover fraction is exactly 1/2. */ slli a6, a6, 1 beqz a6, .Lfloatsisf_exactlyhalf .Lfloatsisf_return: leaf_return .Lfloatsisf_exactlyhalf: /* Round down to the nearest even value. */ srli a2, a2, 1 slli a2, a2, 1 leaf_return #endif /* L_floatsisf */ #ifdef L_floatdisf .align 4 .global __floatundisf .type __floatundisf, @function __floatundisf: leaf_entry sp, 16 /* Check for zero. */ or a4, xh, xl beqz a4, 2f /* Set the sign to zero and jump to the floatdisf code. */ movi a7, 0 j .Lfloatdisf_normalize .align 4 .global __floatdisf .type __floatdisf, @function __floatdisf: leaf_entry sp, 16 /* Check for zero. */ or a4, xh, xl beqz a4, 2f /* Save the sign. */ extui a7, xh, 31, 1 /* Get the absolute value. */ bgez xh, .Lfloatdisf_normalize neg xl, xl neg xh, xh beqz xl, .Lfloatdisf_normalize addi xh, xh, -1 .Lfloatdisf_normalize: /* Normalize with the first 1 bit in the msb of xh. */ beqz xh, .Lfloatdisf_bigshift do_nsau a4, xh, a5, a6 ssl a4 src xh, xh, xl sll xl, xl .Lfloatdisf_shifted: /* Shift the mantissa into position, with rounding bits in a6. */ ssai 8 sll a5, xl src a6, xh, xl srl xh, xh beqz a5, 1f movi a5, 1 or a6, a6, a5 1: /* Set the exponent. */ movi a5, 0xbd /* 0x7e + 63 */ sub a5, a5, a4 slli a5, a5, 23 add a2, xh, a5 /* Add the sign. */ slli a7, a7, 31 or a2, a2, a7 /* Round up if the leftover fraction is >= 1/2. */ bgez a6, 2f addi a2, a2, 1 /* Overflow to the exponent is OK. */ /* Check if the leftover fraction is exactly 1/2. */ slli a6, a6, 1 beqz a6, .Lfloatdisf_exactlyhalf 2: leaf_return .Lfloatdisf_bigshift: /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */ do_nsau a4, xl, a5, a6 ssl a4 sll xh, xl movi xl, 0 addi a4, a4, 32 j .Lfloatdisf_shifted .Lfloatdisf_exactlyhalf: /* Round down to the nearest even value. 
*/ srli a2, a2, 1 slli a2, a2, 1 leaf_return #endif /* L_floatdisf */ #if XCHAL_HAVE_FP_SQRT #ifdef L_sqrtf /* Square root */ .align 4 .global __ieee754_sqrtf .type __ieee754_sqrtf, @function __ieee754_sqrtf: leaf_entry sp, 16 wfr f1, a2 sqrt0.s f2, f1 const.s f3, 0 maddn.s f3, f2, f2 nexp01.s f4, f1 const.s f0, 3 addexp.s f4, f0 maddn.s f0, f3, f4 nexp01.s f3, f1 neg.s f5, f3 maddn.s f2, f0, f2 const.s f0, 0 const.s f6, 0 const.s f7, 0 maddn.s f0, f5, f2 maddn.s f6, f2, f4 const.s f4, 3 maddn.s f7, f4, f2 maddn.s f3, f0, f0 maddn.s f4, f6, f2 neg.s f2, f7 maddn.s f0, f3, f2 maddn.s f7, f4, f7 mksadj.s f2, f1 nexp01.s f1, f1 maddn.s f1, f0, f0 neg.s f3, f7 addexpm.s f0, f2 addexp.s f3, f2 divn.s f0, f1, f3 rfr a2, f0 leaf_return #endif /* L_sqrtf */ #endif /* XCHAL_HAVE_FP_SQRT */ #if XCHAL_HAVE_FP_RECIP #ifdef L_recipsf2 /* Reciprocal */ .align 4 .global __recipsf2 .type __recipsf2, @function __recipsf2: leaf_entry sp, 16 wfr f1, a2 recip0.s f0, f1 const.s f2, 1 msub.s f2, f1, f0 maddn.s f0, f0, f2 const.s f2, 1 msub.s f2, f1, f0 maddn.s f0, f0, f2 rfr a2, f0 leaf_return #endif /* L_recipsf2 */ #endif /* XCHAL_HAVE_FP_RECIP */ #if XCHAL_HAVE_FP_RSQRT #ifdef L_rsqrtsf2 /* Reciprocal square root */ .align 4 .global __rsqrtsf2 .type __rsqrtsf2, @function __rsqrtsf2: leaf_entry sp, 16 wfr f1, a2 rsqrt0.s f0, f1 mul.s f2, f1, f0 const.s f3, 3; mul.s f4, f3, f0 const.s f5, 1 msub.s f5, f2, f0 maddn.s f0, f4, f5 mul.s f2, f1, f0 mul.s f1, f3, f0 const.s f3, 1 msub.s f3, f2, f0 maddn.s f0, f1, f3 rfr a2, f0 leaf_return #endif /* L_rsqrtsf2 */ #endif /* XCHAL_HAVE_FP_RSQRT */
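A pattern worth calling out in the single-precision routines above (.Lsub_round, .Lmul_round, and the int-to-float conversions) is how rounding state is carried: the bits shifted out of the mantissa are kept left-aligned in a scratch register (a9 or a6), so the register's sign bit is the half-way bit and the remaining bits distinguish an exact tie from "more than half". The following C sketch is an illustrative model of that convention, not code from this file; the name round_mantissa is made up.

#include <stdint.h>

/* Model of the guard-word rounding used above.  'guard' holds the
   shifted-out fraction bits, left-aligned in 32 bits, so testing its
   sign bit is the "fraction >= 1/2" test (bgez a6/a9 in the assembly).  */
static uint32_t round_mantissa(uint32_t mant, uint32_t guard)
{
    if ((int32_t) guard < 0) {      /* leftover fraction >= 1/2 */
        mant += 1;                  /* round up */
        if ((guard << 1) == 0)      /* exactly 1/2: break the tie ... */
            mant &= ~(uint32_t) 1;  /* ... by rounding to the even value */
    }
    return mant;
}

Because the increment is applied to the packed result word, a carry out of the mantissa field simply bumps the exponent field, which is why the assembly can annotate the increment with "Overflow to the exponent is OK."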
4ms/metamodule-plugin-sdk
1,541
plugin-libc/libgcc/config/xtensa/crti.S
# Start .init and .fini sections.
# Copyright (C) 2003-2022 Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

# This file just makes a stack frame for the contents of the .fini and
# .init sections.  Users may put any desired instructions in those
# sections.

#include "xtensa-config.h"

	.section .init
	.globl _init
	.type _init,@function
	.align 4
_init:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	entry sp, 64
#else
	addi sp, sp, -32
	s32i a0, sp, 0
#endif

	.section .fini
	.globl _fini
	.type _fini,@function
	.align 4
_fini:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	entry sp, 64
#else
	addi sp, sp, -32
	s32i a0, sp, 0
#endif
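crti.S only opens the .init and .fini stack frames; in the usual GCC scheme the matching loads of a0 and the returns come from crtn.S, which is not part of this excerpt. As a rough illustration of how the resulting _init/_fini functions are driven, here is a hedged C sketch; the name _start_c and the exact sequence are assumptions for the example, not taken from this SDK.

extern void _init(void);   /* prologue above plus whatever the linker
                              collected into the .init section */
extern void _fini(void);   /* likewise for .fini */
extern int main(void);

/* Hypothetical minimal startup: a real C runtime would also set up the
   stack, zero .bss, and pass main's return value to exit().  */
void _start_c(void)
{
    _init();               /* run initialization code */
    (void) main();
    _fini();               /* run finalization code */
}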
4ms/metamodule-plugin-sdk
6,905
plugin-libc/libgcc/config/xtensa/lib2funcs.S
/* Assembly functions for libgcc2.
   Copyright (C) 2001-2022 Free Software Foundation, Inc.
   Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

#include "xtensa-config.h"

/* __xtensa_libgcc_window_spill: This function flushes out all but the
   current register window.  This is used to set up the stack so that
   arbitrary frames can be accessed.  */

#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	.align	4
	.global	__xtensa_libgcc_window_spill
	.type	__xtensa_libgcc_window_spill,@function
__xtensa_libgcc_window_spill:
	entry	sp, 48
#if XCHAL_NUM_AREGS > 16
	call12	1f
	retw
	.align	4
1:
	.rept	(XCHAL_NUM_AREGS - 24) / 12
	_entry	sp, 48
	mov	a12, a0
	.endr
	_entry	sp, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a4, a4
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a12, a12
#endif
	retw
#else
	mov	a8, a8
	retw
#endif
	.size	__xtensa_libgcc_window_spill, .-__xtensa_libgcc_window_spill
#endif

/* __xtensa_nonlocal_goto: This code does all the hard work of a nonlocal
   goto on Xtensa.  It is here in the library to avoid the code size bloat
   of generating it in-line.  There are two arguments:

	a2 = frame pointer for the procedure containing the label
	a3 = goto handler address

   This function never returns to its caller but instead goes directly to
   the address of the specified goto handler.  */

#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	.align	4
	.global	__xtensa_nonlocal_goto
	.type	__xtensa_nonlocal_goto,@function
__xtensa_nonlocal_goto:
	entry	sp, 32

	/* Flush registers.  */
	call8	__xtensa_libgcc_window_spill

	/* Because the save area for a0-a3 is stored one frame below the one
	   identified by a2, the only way to restore those registers is to
	   unwind the stack.  If alloca() were never called, we could just
	   unwind until finding the sp value matching a2.  However, a2 is a
	   frame pointer, not a stack pointer, and may not be encountered
	   during the unwinding.  The solution is to unwind until going
	   _past_ the value given by a2.  This involves keeping three stack
	   pointer values during the unwinding:

		next = sp of frame N-1
		cur = sp of frame N
		prev = sp of frame N+1

	   When next > a2, the desired save area is stored relative to prev.
	   At this point, cur will be the same as a2 except in the alloca()
	   case.

	   Besides finding the values to be restored to a0-a3, we also need
	   to find the current window size for the target function.  This
	   can be extracted from the high bits of the return address,
	   initially in a0.  As the unwinding proceeds, the window size is
	   taken from the value of a0 saved _two_ frames below the current
	   frame.  */

	addi	a5, sp, -16	/* a5 = prev - save area */
	l32i	a6, a5, 4
	addi	a6, a6, -16	/* a6 = cur - save area */
	mov	a8, a0		/* a8 = return address (for window size) */
	j	.Lfirstframe
.Lnextframe:
	l32i	a8, a5, 0	/* next return address (for window size) */
	mov	a5, a6		/* advance prev */
	addi	a6, a7, -16	/* advance cur */
.Lfirstframe:
	l32i	a7, a6, 4	/* a7 = next */
	bgeu	a2, a7, .Lnextframe

	/* At this point, prev (a5) points to the save area with the saved
	   values of a0-a3.  Copy those values into the save area at the
	   current sp so they will be reloaded when the return from this
	   function underflows.  We don't have to worry about exceptions
	   while updating the current save area, because the windows have
	   already been flushed.  */

	addi	a4, sp, -16	/* a4 = save area of this function */
	l32i	a6, a5, 0
	l32i	a7, a5, 4
	s32i	a6, a4, 0
	s32i	a7, a4, 4
	l32i	a6, a5, 8
	l32i	a7, a5, 12
	s32i	a6, a4, 8
	s32i	a7, a4, 12

	/* Set return address to goto handler.  Use the window size bits from
	   the return address two frames below the target.  */
	extui	a8, a8, 30, 2	/* get window size from return addr. */
	slli	a3, a3, 2	/* get goto handler addr. << 2 */
	ssai	2
	src	a0, a8, a3	/* combine them with a funnel shift */

	retw
	.size	__xtensa_nonlocal_goto, .-__xtensa_nonlocal_goto
#endif

/* __xtensa_sync_caches: This function is called after writing a trampoline
   on the stack to force all the data writes to memory and invalidate the
   instruction cache.  a2 is the address of the new trampoline.

   After the trampoline data is written out, it must be flushed out of the
   data cache into memory.  We use DHWB in case we have a writeback cache.
   At least one DHWB instruction is needed for each data cache line which
   may be touched by the trampoline.  An ISYNC instruction must follow the
   DHWBs.

   We have to flush the i-cache to make sure that the new values get used.
   At least one IHI instruction is needed for each i-cache line which may
   be touched by the trampoline.  An ISYNC instruction is also needed to
   make sure that the modified instructions are loaded into the
   instruction fetch buffer.  */

/* Use the maximum trampoline size.  Flushing a bit extra is OK.  */
#define TRAMPOLINE_SIZE 60

	.text
	.align	4
	.global	__xtensa_sync_caches
	.type	__xtensa_sync_caches,@function
__xtensa_sync_caches:
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	entry	sp, 32
#endif
#if XCHAL_DCACHE_SIZE > 0
	/* Flush the trampoline from the data cache.  */
	extui	a4, a2, 0, XCHAL_DCACHE_LINEWIDTH
	addi	a4, a4, TRAMPOLINE_SIZE
	addi	a4, a4, (1 << XCHAL_DCACHE_LINEWIDTH) - 1
	srli	a4, a4, XCHAL_DCACHE_LINEWIDTH
	mov	a3, a2
.Ldcache_loop:
	dhwb	a3, 0
	addi	a3, a3, (1 << XCHAL_DCACHE_LINEWIDTH)
	addi	a4, a4, -1
	bnez	a4, .Ldcache_loop
	isync
#endif
#if XCHAL_ICACHE_SIZE > 0
	/* Invalidate the corresponding lines in the instruction cache.  */
	extui	a4, a2, 0, XCHAL_ICACHE_LINEWIDTH
	addi	a4, a4, TRAMPOLINE_SIZE
	addi	a4, a4, (1 << XCHAL_ICACHE_LINEWIDTH) - 1
	srli	a4, a4, XCHAL_ICACHE_LINEWIDTH
.Licache_loop:
	ihi	a2, 0
	addi	a2, a2, (1 << XCHAL_ICACHE_LINEWIDTH)
	addi	a4, a4, -1
	bnez	a4, .Licache_loop
#endif
	isync
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	retw
#else
	ret
#endif
	.size	__xtensa_sync_caches, .-__xtensa_sync_caches
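One detail of __xtensa_sync_caches that is easy to miss: the loop counts round the trampoline's span up to whole cache lines, and the extui at the top folds in the offset of the trampoline within its first line, so a misaligned start still flushes every line it touches. Below is a small C model of that count, under the assumption that the LINEWIDTH parameters are log2 of the line size in bytes; the helper name lines_to_flush is invented for illustration.

/* Mirrors the assembly sequence:
     extui a4, a2, 0, LINEWIDTH            -- offset within the first line
     addi  a4, a4, TRAMPOLINE_SIZE
     addi  a4, a4, (1 << LINEWIDTH) - 1
     srli  a4, a4, LINEWIDTH               -- number of lines to touch  */
static unsigned lines_to_flush(unsigned addr, unsigned size, unsigned linewidth)
{
    unsigned linesize = 1u << linewidth;
    unsigned offset   = addr & (linesize - 1);   /* the extui step */
    return (offset + size + linesize - 1) >> linewidth;
}

Each count then drives one DHWB (data writeback) per data-cache line or one IHI (invalidate) per instruction-cache line, with ISYNC afterwards so the refetched instructions are the updated ones.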
4ms/metamodule-plugin-sdk
54,007
plugin-libc/libgcc/config/xtensa/ieee754-df.S
/* IEEE-754 double-precision functions for Xtensa Copyright (C) 2006-2022 Free Software Foundation, Inc. Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #ifdef __XTENSA_EB__ #define xh a2 #define xl a3 #define yh a4 #define yl a5 #else #define xh a3 #define xl a2 #define yh a5 #define yl a4 #endif /* Warning! The branch displacements for some Xtensa branch instructions are quite small, and this code has been carefully laid out to keep branch targets in range. If you change anything, be sure to check that the assembler is not relaxing anything to branch over a jump. */ #ifdef L_negdf2 .align 4 .global __negdf2 .type __negdf2, @function __negdf2: leaf_entry sp, 16 movi a4, 0x80000000 xor xh, xh, a4 leaf_return #endif /* L_negdf2 */ #ifdef L_addsubdf3 .literal_position /* Addition */ __adddf3_aux: /* Handle NaNs and Infinities. (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Ladd_xnan_or_inf: /* If y is neither Infinity nor NaN, return x. */ bnall yh, a6, .Ladd_return_nan_or_inf /* If x is a NaN, return it. Otherwise, return y. */ slli a7, xh, 12 or a7, a7, xl bnez a7, .Ladd_return_nan .Ladd_ynan_or_inf: /* Return y. */ mov xh, yh mov xl, yl .Ladd_return_nan_or_inf: slli a7, xh, 12 or a7, a7, xl bnez a7, .Ladd_return_nan leaf_return .Ladd_return_nan: movi a4, 0x80000 /* make it a quiet NaN */ or xh, xh, a4 leaf_return .Ladd_opposite_signs: /* Operand signs differ. Do a subtraction. */ slli a7, a6, 11 xor yh, yh, a7 j .Lsub_same_sign .align 4 .global __adddf3 .type __adddf3, @function __adddf3: leaf_entry sp, 16 movi a6, 0x7ff00000 /* Check if the two operands have the same sign. */ xor a7, xh, yh bltz a7, .Ladd_opposite_signs .Ladd_same_sign: /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */ ball xh, a6, .Ladd_xnan_or_inf ball yh, a6, .Ladd_ynan_or_inf /* Compare the exponents. The smaller operand will be shifted right by the exponent difference and added to the larger one. */ extui a7, xh, 20, 12 extui a8, yh, 20, 12 bltu a7, a8, .Ladd_shiftx .Ladd_shifty: /* Check if the smaller (or equal) exponent is zero. */ bnone yh, a6, .Ladd_yexpzero /* Replace yh sign/exponent with 0x001. */ or yh, yh, a6 slli yh, yh, 11 srli yh, yh, 11 .Ladd_yexpdiff: /* Compute the exponent difference. Optimize for difference < 32. */ sub a10, a7, a8 bgeui a10, 32, .Ladd_bigshifty /* Shift yh/yl right by the exponent difference. Any bits that are shifted out of yl are saved in a9 for rounding the result. */ ssr a10 movi a9, 0 src a9, yl, a9 src yl, yh, yl srl yh, yh .Ladd_addy: /* Do the 64-bit addition. 
*/ add xl, xl, yl add xh, xh, yh bgeu xl, yl, 1f addi xh, xh, 1 1: /* Check if the add overflowed into the exponent. */ extui a10, xh, 20, 12 beq a10, a7, .Ladd_round mov a8, a7 j .Ladd_carry .Ladd_yexpzero: /* y is a subnormal value. Replace its sign/exponent with zero, i.e., no implicit "1.0", and increment the apparent exponent because subnormals behave as if they had the minimum (nonzero) exponent. Test for the case when both exponents are zero. */ slli yh, yh, 12 srli yh, yh, 12 bnone xh, a6, .Ladd_bothexpzero addi a8, a8, 1 j .Ladd_yexpdiff .Ladd_bothexpzero: /* Both exponents are zero. Handle this as a special case. There is no need to shift or round, and the normal code for handling a carry into the exponent field will not work because it assumes there is an implicit "1.0" that needs to be added. */ add xl, xl, yl add xh, xh, yh bgeu xl, yl, 1f addi xh, xh, 1 1: leaf_return .Ladd_bigshifty: /* Exponent difference > 64 -- just return the bigger value. */ bgeui a10, 64, 1b /* Shift yh/yl right by the exponent difference. Any bits that are shifted out are saved in a9 for rounding the result. */ ssr a10 sll a11, yl /* lost bits shifted out of yl */ src a9, yh, yl srl yl, yh movi yh, 0 beqz a11, .Ladd_addy or a9, a9, a10 /* any positive, nonzero value will work */ j .Ladd_addy .Ladd_xexpzero: /* Same as "yexpzero" except skip handling the case when both exponents are zero. */ slli xh, xh, 12 srli xh, xh, 12 addi a7, a7, 1 j .Ladd_xexpdiff .Ladd_shiftx: /* Same thing as the "shifty" code, but with x and y swapped. Also, because the exponent difference is always nonzero in this version, the shift sequence can use SLL and skip loading a constant zero. */ bnone xh, a6, .Ladd_xexpzero or xh, xh, a6 slli xh, xh, 11 srli xh, xh, 11 .Ladd_xexpdiff: sub a10, a8, a7 bgeui a10, 32, .Ladd_bigshiftx ssr a10 sll a9, xl src xl, xh, xl srl xh, xh .Ladd_addx: add xl, xl, yl add xh, xh, yh bgeu xl, yl, 1f addi xh, xh, 1 1: /* Check if the add overflowed into the exponent. */ extui a10, xh, 20, 12 bne a10, a8, .Ladd_carry .Ladd_round: /* Round up if the leftover fraction is >= 1/2. */ bgez a9, 1f addi xl, xl, 1 beqz xl, .Ladd_roundcarry /* Check if the leftover fraction is exactly 1/2. */ slli a9, a9, 1 beqz a9, .Ladd_exactlyhalf 1: leaf_return .Ladd_bigshiftx: /* Mostly the same thing as "bigshifty".... */ bgeui a10, 64, .Ladd_returny ssr a10 sll a11, xl src a9, xh, xl srl xl, xh movi xh, 0 beqz a11, .Ladd_addx or a9, a9, a10 j .Ladd_addx .Ladd_returny: mov xh, yh mov xl, yl leaf_return .Ladd_carry: /* The addition has overflowed into the exponent field, so the value needs to be renormalized. The mantissa of the result can be recovered by subtracting the original exponent and adding 0x100000 (which is the explicit "1.0" for the mantissa of the non-shifted operand -- the "1.0" for the shifted operand was already added). The mantissa can then be shifted right by one bit. The explicit "1.0" of the shifted mantissa then needs to be replaced by the exponent, incremented by one to account for the normalizing shift. It is faster to combine these operations: do the shift first and combine the additions and subtractions. If x is the original exponent, the result is: shifted mantissa - (x << 19) + (1 << 19) + (x << 20) or: shifted mantissa + ((x + 1) << 19) Note that the exponent is incremented here by leaving the explicit "1.0" of the mantissa in the exponent field. */ /* Shift xh/xl right by one bit. Save the lsb of xl. */ mov a10, xl ssai 1 src xl, xh, xl srl xh, xh /* See explanation above. 
The original exponent is in a8. */ addi a8, a8, 1 slli a8, a8, 19 add xh, xh, a8 /* Return an Infinity if the exponent overflowed. */ ball xh, a6, .Ladd_infinity /* Same thing as the "round" code except the msb of the leftover fraction is bit 0 of a10, with the rest of the fraction in a9. */ bbci.l a10, 0, 1f addi xl, xl, 1 beqz xl, .Ladd_roundcarry beqz a9, .Ladd_exactlyhalf 1: leaf_return .Ladd_infinity: /* Clear the mantissa. */ movi xl, 0 srli xh, xh, 20 slli xh, xh, 20 /* The sign bit may have been lost in a carry-out. Put it back. */ slli a8, a8, 1 or xh, xh, a8 leaf_return .Ladd_exactlyhalf: /* Round down to the nearest even value. */ srli xl, xl, 1 slli xl, xl, 1 leaf_return .Ladd_roundcarry: /* xl is always zero when the rounding increment overflows, so there's no need to round it to an even value. */ addi xh, xh, 1 /* Overflow to the exponent is OK. */ leaf_return /* Subtraction */ __subdf3_aux: /* Handle NaNs and Infinities. (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Lsub_xnan_or_inf: /* If y is neither Infinity nor NaN, return x. */ bnall yh, a6, .Lsub_return_nan_or_inf .Lsub_return_nan: /* Both x and y are either NaN or Inf, so the result is NaN. */ movi a4, 0x80000 /* make it a quiet NaN */ or xh, xh, a4 leaf_return .Lsub_ynan_or_inf: /* Negate y and return it. */ slli a7, a6, 11 xor xh, yh, a7 mov xl, yl .Lsub_return_nan_or_inf: slli a7, xh, 12 or a7, a7, xl bnez a7, .Lsub_return_nan leaf_return .Lsub_opposite_signs: /* Operand signs differ. Do an addition. */ slli a7, a6, 11 xor yh, yh, a7 j .Ladd_same_sign .align 4 .global __subdf3 .type __subdf3, @function __subdf3: leaf_entry sp, 16 movi a6, 0x7ff00000 /* Check if the two operands have the same sign. */ xor a7, xh, yh bltz a7, .Lsub_opposite_signs .Lsub_same_sign: /* Check if either exponent == 0x7ff (i.e., NaN or Infinity). */ ball xh, a6, .Lsub_xnan_or_inf ball yh, a6, .Lsub_ynan_or_inf /* Compare the operands. In contrast to addition, the entire value matters here. */ extui a7, xh, 20, 11 extui a8, yh, 20, 11 bltu xh, yh, .Lsub_xsmaller beq xh, yh, .Lsub_compare_low .Lsub_ysmaller: /* Check if the smaller (or equal) exponent is zero. */ bnone yh, a6, .Lsub_yexpzero /* Replace yh sign/exponent with 0x001. */ or yh, yh, a6 slli yh, yh, 11 srli yh, yh, 11 .Lsub_yexpdiff: /* Compute the exponent difference. Optimize for difference < 32. */ sub a10, a7, a8 bgeui a10, 32, .Lsub_bigshifty /* Shift yh/yl right by the exponent difference. Any bits that are shifted out of yl are saved in a9 for rounding the result. */ ssr a10 movi a9, 0 src a9, yl, a9 src yl, yh, yl srl yh, yh .Lsub_suby: /* Do the 64-bit subtraction. */ sub xh, xh, yh bgeu xl, yl, 1f addi xh, xh, -1 1: sub xl, xl, yl /* Subtract the leftover bits in a9 from zero and propagate any borrow from xh/xl. */ neg a9, a9 beqz a9, 1f addi a5, xh, -1 moveqz xh, a5, xl addi xl, xl, -1 1: /* Check if the subtract underflowed into the exponent. */ extui a10, xh, 20, 11 beq a10, a7, .Lsub_round j .Lsub_borrow .Lsub_compare_low: /* The high words are equal. Compare the low words. */ bltu xl, yl, .Lsub_xsmaller bltu yl, xl, .Lsub_ysmaller /* The operands are equal. Return 0.0. */ movi xh, 0 movi xl, 0 1: leaf_return .Lsub_yexpzero: /* y is a subnormal value. Replace its sign/exponent with zero, i.e., no implicit "1.0". Unless x is also a subnormal, increment y's apparent exponent because subnormals behave as if they had the minimum (nonzero) exponent. 
*/ slli yh, yh, 12 srli yh, yh, 12 bnone xh, a6, .Lsub_yexpdiff addi a8, a8, 1 j .Lsub_yexpdiff .Lsub_bigshifty: /* Exponent difference > 64 -- just return the bigger value. */ bgeui a10, 64, 1b /* Shift yh/yl right by the exponent difference. Any bits that are shifted out are saved in a9 for rounding the result. */ ssr a10 sll a11, yl /* lost bits shifted out of yl */ src a9, yh, yl srl yl, yh movi yh, 0 beqz a11, .Lsub_suby or a9, a9, a10 /* any positive, nonzero value will work */ j .Lsub_suby .Lsub_xsmaller: /* Same thing as the "ysmaller" code, but with x and y swapped and with y negated. */ bnone xh, a6, .Lsub_xexpzero or xh, xh, a6 slli xh, xh, 11 srli xh, xh, 11 .Lsub_xexpdiff: sub a10, a8, a7 bgeui a10, 32, .Lsub_bigshiftx ssr a10 movi a9, 0 src a9, xl, a9 src xl, xh, xl srl xh, xh /* Negate y. */ slli a11, a6, 11 xor yh, yh, a11 .Lsub_subx: sub xl, yl, xl sub xh, yh, xh bgeu yl, xl, 1f addi xh, xh, -1 1: /* Subtract the leftover bits in a9 from zero and propagate any borrow from xh/xl. */ neg a9, a9 beqz a9, 1f addi a5, xh, -1 moveqz xh, a5, xl addi xl, xl, -1 1: /* Check if the subtract underflowed into the exponent. */ extui a10, xh, 20, 11 bne a10, a8, .Lsub_borrow .Lsub_round: /* Round up if the leftover fraction is >= 1/2. */ bgez a9, 1f addi xl, xl, 1 beqz xl, .Lsub_roundcarry /* Check if the leftover fraction is exactly 1/2. */ slli a9, a9, 1 beqz a9, .Lsub_exactlyhalf 1: leaf_return .Lsub_xexpzero: /* Same as "yexpzero". */ slli xh, xh, 12 srli xh, xh, 12 bnone yh, a6, .Lsub_xexpdiff addi a7, a7, 1 j .Lsub_xexpdiff .Lsub_bigshiftx: /* Mostly the same thing as "bigshifty", but with the sign bit of the shifted value set so that the subsequent subtraction flips the sign of y. */ bgeui a10, 64, .Lsub_returny ssr a10 sll a11, xl src a9, xh, xl srl xl, xh slli xh, a6, 11 /* set sign bit of xh */ beqz a11, .Lsub_subx or a9, a9, a10 j .Lsub_subx .Lsub_returny: /* Negate and return y. */ slli a7, a6, 11 xor xh, yh, a7 mov xl, yl leaf_return .Lsub_borrow: /* The subtraction has underflowed into the exponent field, so the value needs to be renormalized. Shift the mantissa left as needed to remove any leading zeros and adjust the exponent accordingly. If the exponent is not large enough to remove all the leading zeros, the result will be a subnormal value. */ slli a8, xh, 12 beqz a8, .Lsub_xhzero do_nsau a6, a8, a7, a11 srli a8, a8, 12 bge a6, a10, .Lsub_subnormal addi a6, a6, 1 .Lsub_shift_lt32: /* Shift the mantissa (a8/xl/a9) left by a6. */ ssl a6 src a8, a8, xl src xl, xl, a9 sll a9, a9 /* Combine the shifted mantissa with the sign and exponent, decrementing the exponent by a6. (The exponent has already been decremented by one due to the borrow from the subtraction, but adding the mantissa will increment the exponent by one.) */ srli xh, xh, 20 sub xh, xh, a6 slli xh, xh, 20 add xh, xh, a8 j .Lsub_round .Lsub_exactlyhalf: /* Round down to the nearest even value. */ srli xl, xl, 1 slli xl, xl, 1 leaf_return .Lsub_roundcarry: /* xl is always zero when the rounding increment overflows, so there's no need to round it to an even value. */ addi xh, xh, 1 /* Overflow to the exponent is OK. */ leaf_return .Lsub_xhzero: /* When normalizing the result, all the mantissa bits in the high word are zero. Shift by "20 + (leading zero count of xl) + 1". 
*/ do_nsau a6, xl, a7, a11 addi a6, a6, 21 blt a10, a6, .Lsub_subnormal .Lsub_normalize_shift: bltui a6, 32, .Lsub_shift_lt32 ssl a6 src a8, xl, a9 sll xl, a9 movi a9, 0 srli xh, xh, 20 sub xh, xh, a6 slli xh, xh, 20 add xh, xh, a8 j .Lsub_round .Lsub_subnormal: /* The exponent is too small to shift away all the leading zeros. Set a6 to the current exponent (which has already been decremented by the borrow) so that the exponent of the result will be zero. Do not add 1 to a6 in this case, because: (1) adding the mantissa will not increment the exponent, so there is no need to subtract anything extra from the exponent to compensate, and (2) the effective exponent of a subnormal is 1 not 0 so the shift amount must be 1 smaller than normal. */ mov a6, a10 j .Lsub_normalize_shift #endif /* L_addsubdf3 */ #ifdef L_muldf3 /* Multiplication */ #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 #define XCHAL_NO_MUL 1 #endif .literal_position __muldf3_aux: /* Handle unusual cases (zeros, subnormals, NaNs and Infinities). (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Lmul_xexpzero: /* Clear the sign bit of x. */ slli xh, xh, 1 srli xh, xh, 1 /* If x is zero, return zero. */ or a10, xh, xl beqz a10, .Lmul_return_zero /* Normalize x. Adjust the exponent in a8. */ beqz xh, .Lmul_xh_zero do_nsau a10, xh, a11, a12 addi a10, a10, -11 ssl a10 src xh, xh, xl sll xl, xl movi a8, 1 sub a8, a8, a10 j .Lmul_xnormalized .Lmul_xh_zero: do_nsau a10, xl, a11, a12 addi a10, a10, -11 movi a8, -31 sub a8, a8, a10 ssl a10 bltz a10, .Lmul_xl_srl sll xh, xl movi xl, 0 j .Lmul_xnormalized .Lmul_xl_srl: srl xh, xl sll xl, xl j .Lmul_xnormalized .Lmul_yexpzero: /* Clear the sign bit of y. */ slli yh, yh, 1 srli yh, yh, 1 /* If y is zero, return zero. */ or a10, yh, yl beqz a10, .Lmul_return_zero /* Normalize y. Adjust the exponent in a9. */ beqz yh, .Lmul_yh_zero do_nsau a10, yh, a11, a12 addi a10, a10, -11 ssl a10 src yh, yh, yl sll yl, yl movi a9, 1 sub a9, a9, a10 j .Lmul_ynormalized .Lmul_yh_zero: do_nsau a10, yl, a11, a12 addi a10, a10, -11 movi a9, -31 sub a9, a9, a10 ssl a10 bltz a10, .Lmul_yl_srl sll yh, yl movi yl, 0 j .Lmul_ynormalized .Lmul_yl_srl: srl yh, yl sll yl, yl j .Lmul_ynormalized .Lmul_return_zero: /* Return zero with the appropriate sign bit. */ srli xh, a7, 31 slli xh, xh, 31 movi xl, 0 j .Lmul_done .Lmul_xnan_or_inf: /* If y is zero, return NaN. */ bnez yl, 1f slli a8, yh, 1 beqz a8, .Lmul_return_nan 1: /* If y is NaN, return y. */ bnall yh, a6, .Lmul_returnx slli a8, yh, 12 or a8, a8, yl beqz a8, .Lmul_returnx .Lmul_returny: mov xh, yh mov xl, yl .Lmul_returnx: slli a8, xh, 12 or a8, a8, xl bnez a8, .Lmul_return_nan /* Set the sign bit and return. */ extui a7, a7, 31, 1 slli xh, xh, 1 ssai 1 src xh, a7, xh j .Lmul_done .Lmul_ynan_or_inf: /* If x is zero, return NaN. */ bnez xl, .Lmul_returny slli a8, xh, 1 bnez a8, .Lmul_returny mov xh, yh .Lmul_return_nan: movi a4, 0x80000 /* make it a quiet NaN */ or xh, xh, a4 j .Lmul_done .align 4 .global __muldf3 .type __muldf3, @function __muldf3: #if __XTENSA_CALL0_ABI__ leaf_entry sp, 32 addi sp, sp, -32 s32i a12, sp, 16 s32i a13, sp, 20 s32i a14, sp, 24 s32i a15, sp, 28 #elif XCHAL_NO_MUL /* This is not really a leaf function; allocate enough stack space to allow CALL12s to a helper function. */ leaf_entry sp, 64 #else leaf_entry sp, 32 #endif movi a6, 0x7ff00000 /* Get the sign of the result. */ xor a7, xh, yh /* Check for NaN and infinity. 
*/ ball xh, a6, .Lmul_xnan_or_inf ball yh, a6, .Lmul_ynan_or_inf /* Extract the exponents. */ extui a8, xh, 20, 11 extui a9, yh, 20, 11 beqz a8, .Lmul_xexpzero .Lmul_xnormalized: beqz a9, .Lmul_yexpzero .Lmul_ynormalized: /* Add the exponents. */ add a8, a8, a9 /* Replace sign/exponent fields with explicit "1.0". */ movi a10, 0x1fffff or xh, xh, a6 and xh, xh, a10 or yh, yh, a6 and yh, yh, a10 /* Multiply 64x64 to 128 bits. The result ends up in xh/xl/a6. The least-significant word of the result is thrown away except that if it is nonzero, the lsb of a6 is set to 1. */ #if XCHAL_HAVE_MUL32_HIGH /* Compute a6 with any carry-outs in a10. */ movi a10, 0 mull a6, xl, yh mull a11, xh, yl add a6, a6, a11 bgeu a6, a11, 1f addi a10, a10, 1 1: muluh a11, xl, yl add a6, a6, a11 bgeu a6, a11, 1f addi a10, a10, 1 1: /* If the low word of the result is nonzero, set the lsb of a6. */ mull a11, xl, yl beqz a11, 1f movi a9, 1 or a6, a6, a9 1: /* Compute xl with any carry-outs in a9. */ movi a9, 0 mull a11, xh, yh add a10, a10, a11 bgeu a10, a11, 1f addi a9, a9, 1 1: muluh a11, xh, yl add a10, a10, a11 bgeu a10, a11, 1f addi a9, a9, 1 1: muluh xl, xl, yh add xl, xl, a10 bgeu xl, a10, 1f addi a9, a9, 1 1: /* Compute xh. */ muluh xh, xh, yh add xh, xh, a9 #else /* ! XCHAL_HAVE_MUL32_HIGH */ /* Break the inputs into 16-bit chunks and compute 16 32-bit partial products. These partial products are: 0 xll * yll 1 xll * ylh 2 xlh * yll 3 xll * yhl 4 xlh * ylh 5 xhl * yll 6 xll * yhh 7 xlh * yhl 8 xhl * ylh 9 xhh * yll 10 xlh * yhh 11 xhl * yhl 12 xhh * ylh 13 xhl * yhh 14 xhh * yhl 15 xhh * yhh where the input chunks are (hh, hl, lh, ll). If using the Mul16 or Mul32 multiplier options, these input chunks must be stored in separate registers. For Mac16, the UMUL.AA.* opcodes can specify that the inputs come from either half of the registers, so there is no need to shift them out ahead of time. If there is no multiply hardware, the 16-bit chunks can be extracted when setting up the arguments to the separate multiply function. */ /* Save a7 since it is needed to hold a temporary value. */ s32i a7, sp, 4 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL /* Calling a separate multiply function will clobber a0 and requires use of a8 as a temporary, so save those values now. (The function uses a custom ABI so nothing else needs to be saved.) */ s32i a0, sp, 0 s32i a8, sp, 8 #endif #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 #define xlh a12 #define ylh a13 #define xhh a14 #define yhh a15 /* Get the high halves of the inputs into registers. */ srli xlh, xl, 16 srli ylh, yl, 16 srli xhh, xh, 16 srli yhh, yh, 16 #define xll xl #define yll yl #define xhl xh #define yhl yh #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 /* Clear the high halves of the inputs. This does not matter for MUL16 because the high bits are ignored. */ extui xl, xl, 0, 16 extui xh, xh, 0, 16 extui yl, yl, 0, 16 extui yh, yh, 0, 16 #endif #endif /* MUL16 || MUL32 */ #if XCHAL_HAVE_MUL16 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mul16u dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MUL32 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mull dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MAC16 /* The preprocessor insists on inserting a space when concatenating after a period in the definition of do_mul below. These macros are a workaround using underscores instead of periods when doing the concatenation. 
*/ #define umul_aa_ll umul.aa.ll #define umul_aa_lh umul.aa.lh #define umul_aa_hl umul.aa.hl #define umul_aa_hh umul.aa.hh #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ umul_aa_ ## xhalf ## yhalf xreg, yreg; \ rsr dst, ACCLO #else /* no multiply hardware */ #define set_arg_l(dst, src) \ extui dst, src, 0, 16 #define set_arg_h(dst, src) \ srli dst, src, 16 #if __XTENSA_CALL0_ABI__ #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a13, xreg); \ set_arg_ ## yhalf (a14, yreg); \ call0 .Lmul_mulsi3; \ mov dst, a12 #else #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a14, xreg); \ set_arg_ ## yhalf (a15, yreg); \ call12 .Lmul_mulsi3; \ mov dst, a14 #endif /* __XTENSA_CALL0_ABI__ */ #endif /* no multiply hardware */ /* Add pp1 and pp2 into a10 with carry-out in a9. */ do_mul(a10, xl, l, yl, h) /* pp 1 */ do_mul(a11, xl, h, yl, l) /* pp 2 */ movi a9, 0 add a10, a10, a11 bgeu a10, a11, 1f addi a9, a9, 1 1: /* Initialize a6 with a9/a10 shifted into position. Note that this value can be safely incremented without any carry-outs. */ ssai 16 src a6, a9, a10 /* Compute the low word into a10. */ do_mul(a11, xl, l, yl, l) /* pp 0 */ sll a10, a10 add a10, a10, a11 bgeu a10, a11, 1f addi a6, a6, 1 1: /* Compute the contributions of pp0-5 to a6, with carry-outs in a9. This is good enough to determine the low half of a6, so that any nonzero bits from the low word of the result can be collapsed into a6, freeing up a register. */ movi a9, 0 do_mul(a11, xl, l, yh, l) /* pp 3 */ add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: do_mul(a11, xl, h, yl, h) /* pp 4 */ add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: do_mul(a11, xh, l, yl, l) /* pp 5 */ add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Collapse any nonzero bits from the low word into a6. */ beqz a10, 1f movi a11, 1 or a6, a6, a11 1: /* Add pp6-9 into a11 with carry-outs in a10. */ do_mul(a7, xl, l, yh, h) /* pp 6 */ do_mul(a11, xh, h, yl, l) /* pp 9 */ movi a10, 0 add a11, a11, a7 bgeu a11, a7, 1f addi a10, a10, 1 1: do_mul(a7, xl, h, yh, l) /* pp 7 */ add a11, a11, a7 bgeu a11, a7, 1f addi a10, a10, 1 1: do_mul(a7, xh, l, yl, h) /* pp 8 */ add a11, a11, a7 bgeu a11, a7, 1f addi a10, a10, 1 1: /* Shift a10/a11 into position, and add low half of a11 to a6. */ src a10, a10, a11 add a10, a10, a9 sll a11, a11 add a6, a6, a11 bgeu a6, a11, 1f addi a10, a10, 1 1: /* Add pp10-12 into xl with carry-outs in a9. */ movi a9, 0 do_mul(xl, xl, h, yh, h) /* pp 10 */ add xl, xl, a10 bgeu xl, a10, 1f addi a9, a9, 1 1: do_mul(a10, xh, l, yh, l) /* pp 11 */ add xl, xl, a10 bgeu xl, a10, 1f addi a9, a9, 1 1: do_mul(a10, xh, h, yl, h) /* pp 12 */ add xl, xl, a10 bgeu xl, a10, 1f addi a9, a9, 1 1: /* Add pp13-14 into a11 with carry-outs in a10. */ do_mul(a11, xh, l, yh, h) /* pp 13 */ do_mul(a7, xh, h, yh, l) /* pp 14 */ movi a10, 0 add a11, a11, a7 bgeu a11, a7, 1f addi a10, a10, 1 1: /* Shift a10/a11 into position, and add low half of a11 to a6. */ src a10, a10, a11 add a10, a10, a9 sll a11, a11 add xl, xl, a11 bgeu xl, a11, 1f addi a10, a10, 1 1: /* Compute xh. */ do_mul(xh, xh, h, yh, h) /* pp 15 */ add xh, xh, a10 /* Restore values saved on the stack during the multiplication. */ l32i a7, sp, 4 #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL l32i a0, sp, 0 l32i a8, sp, 8 #endif #endif /* ! XCHAL_HAVE_MUL32_HIGH */ /* Shift left by 12 bits, unless there was a carry-out from the multiply, in which case, shift by 11 bits and increment the exponent. 
Note: It is convenient to use the constant 0x3ff instead of 0x400 when removing the extra exponent bias (so that it is easy to construct 0x7fe for the overflow check). Reverse the logic here to decrement the exponent sum by one unless there was a carry-out. */ movi a4, 11 srli a5, xh, 21 - 12 bnez a5, 1f addi a4, a4, 1 addi a8, a8, -1 1: ssl a4 src xh, xh, xl src xl, xl, a6 sll a6, a6 /* Subtract the extra bias from the exponent sum (plus one to account for the explicit "1.0" of the mantissa that will be added to the exponent in the final result). */ movi a4, 0x3ff sub a8, a8, a4 /* Check for over/underflow. The value in a8 is one less than the final exponent, so values in the range 0..7fd are OK here. */ slli a4, a4, 1 /* 0x7fe */ bgeu a8, a4, .Lmul_overflow .Lmul_round: /* Round. */ bgez a6, .Lmul_rounded addi xl, xl, 1 beqz xl, .Lmul_roundcarry slli a6, a6, 1 beqz a6, .Lmul_exactlyhalf .Lmul_rounded: /* Add the exponent to the mantissa. */ slli a8, a8, 20 add xh, xh, a8 .Lmul_addsign: /* Add the sign bit. */ srli a7, a7, 31 slli a7, a7, 31 or xh, xh, a7 .Lmul_done: #if __XTENSA_CALL0_ABI__ l32i a12, sp, 16 l32i a13, sp, 20 l32i a14, sp, 24 l32i a15, sp, 28 addi sp, sp, 32 #endif leaf_return .Lmul_exactlyhalf: /* Round down to the nearest even value. */ srli xl, xl, 1 slli xl, xl, 1 j .Lmul_rounded .Lmul_roundcarry: /* xl is always zero when the rounding increment overflows, so there's no need to round it to an even value. */ addi xh, xh, 1 /* Overflow is OK -- it will be added to the exponent. */ j .Lmul_rounded .Lmul_overflow: bltz a8, .Lmul_underflow /* Return +/- Infinity. */ addi a8, a4, 1 /* 0x7ff */ slli xh, a8, 20 movi xl, 0 j .Lmul_addsign .Lmul_underflow: /* Create a subnormal value, where the exponent field contains zero, but the effective exponent is 1. The value of a8 is one less than the actual exponent, so just negate it to get the shift amount. */ neg a8, a8 mov a9, a6 ssr a8 bgeui a8, 32, .Lmul_bigshift /* Shift xh/xl right. Any bits that are shifted out of xl are saved in a6 (combined with the shifted-out bits currently in a6) for rounding the result. */ sll a6, xl src xl, xh, xl srl xh, xh j 1f .Lmul_bigshift: bgeui a8, 64, .Lmul_flush_to_zero sll a10, xl /* lost bits shifted out of xl */ src a6, xh, xl srl xl, xh movi xh, 0 or a9, a9, a10 /* Set the exponent to zero. */ 1: movi a8, 0 /* Pack any nonzero bits shifted out into a6. */ beqz a9, .Lmul_round movi a9, 1 or a6, a6, a9 j .Lmul_round .Lmul_flush_to_zero: /* Return zero with the appropriate sign bit. */ srli xh, a7, 31 slli xh, xh, 31 movi xl, 0 j .Lmul_done #if XCHAL_NO_MUL /* For Xtensa processors with no multiply hardware, this simplified version of _mulsi3 is used for multiplying 16-bit chunks of the floating-point mantissas. When using CALL0, this function uses a custom ABI: the inputs are passed in a13 and a14, the result is returned in a12, and a8 and a15 are clobbered. */ .align 4 .Lmul_mulsi3: leaf_entry sp, 16 .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 movi \dst, 0 1: add \tmp1, \src2, \dst extui \tmp2, \src1, 0, 1 movnez \dst, \tmp1, \tmp2 do_addx2 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 1, 1 movnez \dst, \tmp1, \tmp2 do_addx4 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 2, 1 movnez \dst, \tmp1, \tmp2 do_addx8 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 3, 1 movnez \dst, \tmp1, \tmp2 srli \src1, \src1, 4 slli \src2, \src2, 4 bnez \src1, 1b .endm #if __XTENSA_CALL0_ABI__ mul_mulsi3_body a12, a13, a14, a15, a8 #else /* The result will be written into a2, so save that argument in a4. 
*/ mov a4, a2 mul_mulsi3_body a2, a4, a3, a5, a6 #endif leaf_return #endif /* XCHAL_NO_MUL */ #endif /* L_muldf3 */ #ifdef L_divdf3 /* Division */ #if XCHAL_HAVE_DFP_DIV .text .align 4 .global __divdf3 .type __divdf3, @function __divdf3: leaf_entry sp, 16 wfrd f1, xh, xl wfrd f2, yh, yl div0.d f3, f2 nexp01.d f4, f2 const.d f0, 1 maddn.d f0, f4, f3 const.d f5, 0 mov.d f7, f2 mkdadj.d f7, f1 maddn.d f3, f0, f3 maddn.d f5, f0, f0 nexp01.d f1, f1 div0.d f2, f2 maddn.d f3, f5, f3 const.d f5, 1 const.d f0, 0 neg.d f6, f1 maddn.d f5, f4, f3 maddn.d f0, f6, f2 maddn.d f3, f5, f3 maddn.d f6, f4, f0 const.d f2, 1 maddn.d f2, f4, f3 maddn.d f0, f6, f3 neg.d f1, f1 maddn.d f3, f2, f3 maddn.d f1, f4, f0 addexpm.d f0, f7 addexp.d f3, f7 divn.d f0, f1, f3 rfr xl, f0 rfrd xh, f0 leaf_return #else .literal_position __divdf3_aux: /* Handle unusual cases (zeros, subnormals, NaNs and Infinities). (This code is placed before the start of the function just to keep it in range of the limited branch displacements.) */ .Ldiv_yexpzero: /* Clear the sign bit of y. */ slli yh, yh, 1 srli yh, yh, 1 /* Check for division by zero. */ or a10, yh, yl beqz a10, .Ldiv_yzero /* Normalize y. Adjust the exponent in a9. */ beqz yh, .Ldiv_yh_zero do_nsau a10, yh, a11, a9 addi a10, a10, -11 ssl a10 src yh, yh, yl sll yl, yl movi a9, 1 sub a9, a9, a10 j .Ldiv_ynormalized .Ldiv_yh_zero: do_nsau a10, yl, a11, a9 addi a10, a10, -11 movi a9, -31 sub a9, a9, a10 ssl a10 bltz a10, .Ldiv_yl_srl sll yh, yl movi yl, 0 j .Ldiv_ynormalized .Ldiv_yl_srl: srl yh, yl sll yl, yl j .Ldiv_ynormalized .Ldiv_yzero: /* y is zero. Return NaN if x is also zero; otherwise, infinity. */ slli xh, xh, 1 srli xh, xh, 1 or xl, xl, xh srli xh, a7, 31 slli xh, xh, 31 or xh, xh, a6 bnez xl, 1f movi a4, 0x80000 /* make it a quiet NaN */ or xh, xh, a4 1: movi xl, 0 leaf_return .Ldiv_xexpzero: /* Clear the sign bit of x. */ slli xh, xh, 1 srli xh, xh, 1 /* If x is zero, return zero. */ or a10, xh, xl beqz a10, .Ldiv_return_zero /* Normalize x. Adjust the exponent in a8. */ beqz xh, .Ldiv_xh_zero do_nsau a10, xh, a11, a8 addi a10, a10, -11 ssl a10 src xh, xh, xl sll xl, xl movi a8, 1 sub a8, a8, a10 j .Ldiv_xnormalized .Ldiv_xh_zero: do_nsau a10, xl, a11, a8 addi a10, a10, -11 movi a8, -31 sub a8, a8, a10 ssl a10 bltz a10, .Ldiv_xl_srl sll xh, xl movi xl, 0 j .Ldiv_xnormalized .Ldiv_xl_srl: srl xh, xl sll xl, xl j .Ldiv_xnormalized .Ldiv_return_zero: /* Return zero with the appropriate sign bit. */ srli xh, a7, 31 slli xh, xh, 31 movi xl, 0 leaf_return .Ldiv_xnan_or_inf: /* Set the sign bit of the result. */ srli a7, yh, 31 slli a7, a7, 31 xor xh, xh, a7 /* If y is NaN or Inf, return NaN. */ ball yh, a6, .Ldiv_return_nan slli a8, xh, 12 or a8, a8, xl bnez a8, .Ldiv_return_nan leaf_return .Ldiv_ynan_or_inf: /* If y is Infinity, return zero. */ slli a8, yh, 12 or a8, a8, yl beqz a8, .Ldiv_return_zero /* y is NaN; return it. */ mov xh, yh mov xl, yl .Ldiv_return_nan: movi a4, 0x80000 /* make it a quiet NaN */ or xh, xh, a4 leaf_return .Ldiv_highequal1: bltu xl, yl, 2f j 3f .align 4 .global __divdf3 .type __divdf3, @function __divdf3: leaf_entry sp, 16 movi a6, 0x7ff00000 /* Get the sign of the result. */ xor a7, xh, yh /* Check for NaN and infinity. */ ball xh, a6, .Ldiv_xnan_or_inf ball yh, a6, .Ldiv_ynan_or_inf /* Extract the exponents. */ extui a8, xh, 20, 11 extui a9, yh, 20, 11 beqz a9, .Ldiv_yexpzero .Ldiv_ynormalized: beqz a8, .Ldiv_xexpzero .Ldiv_xnormalized: /* Subtract the exponents. */ sub a8, a8, a9 /* Replace sign/exponent fields with explicit "1.0". 
*/ movi a10, 0x1fffff or xh, xh, a6 and xh, xh, a10 or yh, yh, a6 and yh, yh, a10 /* Set SAR for left shift by one. */ ssai (32 - 1) /* The first digit of the mantissa division must be a one. Shift x (and adjust the exponent) as needed to make this true. */ bltu yh, xh, 3f beq yh, xh, .Ldiv_highequal1 2: src xh, xh, xl sll xl, xl addi a8, a8, -1 3: /* Do the first subtraction and shift. */ sub xh, xh, yh bgeu xl, yl, 1f addi xh, xh, -1 1: sub xl, xl, yl src xh, xh, xl sll xl, xl /* Put the quotient into a10/a11. */ movi a10, 0 movi a11, 1 /* Divide one bit at a time for 52 bits. */ movi a9, 52 #if XCHAL_HAVE_LOOPS loop a9, .Ldiv_loopend #endif .Ldiv_loop: /* Shift the quotient << 1. */ src a10, a10, a11 sll a11, a11 /* Is this digit a 0 or 1? */ bltu xh, yh, 3f beq xh, yh, .Ldiv_highequal2 /* Output a 1 and subtract. */ 2: addi a11, a11, 1 sub xh, xh, yh bgeu xl, yl, 1f addi xh, xh, -1 1: sub xl, xl, yl /* Shift the dividend << 1. */ 3: src xh, xh, xl sll xl, xl #if !XCHAL_HAVE_LOOPS addi a9, a9, -1 bnez a9, .Ldiv_loop #endif .Ldiv_loopend: /* Add the exponent bias (less one to account for the explicit "1.0" of the mantissa that will be added to the exponent in the final result). */ movi a9, 0x3fe add a8, a8, a9 /* Check for over/underflow. The value in a8 is one less than the final exponent, so values in the range 0..7fd are OK here. */ addmi a9, a9, 0x400 /* 0x7fe */ bgeu a8, a9, .Ldiv_overflow .Ldiv_round: /* Round. The remainder (<< 1) is in xh/xl. */ bltu xh, yh, .Ldiv_rounded beq xh, yh, .Ldiv_highequal3 .Ldiv_roundup: addi a11, a11, 1 beqz a11, .Ldiv_roundcarry .Ldiv_rounded: mov xl, a11 /* Add the exponent to the mantissa. */ slli a8, a8, 20 add xh, a10, a8 .Ldiv_addsign: /* Add the sign bit. */ srli a7, a7, 31 slli a7, a7, 31 or xh, xh, a7 leaf_return .Ldiv_highequal2: bgeu xl, yl, 2b j 3b .Ldiv_highequal3: bltu xl, yl, .Ldiv_rounded bne xl, yl, .Ldiv_roundup /* Remainder is exactly half the divisor. Round even. */ addi a11, a11, 1 beqz a11, .Ldiv_roundcarry srli a11, a11, 1 slli a11, a11, 1 j .Ldiv_rounded .Ldiv_overflow: bltz a8, .Ldiv_underflow /* Return +/- Infinity. */ addi a8, a9, 1 /* 0x7ff */ slli xh, a8, 20 movi xl, 0 j .Ldiv_addsign .Ldiv_underflow: /* Create a subnormal value, where the exponent field contains zero, but the effective exponent is 1. The value of a8 is one less than the actual exponent, so just negate it to get the shift amount. */ neg a8, a8 ssr a8 bgeui a8, 32, .Ldiv_bigshift /* Shift a10/a11 right. Any bits that are shifted out of a11 are saved in a6 for rounding the result. */ sll a6, a11 src a11, a10, a11 srl a10, a10 j 1f .Ldiv_bigshift: bgeui a8, 64, .Ldiv_flush_to_zero sll a9, a11 /* lost bits shifted out of a11 */ src a6, a10, a11 srl a11, a10 movi a10, 0 or xl, xl, a9 /* Set the exponent to zero. */ 1: movi a8, 0 /* Pack any nonzero remainder (in xh/xl) into a6. */ or xh, xh, xl beqz xh, 1f movi a9, 1 or a6, a6, a9 /* Round a10/a11 based on the bits shifted out into a6. */ 1: bgez a6, .Ldiv_rounded addi a11, a11, 1 beqz a11, .Ldiv_roundcarry slli a6, a6, 1 bnez a6, .Ldiv_rounded srli a11, a11, 1 slli a11, a11, 1 j .Ldiv_rounded .Ldiv_roundcarry: /* a11 is always zero when the rounding increment overflows, so there's no need to round it to an even value. */ addi a10, a10, 1 /* Overflow to the exponent field is OK. */ j .Ldiv_rounded .Ldiv_flush_to_zero: /* Return zero with the appropriate sign bit. 
*/ srli xh, a7, 31 slli xh, xh, 31 movi xl, 0 leaf_return #endif /* XCHAL_HAVE_DFP_DIV */ #endif /* L_divdf3 */ #ifdef L_cmpdf2 /* Equal and Not Equal */ .align 4 .global __eqdf2 .global __nedf2 .set __nedf2, __eqdf2 .type __eqdf2, @function __eqdf2: leaf_entry sp, 16 bne xl, yl, 2f bne xh, yh, 4f /* The values are equal but NaN != NaN. Check the exponent. */ movi a6, 0x7ff00000 ball xh, a6, 3f /* Equal. */ movi a2, 0 leaf_return /* Not equal. */ 2: movi a2, 1 leaf_return /* Check if the mantissas are nonzero. */ 3: slli a7, xh, 12 or a7, a7, xl j 5f /* Check if x and y are zero with different signs. */ 4: or a7, xh, yh slli a7, a7, 1 or a7, a7, xl /* xl == yl here */ /* Equal if a7 == 0, where a7 is either abs(x | y) or the mantissa or x when exponent(x) = 0x7ff and x == y. */ 5: movi a2, 0 movi a3, 1 movnez a2, a3, a7 leaf_return /* Greater Than */ .align 4 .global __gtdf2 .type __gtdf2, @function __gtdf2: leaf_entry sp, 16 movi a6, 0x7ff00000 ball xh, a6, 2f 1: bnall yh, a6, .Lle_cmp /* Check if y is a NaN. */ slli a7, yh, 12 or a7, a7, yl beqz a7, .Lle_cmp movi a2, 0 leaf_return /* Check if x is a NaN. */ 2: slli a7, xh, 12 or a7, a7, xl beqz a7, 1b movi a2, 0 leaf_return /* Less Than or Equal */ .align 4 .global __ledf2 .type __ledf2, @function __ledf2: leaf_entry sp, 16 movi a6, 0x7ff00000 ball xh, a6, 2f 1: bnall yh, a6, .Lle_cmp /* Check if y is a NaN. */ slli a7, yh, 12 or a7, a7, yl beqz a7, .Lle_cmp movi a2, 1 leaf_return /* Check if x is a NaN. */ 2: slli a7, xh, 12 or a7, a7, xl beqz a7, 1b movi a2, 1 leaf_return .Lle_cmp: /* Check if x and y have different signs. */ xor a7, xh, yh bltz a7, .Lle_diff_signs /* Check if x is negative. */ bltz xh, .Lle_xneg /* Check if x <= y. */ bltu xh, yh, 4f bne xh, yh, 5f bltu yl, xl, 5f 4: movi a2, 0 leaf_return .Lle_xneg: /* Check if y <= x. */ bltu yh, xh, 4b bne yh, xh, 5f bgeu xl, yl, 4b 5: movi a2, 1 leaf_return .Lle_diff_signs: bltz xh, 4b /* Check if both x and y are zero. */ or a7, xh, yh slli a7, a7, 1 or a7, a7, xl or a7, a7, yl movi a2, 1 movi a3, 0 moveqz a2, a3, a7 leaf_return /* Greater Than or Equal */ .align 4 .global __gedf2 .type __gedf2, @function __gedf2: leaf_entry sp, 16 movi a6, 0x7ff00000 ball xh, a6, 2f 1: bnall yh, a6, .Llt_cmp /* Check if y is a NaN. */ slli a7, yh, 12 or a7, a7, yl beqz a7, .Llt_cmp movi a2, -1 leaf_return /* Check if x is a NaN. */ 2: slli a7, xh, 12 or a7, a7, xl beqz a7, 1b movi a2, -1 leaf_return /* Less Than */ .align 4 .global __ltdf2 .type __ltdf2, @function __ltdf2: leaf_entry sp, 16 movi a6, 0x7ff00000 ball xh, a6, 2f 1: bnall yh, a6, .Llt_cmp /* Check if y is a NaN. */ slli a7, yh, 12 or a7, a7, yl beqz a7, .Llt_cmp movi a2, 0 leaf_return /* Check if x is a NaN. */ 2: slli a7, xh, 12 or a7, a7, xl beqz a7, 1b movi a2, 0 leaf_return .Llt_cmp: /* Check if x and y have different signs. */ xor a7, xh, yh bltz a7, .Llt_diff_signs /* Check if x is negative. */ bltz xh, .Llt_xneg /* Check if x < y. */ bltu xh, yh, 4f bne xh, yh, 5f bgeu xl, yl, 5f 4: movi a2, -1 leaf_return .Llt_xneg: /* Check if y < x. */ bltu yh, xh, 4b bne yh, xh, 5f bltu yl, xl, 4b 5: movi a2, 0 leaf_return .Llt_diff_signs: bgez xh, 5b /* Check if both x and y are nonzero. 
*/ or a7, xh, yh slli a7, a7, 1 or a7, a7, xl or a7, a7, yl movi a2, 0 movi a3, -1 movnez a2, a3, a7 leaf_return /* Unordered */ .align 4 .global __unorddf2 .type __unorddf2, @function __unorddf2: leaf_entry sp, 16 movi a6, 0x7ff00000 ball xh, a6, 3f 1: ball yh, a6, 4f 2: movi a2, 0 leaf_return 3: slli a7, xh, 12 or a7, a7, xl beqz a7, 1b movi a2, 1 leaf_return 4: slli a7, yh, 12 or a7, a7, yl beqz a7, 2b movi a2, 1 leaf_return #endif /* L_cmpdf2 */ #ifdef L_fixdfsi .align 4 .global __fixdfsi .type __fixdfsi, @function __fixdfsi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7ff00000 ball xh, a6, .Lfixdfsi_nan_or_inf /* Extract the exponent and check if 0 < (exp - 0x3fe) < 32. */ extui a4, xh, 20, 11 extui a5, a6, 19, 10 /* 0x3fe */ sub a4, a4, a5 bgei a4, 32, .Lfixdfsi_maxint blti a4, 1, .Lfixdfsi_zero /* Add explicit "1.0" and shift << 11. */ or a7, xh, a6 ssai (32 - 11) src a5, a7, xl /* Shift back to the right, based on the exponent. */ ssl a4 /* shift by 32 - a4 */ srl a5, a5 /* Negate the result if sign != 0. */ neg a2, a5 movgez a2, a5, a7 leaf_return .Lfixdfsi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, xh, 12 or a4, a4, xl beqz a4, .Lfixdfsi_maxint /* Translate NaN to +maxint. */ movi xh, 0 .Lfixdfsi_maxint: slli a4, a6, 11 /* 0x80000000 */ addi a5, a4, -1 /* 0x7fffffff */ movgez a4, a5, xh mov a2, a4 leaf_return .Lfixdfsi_zero: movi a2, 0 leaf_return #endif /* L_fixdfsi */ #ifdef L_fixdfdi .align 4 .global __fixdfdi .type __fixdfdi, @function __fixdfdi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7ff00000 ball xh, a6, .Lfixdfdi_nan_or_inf /* Extract the exponent and check if 0 < (exp - 0x3fe) < 64. */ extui a4, xh, 20, 11 extui a5, a6, 19, 10 /* 0x3fe */ sub a4, a4, a5 bgei a4, 64, .Lfixdfdi_maxint blti a4, 1, .Lfixdfdi_zero /* Add explicit "1.0" and shift << 11. */ or a7, xh, a6 ssai (32 - 11) src xh, a7, xl sll xl, xl /* Shift back to the right, based on the exponent. */ ssl a4 /* shift by 64 - a4 */ bgei a4, 32, .Lfixdfdi_smallshift srl xl, xh movi xh, 0 .Lfixdfdi_shifted: /* Negate the result if sign != 0. */ bgez a7, 1f neg xl, xl neg xh, xh beqz xl, 1f addi xh, xh, -1 1: leaf_return .Lfixdfdi_smallshift: src xl, xh, xl srl xh, xh j .Lfixdfdi_shifted .Lfixdfdi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, xh, 12 or a4, a4, xl beqz a4, .Lfixdfdi_maxint /* Translate NaN to +maxint. */ movi xh, 0 .Lfixdfdi_maxint: slli a7, a6, 11 /* 0x80000000 */ bgez xh, 1f mov xh, a7 movi xl, 0 leaf_return 1: addi xh, a7, -1 /* 0x7fffffff */ movi xl, -1 leaf_return .Lfixdfdi_zero: movi xh, 0 movi xl, 0 leaf_return #endif /* L_fixdfdi */ #ifdef L_fixunsdfsi .align 4 .global __fixunsdfsi .type __fixunsdfsi, @function __fixunsdfsi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7ff00000 ball xh, a6, .Lfixunsdfsi_nan_or_inf /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 32. */ extui a4, xh, 20, 11 extui a5, a6, 20, 10 /* 0x3ff */ sub a4, a4, a5 bgei a4, 32, .Lfixunsdfsi_maxint bltz a4, .Lfixunsdfsi_zero /* Add explicit "1.0" and shift << 11. */ or a7, xh, a6 ssai (32 - 11) src a5, a7, xl /* Shift back to the right, based on the exponent. */ addi a4, a4, 1 beqi a4, 32, .Lfixunsdfsi_bigexp ssl a4 /* shift by 32 - a4 */ srl a5, a5 /* Negate the result if sign != 0. */ neg a2, a5 movgez a2, a5, a7 leaf_return .Lfixunsdfsi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, xh, 12 or a4, a4, xl beqz a4, .Lfixunsdfsi_maxint /* Translate NaN to 0xffffffff. 
*/ movi a2, -1 leaf_return .Lfixunsdfsi_maxint: slli a4, a6, 11 /* 0x80000000 */ movi a5, -1 /* 0xffffffff */ movgez a4, a5, xh mov a2, a4 leaf_return .Lfixunsdfsi_zero: movi a2, 0 leaf_return .Lfixunsdfsi_bigexp: /* Handle unsigned maximum exponent case. */ bltz xh, 1f mov a2, a5 /* no shift needed */ leaf_return /* Return 0x80000000 if negative. */ 1: slli a2, a6, 11 leaf_return #endif /* L_fixunsdfsi */ #ifdef L_fixunsdfdi .align 4 .global __fixunsdfdi .type __fixunsdfdi, @function __fixunsdfdi: leaf_entry sp, 16 /* Check for NaN and Infinity. */ movi a6, 0x7ff00000 ball xh, a6, .Lfixunsdfdi_nan_or_inf /* Extract the exponent and check if 0 <= (exp - 0x3ff) < 64. */ extui a4, xh, 20, 11 extui a5, a6, 20, 10 /* 0x3ff */ sub a4, a4, a5 bgei a4, 64, .Lfixunsdfdi_maxint bltz a4, .Lfixunsdfdi_zero /* Add explicit "1.0" and shift << 11. */ or a7, xh, a6 ssai (32 - 11) src xh, a7, xl sll xl, xl /* Shift back to the right, based on the exponent. */ addi a4, a4, 1 beqi a4, 64, .Lfixunsdfdi_bigexp ssl a4 /* shift by 64 - a4 */ bgei a4, 32, .Lfixunsdfdi_smallshift srl xl, xh movi xh, 0 .Lfixunsdfdi_shifted: /* Negate the result if sign != 0. */ bgez a7, 1f neg xl, xl neg xh, xh beqz xl, 1f addi xh, xh, -1 1: leaf_return .Lfixunsdfdi_smallshift: src xl, xh, xl srl xh, xh j .Lfixunsdfdi_shifted .Lfixunsdfdi_nan_or_inf: /* Handle Infinity and NaN. */ slli a4, xh, 12 or a4, a4, xl beqz a4, .Lfixunsdfdi_maxint /* Translate NaN to 0xffffffff.... */ 1: movi xh, -1 movi xl, -1 leaf_return .Lfixunsdfdi_maxint: bgez xh, 1b 2: slli xh, a6, 11 /* 0x80000000 */ movi xl, 0 leaf_return .Lfixunsdfdi_zero: movi xh, 0 movi xl, 0 leaf_return .Lfixunsdfdi_bigexp: /* Handle unsigned maximum exponent case. */ bltz a7, 2b leaf_return /* no shift needed */ #endif /* L_fixunsdfdi */ #ifdef L_floatsidf .align 4 .global __floatunsidf .type __floatunsidf, @function __floatunsidf: leaf_entry sp, 16 beqz a2, .Lfloatsidf_return_zero /* Set the sign to zero and jump to the floatsidf code. */ movi a7, 0 j .Lfloatsidf_normalize .align 4 .global __floatsidf .type __floatsidf, @function __floatsidf: leaf_entry sp, 16 /* Check for zero. */ beqz a2, .Lfloatsidf_return_zero /* Save the sign. */ extui a7, a2, 31, 1 /* Get the absolute value. */ #if XCHAL_HAVE_ABS abs a2, a2 #else neg a4, a2 movltz a2, a4, a2 #endif .Lfloatsidf_normalize: /* Normalize with the first 1 bit in the msb. */ do_nsau a4, a2, a5, a6 ssl a4 sll a5, a2 /* Shift the mantissa into position. */ srli xh, a5, 11 slli xl, a5, (32 - 11) /* Set the exponent. */ movi a5, 0x41d /* 0x3fe + 31 */ sub a5, a5, a4 slli a5, a5, 20 add xh, xh, a5 /* Add the sign and return. */ slli a7, a7, 31 or xh, xh, a7 leaf_return .Lfloatsidf_return_zero: movi a3, 0 leaf_return #endif /* L_floatsidf */ #ifdef L_floatdidf .align 4 .global __floatundidf .type __floatundidf, @function __floatundidf: leaf_entry sp, 16 /* Check for zero. */ or a4, xh, xl beqz a4, 2f /* Set the sign to zero and jump to the floatdidf code. */ movi a7, 0 j .Lfloatdidf_normalize .align 4 .global __floatdidf .type __floatdidf, @function __floatdidf: leaf_entry sp, 16 /* Check for zero. */ or a4, xh, xl beqz a4, 2f /* Save the sign. */ extui a7, xh, 31, 1 /* Get the absolute value. */ bgez xh, .Lfloatdidf_normalize neg xl, xl neg xh, xh beqz xl, .Lfloatdidf_normalize addi xh, xh, -1 .Lfloatdidf_normalize: /* Normalize with the first 1 bit in the msb of xh. 
*/ beqz xh, .Lfloatdidf_bigshift do_nsau a4, xh, a5, a6 ssl a4 src xh, xh, xl sll xl, xl .Lfloatdidf_shifted: /* Shift the mantissa into position, with rounding bits in a6. */ ssai 11 sll a6, xl src xl, xh, xl srl xh, xh /* Set the exponent. */ movi a5, 0x43d /* 0x3fe + 63 */ sub a5, a5, a4 slli a5, a5, 20 add xh, xh, a5 /* Add the sign. */ slli a7, a7, 31 or xh, xh, a7 /* Round up if the leftover fraction is >= 1/2. */ bgez a6, 2f addi xl, xl, 1 beqz xl, .Lfloatdidf_roundcarry /* Check if the leftover fraction is exactly 1/2. */ slli a6, a6, 1 beqz a6, .Lfloatdidf_exactlyhalf 2: leaf_return .Lfloatdidf_bigshift: /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */ do_nsau a4, xl, a5, a6 ssl a4 sll xh, xl movi xl, 0 addi a4, a4, 32 j .Lfloatdidf_shifted .Lfloatdidf_exactlyhalf: /* Round down to the nearest even value. */ srli xl, xl, 1 slli xl, xl, 1 leaf_return .Lfloatdidf_roundcarry: /* xl is always zero when the rounding increment overflows, so there's no need to round it to an even value. */ addi xh, xh, 1 /* Overflow to the exponent is OK. */ leaf_return #endif /* L_floatdidf */ #ifdef L_truncdfsf2 .align 4 .global __truncdfsf2 .type __truncdfsf2, @function __truncdfsf2: leaf_entry sp, 16 /* Adjust the exponent bias. */ movi a4, (0x3ff - 0x7f) << 20 sub a5, xh, a4 /* Check for underflow. */ xor a6, xh, a5 bltz a6, .Ltrunc_underflow extui a6, a5, 20, 11 beqz a6, .Ltrunc_underflow /* Check for overflow. */ movi a4, 255 bge a6, a4, .Ltrunc_overflow /* Shift a5/xl << 3 into a5/a4. */ ssai (32 - 3) src a5, a5, xl sll a4, xl .Ltrunc_addsign: /* Add the sign bit. */ extui a6, xh, 31, 1 slli a6, a6, 31 or a2, a6, a5 /* Round up if the leftover fraction is >= 1/2. */ bgez a4, 1f addi a2, a2, 1 /* Overflow to the exponent is OK. The answer will be correct. */ /* Check if the leftover fraction is exactly 1/2. */ slli a4, a4, 1 beqz a4, .Ltrunc_exactlyhalf 1: leaf_return .Ltrunc_exactlyhalf: /* Round down to the nearest even value. */ srli a2, a2, 1 slli a2, a2, 1 leaf_return .Ltrunc_overflow: /* Check if exponent == 0x7ff. */ movi a4, 0x7ff00000 bnall xh, a4, 1f /* Check if mantissa is nonzero. */ slli a5, xh, 12 or a5, a5, xl beqz a5, 1f /* Shift a4 to set a bit in the mantissa, making a quiet NaN. */ srli a4, a4, 1 1: slli a4, a4, 4 /* 0xff000000 or 0xff800000 */ /* Add the sign bit. */ extui a6, xh, 31, 1 ssai 1 src a2, a6, a4 leaf_return .Ltrunc_underflow: /* Find shift count for a subnormal. Flush to zero if >= 32. */ extui a6, xh, 20, 11 movi a5, 0x3ff - 0x7f sub a6, a5, a6 addi a6, a6, 1 bgeui a6, 32, 1f /* Replace the exponent with an explicit "1.0". */ slli a5, a5, 13 /* 0x700000 */ or a5, a5, xh slli a5, a5, 11 srli a5, a5, 11 /* Shift the mantissa left by 3 bits (into a5/a4). */ ssai (32 - 3) src a5, a5, xl sll a4, xl /* Shift right by a6. */ ssr a6 sll a7, a4 src a4, a5, a4 srl a5, a5 beqz a7, .Ltrunc_addsign or a4, a4, a6 /* any positive, nonzero value will work */ j .Ltrunc_addsign /* Return +/- zero. */ 1: extui a2, xh, 31, 1 slli a2, a2, 31 leaf_return #endif /* L_truncdfsf2 */ #ifdef L_extendsfdf2 .align 4 .global __extendsfdf2 .type __extendsfdf2, @function __extendsfdf2: leaf_entry sp, 16 /* Save the sign bit and then shift it off. */ extui a5, a2, 31, 1 slli a5, a5, 31 slli a4, a2, 1 /* Extract and check the exponent. */ extui a6, a2, 23, 8 beqz a6, .Lextend_expzero addi a6, a6, 1 beqi a6, 256, .Lextend_nan_or_inf /* Shift >> 3 into a4/xl. */ srli a4, a4, 4 slli xl, a2, (32 - 3) /* Adjust the exponent bias. 
*/ movi a6, (0x3ff - 0x7f) << 20 add a4, a4, a6 /* Add the sign bit. */ or xh, a4, a5 leaf_return .Lextend_nan_or_inf: movi a4, 0x7ff00000 /* Check for NaN. */ slli a7, a2, 9 beqz a7, 1f slli a6, a6, 11 /* 0x80000 */ or a4, a4, a6 /* Add the sign and return. */ 1: or xh, a4, a5 movi xl, 0 leaf_return .Lextend_expzero: beqz a4, 1b /* Normalize it to have 8 zero bits before the first 1 bit. */ do_nsau a7, a4, a2, a3 addi a7, a7, -8 ssl a7 sll a4, a4 /* Shift >> 3 into a4/xl. */ slli xl, a4, (32 - 3) srli a4, a4, 3 /* Set the exponent. */ movi a6, 0x3fe - 0x7f sub a6, a6, a7 slli a6, a6, 20 add a4, a4, a6 /* Add the sign and return. */ or xh, a4, a5 leaf_return #endif /* L_extendsfdf2 */ #if XCHAL_HAVE_DFP_SQRT #ifdef L_sqrt .text .align 4 .global __ieee754_sqrt .type __ieee754_sqrt, @function __ieee754_sqrt: leaf_entry sp, 16 wfrd f1, xh, xl sqrt0.d f2, f1 const.d f4, 0 maddn.d f4, f2, f2 nexp01.d f3, f1 const.d f0, 3 addexp.d f3, f0 maddn.d f0, f4, f3 nexp01.d f4, f1 maddn.d f2, f0, f2 const.d f5, 0 maddn.d f5, f2, f3 const.d f0, 3 maddn.d f0, f5, f2 neg.d f6, f4 maddn.d f2, f0, f2 const.d f0, 0 const.d f5, 0 const.d f7, 0 maddn.d f0, f6, f2 maddn.d f5, f2, f3 const.d f3, 3 maddn.d f7, f3, f2 maddn.d f4, f0, f0 maddn.d f3, f5, f2 neg.d f2, f7 maddn.d f0, f4, f2 maddn.d f7, f3, f7 mksadj.d f2, f1 nexp01.d f1, f1 maddn.d f1, f0, f0 neg.d f3, f7 addexpm.d f0, f2 addexp.d f3, f2 divn.d f0, f1, f3 rfr xl, f0 rfrd xh, f0 leaf_return #endif /* L_sqrt */ #endif /* XCHAL_HAVE_DFP_SQRT */ #if XCHAL_HAVE_DFP_RECIP #ifdef L_recipdf2 /* Reciprocal */ .align 4 .global __recipdf2 .type __recipdf2, @function __recipdf2: leaf_entry sp, 16 wfrd f1, xh, xl recip0.d f0, f1 const.d f2, 2 msub.d f2, f1, f0 mul.d f3, f1, f0 const.d f4, 2 mul.d f5, f0, f2 msub.d f4, f3, f2 const.d f2, 1 mul.d f0, f5, f4 msub.d f2, f1, f0 maddn.d f0, f0, f2 rfr xl, f0 rfrd xh, f0 leaf_return #endif /* L_recipdf2 */ #endif /* XCHAL_HAVE_DFP_RECIP */ #if XCHAL_HAVE_DFP_RSQRT #ifdef L_rsqrtdf2 /* Reciprocal square root */ .align 4 .global __rsqrtdf2 .type __rsqrtdf2, @function __rsqrtdf2: leaf_entry sp, 16 wfrd f1, xh, xl rsqrt0.d f0, f1 mul.d f2, f1, f0 const.d f3, 3 mul.d f4, f3, f0 const.d f5, 1 msub.d f5, f2, f0 maddn.d f0, f4, f5 const.d f2, 1 mul.d f4, f1, f0 mul.d f5, f3, f0 msub.d f2, f4, f0 maddn.d f0, f5, f2 const.d f2, 1 mul.d f1, f1, f0 mul.d f3, f3, f0 msub.d f2, f1, f0 maddn.d f0, f3, f2 rfr xl, f0 rfrd xh, f0 leaf_return #endif /* L_rsqrtdf2 */ #endif /* XCHAL_HAVE_DFP_RSQRT */
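For reference, the pp0..pp15 decomposition enumerated above assembles a 64x64 -> 128-bit product from sixteen 16-bit partial products when no wide multiplier exists. A minimal C sketch of the same chunk-and-carry scheme (umul64wide is a hypothetical name for illustration, not part of libgcc):

#include <stdint.h>

/* 64x64 -> 128-bit multiply built from sixteen 16-bit partial products,
   mirroring the pp0..pp15 enumeration above.  Illustrative sketch only.  */
static void
umul64wide (uint64_t x, uint64_t y, uint64_t *hi, uint64_t *lo)
{
  uint32_t xc[4], yc[4];
  uint64_t col[8] = { 0 };

  for (int i = 0; i < 4; i++)
    {
      xc[i] = (uint32_t) (x >> (16 * i)) & 0xffff;	/* ll, lh, hl, hh */
      yc[i] = (uint32_t) (y >> (16 * i)) & 0xffff;
    }

  /* One partial product per (i, j) pair, accumulated into 16-bit columns.  */
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++)
      col[i + j] += (uint64_t) xc[i] * yc[j];

  /* Propagate carries between columns, as the assembly does with its
     bgeu/addi carry checks.  */
  uint64_t carry = 0, r[8];
  for (int k = 0; k < 8; k++)
    {
      uint64_t v = col[k] + carry;
      r[k] = v & 0xffff;
      carry = v >> 16;
    }
  *lo = r[0] | (r[1] << 16) | (r[2] << 32) | (r[3] << 48);
  *hi = r[4] | (r[5] << 16) | (r[6] << 32) | (r[7] << 48);
}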
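Similarly, the round-to-nearest-even step (.Lmul_round / .Lmul_exactlyhalf / .Lmul_roundcarry) can be summarized in C, assuming the shifted-out guard and sticky bits are packed msb-first into a 32-bit word the way the assembly keeps them in a6:

#include <stdint.h>

/* Round-to-nearest-even on a 64-bit mantissa.  'guard' holds the bits
   shifted out below the mantissa, msb-first; any set lower bit acts as a
   sticky bit.  A carry out of the increment is reported to the caller,
   which in the assembly simply propagates into the exponent field.
   Sketch only.  */
static uint64_t
round_nearest_even (uint64_t mant, uint32_t guard, int *carry)
{
  *carry = 0;
  if (guard & 0x80000000u)		/* lost fraction >= 1/2 ulp */
    {
      mant++;
      if (mant == 0)
	*carry = 1;			/* mant is zero, hence even */
      else if ((guard << 1) == 0)	/* exactly 1/2: round to even */
	mant &= ~(uint64_t) 1;
    }
  return mant;
}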
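Further down, __fixdfsi's saturating conversion amounts to the following C sketch (fix_double_to_i32 is a hypothetical name; NaN maps to +maxint exactly as in the assembly):

#include <stdint.h>

/* Saturating double -> int32 conversion following __fixdfsi.  'bits' is
   the raw IEEE-754 encoding.  Illustration only.  */
static int32_t
fix_double_to_i32 (uint64_t bits)
{
  int neg = (int) (bits >> 63);
  int e = (int) ((bits >> 52) & 0x7ff) - 0x3fe;	/* shift count, as in a4 */

  if (e >= 32)
    {
      if (((bits >> 52) & 0x7ff) == 0x7ff && (bits & 0xfffffffffffffULL))
	return INT32_MAX;		/* NaN -> +maxint */
      return neg ? INT32_MIN : INT32_MAX;
    }
  if (e < 1)
    return 0;				/* magnitude below 1 */

  /* Implicit leading 1 followed by the top 31 mantissa bits: the same
     "or in the exponent mask, shift left 11" trick as the assembly.  */
  uint32_t m = 0x80000000u | (uint32_t) ((bits >> 21) & 0x7fffffffu);
  uint32_t v = m >> (32 - e);
  return neg ? -(int32_t) v : (int32_t) v;
}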
4ms/metamodule-plugin-sdk
19,046
plugin-libc/libgcc/config/xtensa/lib1funcs.S
/* Assembly functions for the Xtensa version of libgcc1. Copyright (C) 2001-2022 Free Software Foundation, Inc. Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include "xtensa-config.h" /* Define macros for the ABS and ADDX* instructions to handle cases where they are not included in the Xtensa processor configuration. */ .macro do_abs dst, src, tmp #if XCHAL_HAVE_ABS abs \dst, \src #else neg \tmp, \src movgez \tmp, \src, \src mov \dst, \tmp #endif .endm .macro do_addx2 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx2 \dst, \as, \at #else slli \tmp, \as, 1 add \dst, \tmp, \at #endif .endm .macro do_addx4 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx4 \dst, \as, \at #else slli \tmp, \as, 2 add \dst, \tmp, \at #endif .endm .macro do_addx8 dst, as, at, tmp #if XCHAL_HAVE_ADDX addx8 \dst, \as, \at #else slli \tmp, \as, 3 add \dst, \tmp, \at #endif .endm /* Define macros for leaf function entry and return, supporting either the standard register windowed ABI or the non-windowed call0 ABI. These macros do not allocate any extra stack space, so they only work for leaf functions that do not need to spill anything to the stack. */ .macro leaf_entry reg, size #if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__ entry \reg, \size #else /* do nothing */ #endif .endm .macro leaf_return #if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__ retw #else ret #endif .endm #ifdef L_mulsi3 .align 4 .global __mulsi3 .type __mulsi3, @function __mulsi3: leaf_entry sp, 16 #if XCHAL_HAVE_MUL32 mull a2, a2, a3 #elif XCHAL_HAVE_MUL16 or a4, a2, a3 srai a4, a4, 16 bnez a4, .LMUL16 mul16u a2, a2, a3 leaf_return .LMUL16: srai a4, a2, 16 srai a5, a3, 16 mul16u a7, a4, a3 mul16u a6, a5, a2 mul16u a4, a2, a3 add a7, a7, a6 slli a7, a7, 16 add a2, a7, a4 #elif XCHAL_HAVE_MAC16 mul.aa.hl a2, a3 mula.aa.lh a2, a3 rsr a5, ACCLO umul.aa.ll a2, a3 rsr a4, ACCLO slli a5, a5, 16 add a2, a4, a5 #else /* !MUL32 && !MUL16 && !MAC16 */ /* Multiply one bit at a time, but unroll the loop 4x to better exploit the addx instructions and avoid overhead. Peel the first iteration to save a cycle on init. */ /* Avoid negative numbers. */ xor a5, a2, a3 /* Top bit is 1 if one input is negative. */ do_abs a3, a3, a6 do_abs a2, a2, a6 /* Swap so the second argument is smaller. 
*/ sub a7, a2, a3 mov a4, a3 movgez a4, a2, a7 /* a4 = max (a2, a3) */ movltz a3, a2, a7 /* a3 = min (a2, a3) */ movi a2, 0 extui a6, a3, 0, 1 movnez a2, a4, a6 do_addx2 a7, a4, a2, a7 extui a6, a3, 1, 1 movnez a2, a7, a6 do_addx4 a7, a4, a2, a7 extui a6, a3, 2, 1 movnez a2, a7, a6 do_addx8 a7, a4, a2, a7 extui a6, a3, 3, 1 movnez a2, a7, a6 bgeui a3, 16, .Lmult_main_loop neg a3, a2 movltz a2, a3, a5 leaf_return .align 4 .Lmult_main_loop: srli a3, a3, 4 slli a4, a4, 4 add a7, a4, a2 extui a6, a3, 0, 1 movnez a2, a7, a6 do_addx2 a7, a4, a2, a7 extui a6, a3, 1, 1 movnez a2, a7, a6 do_addx4 a7, a4, a2, a7 extui a6, a3, 2, 1 movnez a2, a7, a6 do_addx8 a7, a4, a2, a7 extui a6, a3, 3, 1 movnez a2, a7, a6 bgeui a3, 16, .Lmult_main_loop neg a3, a2 movltz a2, a3, a5 #endif /* !MUL32 && !MUL16 && !MAC16 */ leaf_return .size __mulsi3, . - __mulsi3 #endif /* L_mulsi3 */ #ifdef L_umulsidi3 #if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16 #define XCHAL_NO_MUL 1 #endif .align 4 .global __umulsidi3 .type __umulsidi3, @function __umulsidi3: #if __XTENSA_CALL0_ABI__ leaf_entry sp, 32 addi sp, sp, -32 s32i a12, sp, 16 s32i a13, sp, 20 s32i a14, sp, 24 s32i a15, sp, 28 #elif XCHAL_NO_MUL /* This is not really a leaf function; allocate enough stack space to allow CALL12s to a helper function. */ leaf_entry sp, 48 #else leaf_entry sp, 16 #endif #ifdef __XTENSA_EB__ #define wh a2 #define wl a3 #else #define wh a3 #define wl a2 #endif /* __XTENSA_EB__ */ /* This code is taken from the mulsf3 routine in ieee754-sf.S. See more comments there. */ #if XCHAL_HAVE_MUL32_HIGH mull a6, a2, a3 muluh wh, a2, a3 mov wl, a6 #else /* ! MUL32_HIGH */ #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL /* a0 and a8 will be clobbered by calling the multiply function but a8 is not used here and need not be saved. */ s32i a0, sp, 0 #endif #if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 #define a2h a4 #define a3h a5 /* Get the high halves of the inputs into registers. */ srli a2h, a2, 16 srli a3h, a3, 16 #define a2l a2 #define a3l a3 #if XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MUL16 /* Clear the high halves of the inputs. This does not matter for MUL16 because the high bits are ignored. */ extui a2, a2, 0, 16 extui a3, a3, 0, 16 #endif #endif /* MUL16 || MUL32 */ #if XCHAL_HAVE_MUL16 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mul16u dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MUL32 #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ mull dst, xreg ## xhalf, yreg ## yhalf #elif XCHAL_HAVE_MAC16 /* The preprocessor insists on inserting a space when concatenating after a period in the definition of do_mul below. These macros are a workaround using underscores instead of periods when doing the concatenation. */ #define umul_aa_ll umul.aa.ll #define umul_aa_lh umul.aa.lh #define umul_aa_hl umul.aa.hl #define umul_aa_hh umul.aa.hh #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ umul_aa_ ## xhalf ## yhalf xreg, yreg; \ rsr dst, ACCLO #else /* no multiply hardware */ #define set_arg_l(dst, src) \ extui dst, src, 0, 16 #define set_arg_h(dst, src) \ srli dst, src, 16 #if __XTENSA_CALL0_ABI__ #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a13, xreg); \ set_arg_ ## yhalf (a14, yreg); \ call0 .Lmul_mulsi3; \ mov dst, a12 #else #define do_mul(dst, xreg, xhalf, yreg, yhalf) \ set_arg_ ## xhalf (a14, xreg); \ set_arg_ ## yhalf (a15, yreg); \ call12 .Lmul_mulsi3; \ mov dst, a14 #endif /* __XTENSA_CALL0_ABI__ */ #endif /* no multiply hardware */ /* Add pp1 and pp2 into a6 with carry-out in a9. 
*/ do_mul(a6, a2, l, a3, h) /* pp 1 */ do_mul(a11, a2, h, a3, l) /* pp 2 */ movi a9, 0 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Shift the high half of a9/a6 into position in a9. Note that this value can be safely incremented without any carry-outs. */ ssai 16 src a9, a9, a6 /* Compute the low word into a6. */ do_mul(a11, a2, l, a3, l) /* pp 0 */ sll a6, a6 add a6, a6, a11 bgeu a6, a11, 1f addi a9, a9, 1 1: /* Compute the high word into wh. */ do_mul(wh, a2, h, a3, h) /* pp 3 */ add wh, wh, a9 mov wl, a6 #endif /* !MUL32_HIGH */ #if __XTENSA_CALL0_ABI__ && XCHAL_NO_MUL /* Restore the original return address. */ l32i a0, sp, 0 #endif #if __XTENSA_CALL0_ABI__ l32i a12, sp, 16 l32i a13, sp, 20 l32i a14, sp, 24 l32i a15, sp, 28 addi sp, sp, 32 #endif leaf_return #if XCHAL_NO_MUL /* For Xtensa processors with no multiply hardware, this simplified version of _mulsi3 is used for multiplying 16-bit chunks of the floating-point mantissas. When using CALL0, this function uses a custom ABI: the inputs are passed in a13 and a14, the result is returned in a12, and a8 and a15 are clobbered. */ .align 4 .Lmul_mulsi3: leaf_entry sp, 16 .macro mul_mulsi3_body dst, src1, src2, tmp1, tmp2 movi \dst, 0 1: add \tmp1, \src2, \dst extui \tmp2, \src1, 0, 1 movnez \dst, \tmp1, \tmp2 do_addx2 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 1, 1 movnez \dst, \tmp1, \tmp2 do_addx4 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 2, 1 movnez \dst, \tmp1, \tmp2 do_addx8 \tmp1, \src2, \dst, \tmp1 extui \tmp2, \src1, 3, 1 movnez \dst, \tmp1, \tmp2 srli \src1, \src1, 4 slli \src2, \src2, 4 bnez \src1, 1b .endm #if __XTENSA_CALL0_ABI__ mul_mulsi3_body a12, a13, a14, a15, a8 #else /* The result will be written into a2, so save that argument in a4. */ mov a4, a2 mul_mulsi3_body a2, a4, a3, a5, a6 #endif leaf_return #endif /* XCHAL_NO_MUL */ .size __umulsidi3, . - __umulsidi3 #endif /* L_umulsidi3 */ /* Define a macro for the NSAU (unsigned normalize shift amount) instruction, which computes the number of leading zero bits, to handle cases where it is not included in the Xtensa processor configuration. */ .macro do_nsau cnt, val, tmp, a #if XCHAL_HAVE_NSA nsau \cnt, \val #else mov \a, \val movi \cnt, 0 extui \tmp, \a, 16, 16 bnez \tmp, 0f movi \cnt, 16 slli \a, \a, 16 0: extui \tmp, \a, 24, 8 bnez \tmp, 1f addi \cnt, \cnt, 8 slli \a, \a, 8 1: movi \tmp, __nsau_data extui \a, \a, 24, 8 add \tmp, \tmp, \a l8ui \tmp, \tmp, 0 add \cnt, \cnt, \tmp #endif /* !XCHAL_HAVE_NSA */ .endm #ifdef L_clz .section .rodata .align 4 .global __nsau_data .type __nsau_data, @object __nsau_data: #if !XCHAL_HAVE_NSA .byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4 .byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 #endif /* !XCHAL_HAVE_NSA */ .size __nsau_data, . 
- __nsau_data .hidden __nsau_data #endif /* L_clz */ #ifdef L_clzsi2 .align 4 .global __clzsi2 .type __clzsi2, @function __clzsi2: leaf_entry sp, 16 do_nsau a2, a2, a3, a4 leaf_return .size __clzsi2, . - __clzsi2 #endif /* L_clzsi2 */ #ifdef L_ctzsi2 .align 4 .global __ctzsi2 .type __ctzsi2, @function __ctzsi2: leaf_entry sp, 16 neg a3, a2 and a3, a3, a2 do_nsau a2, a3, a4, a5 neg a2, a2 addi a2, a2, 31 leaf_return .size __ctzsi2, . - __ctzsi2 #endif /* L_ctzsi2 */ #ifdef L_ffssi2 .align 4 .global __ffssi2 .type __ffssi2, @function __ffssi2: leaf_entry sp, 16 neg a3, a2 and a3, a3, a2 do_nsau a2, a3, a4, a5 neg a2, a2 addi a2, a2, 32 leaf_return .size __ffssi2, . - __ffssi2 #endif /* L_ffssi2 */ #ifdef L_udivsi3 .align 4 .global __udivsi3 .type __udivsi3, @function __udivsi3: leaf_entry sp, 16 #if XCHAL_HAVE_DIV32 quou a2, a2, a3 #else bltui a3, 2, .Lle_one /* check if the divisor <= 1 */ mov a6, a2 /* keep dividend in a6 */ do_nsau a5, a6, a2, a7 /* dividend_shift = nsau (dividend) */ do_nsau a4, a3, a2, a7 /* divisor_shift = nsau (divisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = divisor_shift - dividend_shift */ ssl a4 sll a3, a3 /* divisor <<= count */ movi a2, 0 /* quotient = 0 */ /* test-subtract-and-shift loop; one quotient bit on each iteration */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a6, a3, .Lzerobit sub a6, a6, a3 addi a2, a2, 1 .Lzerobit: slli a2, a2, 1 srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: bltu a6, a3, .Lreturn addi a2, a2, 1 /* increment quotient if dividend >= divisor */ .Lreturn: leaf_return .Lle_one: beqz a3, .Lerror /* if divisor == 1, return the dividend */ leaf_return .Lspecial: /* return dividend >= divisor */ bltu a6, a3, .Lreturn0 movi a2, 1 leaf_return .Lerror: /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ leaf_return .size __udivsi3, . - __udivsi3 #endif /* L_udivsi3 */ #ifdef L_divsi3 .align 4 .global __divsi3 .type __divsi3, @function __divsi3: leaf_entry sp, 16 #if XCHAL_HAVE_DIV32 quos a2, a2, a3 #else xor a7, a2, a3 /* sign = dividend ^ divisor */ do_abs a6, a2, a4 /* udividend = abs (dividend) */ do_abs a3, a3, a4 /* udivisor = abs (divisor) */ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */ do_nsau a5, a6, a2, a8 /* udividend_shift = nsau (udividend) */ do_nsau a4, a3, a2, a8 /* udivisor_shift = nsau (udivisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */ ssl a4 sll a3, a3 /* udivisor <<= count */ movi a2, 0 /* quotient = 0 */ /* test-subtract-and-shift loop; one quotient bit on each iteration */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a6, a3, .Lzerobit sub a6, a6, a3 addi a2, a2, 1 .Lzerobit: slli a2, a2, 1 srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: bltu a6, a3, .Lreturn addi a2, a2, 1 /* increment if udividend >= udivisor */ .Lreturn: neg a5, a2 movltz a2, a5, a7 /* return (sign < 0) ? -quotient : quotient */ leaf_return .Lle_one: beqz a3, .Lerror neg a2, a6 /* if udivisor == 1, then return... */ movgez a2, a6, a7 /* (sign < 0) ? 
-udividend : udividend */ leaf_return .Lspecial: bltu a6, a3, .Lreturn0 /* if dividend < divisor, return 0 */ movi a2, 1 movi a4, -1 movltz a2, a4, a7 /* else return (sign < 0) ? -1 : 1 */ leaf_return .Lerror: /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ leaf_return .size __divsi3, . - __divsi3 #endif /* L_divsi3 */ #ifdef L_umodsi3 .align 4 .global __umodsi3 .type __umodsi3, @function __umodsi3: leaf_entry sp, 16 #if XCHAL_HAVE_DIV32 remu a2, a2, a3 #else bltui a3, 2, .Lle_one /* check if the divisor is <= 1 */ do_nsau a5, a2, a6, a7 /* dividend_shift = nsau (dividend) */ do_nsau a4, a3, a6, a7 /* divisor_shift = nsau (divisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = divisor_shift - dividend_shift */ ssl a4 sll a3, a3 /* divisor <<= count */ /* test-subtract-and-shift loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a2, a3, .Lzerobit sub a2, a2, a3 .Lzerobit: srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: .Lspecial: bltu a2, a3, .Lreturn sub a2, a2, a3 /* subtract once more if dividend >= divisor */ .Lreturn: leaf_return .Lle_one: bnez a3, .Lreturn0 /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ leaf_return .size __umodsi3, . - __umodsi3 #endif /* L_umodsi3 */ #ifdef L_modsi3 .align 4 .global __modsi3 .type __modsi3, @function __modsi3: leaf_entry sp, 16 #if XCHAL_HAVE_DIV32 rems a2, a2, a3 #else mov a7, a2 /* save original (signed) dividend */ do_abs a2, a2, a4 /* udividend = abs (dividend) */ do_abs a3, a3, a4 /* udivisor = abs (divisor) */ bltui a3, 2, .Lle_one /* check if udivisor <= 1 */ do_nsau a5, a2, a6, a8 /* udividend_shift = nsau (udividend) */ do_nsau a4, a3, a6, a8 /* udivisor_shift = nsau (udivisor) */ bgeu a5, a4, .Lspecial sub a4, a4, a5 /* count = udivisor_shift - udividend_shift */ ssl a4 sll a3, a3 /* udivisor <<= count */ /* test-subtract-and-shift loop */ #if XCHAL_HAVE_LOOPS loopnez a4, .Lloopend #endif /* XCHAL_HAVE_LOOPS */ .Lloop: bltu a2, a3, .Lzerobit sub a2, a2, a3 .Lzerobit: srli a3, a3, 1 #if !XCHAL_HAVE_LOOPS addi a4, a4, -1 bnez a4, .Lloop #endif /* !XCHAL_HAVE_LOOPS */ .Lloopend: .Lspecial: bltu a2, a3, .Lreturn sub a2, a2, a3 /* subtract again if udividend >= udivisor */ .Lreturn: bgez a7, .Lpositive neg a2, a2 /* if (dividend < 0), return -udividend */ .Lpositive: leaf_return .Lle_one: bnez a3, .Lreturn0 /* Divide by zero: Use an illegal instruction to force an exception. The subsequent "DIV0" string can be recognized by the exception handler to identify the real cause of the exception. */ ill .ascii "DIV0" .Lreturn0: movi a2, 0 #endif /* XCHAL_HAVE_DIV32 */ leaf_return .size __modsi3, . - __modsi3 #endif /* L_modsi3 */ #ifdef __XTENSA_EB__ #define uh a2 #define ul a3 #else #define uh a3 #define ul a2 #endif /* __XTENSA_EB__ */ #ifdef L_ashldi3 .align 4 .global __ashldi3 .type __ashldi3, @function __ashldi3: leaf_entry sp, 16 ssl a4 bgei a4, 32, .Llow_only src uh, uh, ul sll ul, ul leaf_return .Llow_only: sll uh, ul movi ul, 0 leaf_return .size __ashldi3, . 
- __ashldi3 #endif /* L_ashldi3 */ #ifdef L_ashrdi3 .align 4 .global __ashrdi3 .type __ashrdi3, @function __ashrdi3: leaf_entry sp, 16 ssr a4 bgei a4, 32, .Lhigh_only src ul, uh, ul sra uh, uh leaf_return .Lhigh_only: sra ul, uh srai uh, uh, 31 leaf_return .size __ashrdi3, . - __ashrdi3 #endif /* L_ashrdi3 */ #ifdef L_lshrdi3 .align 4 .global __lshrdi3 .type __lshrdi3, @function __lshrdi3: leaf_entry sp, 16 ssr a4 bgei a4, 32, .Lhigh_only1 src ul, uh, ul srl uh, uh leaf_return .Lhigh_only1: srl ul, uh movi uh, 0 leaf_return .size __lshrdi3, . - __lshrdi3 #endif /* L_lshrdi3 */ #ifdef L_bswapsi2 .align 4 .global __bswapsi2 .type __bswapsi2, @function __bswapsi2: leaf_entry sp, 16 ssai 8 srli a3, a2, 16 src a3, a3, a2 src a3, a3, a3 src a2, a2, a3 leaf_return .size __bswapsi2, . - __bswapsi2 #endif /* L_bswapsi2 */ #ifdef L_bswapdi2 .align 4 .global __bswapdi2 .type __bswapdi2, @function __bswapdi2: leaf_entry sp, 16 ssai 8 srli a4, a2, 16 src a4, a4, a2 src a4, a4, a4 src a4, a2, a4 srli a2, a3, 16 src a2, a2, a3 src a2, a2, a2 src a2, a3, a2 mov a3, a4 leaf_return .size __bswapdi2, . - __bswapdi2 #endif /* L_bswapdi2 */ #include "ieee754-df.S" #include "ieee754-sf.S"
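The test-subtract-and-shift division used above by __udivsi3 when there is no DIV32 hardware corresponds to this C sketch (udiv32 is a hypothetical name; division by zero, which the assembly turns into an illegal-instruction trap tagged "DIV0", is simply not handled here):

#include <stdint.h>

/* Shift-subtract division as in the __udivsi3 loop: align the divisor
   with the dividend using leading-zero counts, then produce one quotient
   bit per iteration.  Sketch only.  */
static uint32_t
udiv32 (uint32_t dividend, uint32_t divisor)
{
  if (divisor <= 1)
    return dividend;			/* .Lle_one, divisor == 1 */
  if (dividend < divisor)
    return 0;				/* .Lspecial -> .Lreturn0 */

  int count = __builtin_clz (divisor) - __builtin_clz (dividend);
  divisor <<= count;			/* align divisor with dividend */

  uint32_t q = 0;
  while (count-- > 0)
    {
      if (dividend >= divisor)
	{
	  dividend -= divisor;
	  q++;
	}
      q <<= 1;
      divisor >>= 1;
    }
  return q + (dividend >= divisor);	/* final quotient bit */
}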
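Likewise, the do_nsau fallback together with its __nsau_data table reduces to a byte-narrowing count-leading-zeros. A C sketch (the range designators in the initializer are a GNU C extension; illustrative only):

#include <stdint.h>

/* Per-byte leading-zero counts, equivalent to the __nsau_data table.  */
static const unsigned char nsau_data[256] = {
  [0] = 8, [1] = 7, [2 ... 3] = 6, [4 ... 7] = 5,
  [8 ... 15] = 4, [16 ... 31] = 3, [32 ... 63] = 2,
  [64 ... 127] = 1, [128 ... 255] = 0,
};

/* Narrow to the top nonzero byte 16 bits then 8 bits at a time, then let
   the table finish the count.  Returns 32 for zero, like nsau.  */
static unsigned
clz32 (uint32_t v)
{
  unsigned cnt = 0;
  if ((v >> 16) == 0) { cnt += 16; v <<= 16; }	/* top half all zero */
  if ((v >> 24) == 0) { cnt += 8;  v <<= 8;  }	/* top byte all zero */
  return cnt + nsau_data[v >> 24];
}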
4ms/metamodule-plugin-sdk
16,807
plugin-libc/libgcc/config/s390/morestack.S
# s390 support for -fsplit-stack. # Copyright (C) 2015-2022 Free Software Foundation, Inc. # Contributed by Marcin Kościelnicki <koriakin@0x04.net>. # This file is part of GCC. # GCC is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3, or (at your option) any later # version. # GCC is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # Under Section 7 of GPL version 3, you are granted additional # permissions described in the GCC Runtime Library Exception, version # 3.1, as published by the Free Software Foundation. # You should have received a copy of the GNU General Public License and # a copy of the GCC Runtime Library Exception along with this program; # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see # <http://www.gnu.org/licenses/>. # Excess space needed to call ld.so resolver for lazy plt # resolution. Go uses sigaltstack so this doesn't need to # also cover signal frame size. #define BACKOFF 0x1000 #include <auto-host.h> # The __morestack function. .global __morestack .hidden __morestack .type __morestack,@function __morestack: .LFB1: .cfi_startproc #ifndef __s390x__ # The 31-bit __morestack function. # We use a cleanup to restore the stack guard if an exception # is thrown through this code. #ifndef __PIC__ .cfi_personality 0,__gcc_personality_v0 .cfi_lsda 0,.LLSDA1 #else .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 .cfi_lsda 0x1b,.LLSDA1 #endif stm %r2, %r15, 0x8(%r15) # Save %r2-%r15. .cfi_offset %r6, -0x48 .cfi_offset %r7, -0x44 .cfi_offset %r8, -0x40 .cfi_offset %r9, -0x3c .cfi_offset %r10, -0x38 .cfi_offset %r11, -0x34 .cfi_offset %r12, -0x30 .cfi_offset %r13, -0x2c .cfi_offset %r14, -0x28 .cfi_offset %r15, -0x24 lr %r11, %r15 # Make frame pointer for vararg. .cfi_def_cfa_register %r11 ahi %r15, -0x60 # 0x60 for standard frame. st %r11, 0(%r15) # Save back chain. lr %r8, %r0 # Save %r0 (static chain). lr %r10, %r1 # Save %r1 (address of parameter block). l %r7, 0(%r10) # Required frame size to %r7 ear %r1, %a0 # Extract thread pointer. l %r1, 0x20(%r1) # Get stack boundary ar %r1, %r7 # Stack boundary + frame size a %r1, 4(%r10) # + stack param size clr %r1, %r15 # Compare with current stack pointer jle .Lnoalloc # guard > sp - frame-size: need alloc brasl %r14, __morestack_block_signals # We abuse one of caller's fpr save slots (which we don't use for fprs) # as a local variable. Not needed here, but done to be consistent with # the below use. ahi %r7, BACKOFF # Bump requested size a bit. st %r7, 0x40(%r11) # Stuff frame size on stack. la %r2, 0x40(%r11) # Pass its address as parameter. la %r3, 0x60(%r11) # Caller's stack parameters. l %r4, 4(%r10) # Size of stack parameters. brasl %r14, __generic_morestack lr %r15, %r2 # Switch to the new stack. ahi %r15, -0x60 # Make a stack frame on it. st %r11, 0(%r15) # Save back chain. s %r2, 0x40(%r11) # The end of stack space. ahi %r2, BACKOFF # Back off a bit. ear %r1, %a0 # Extract thread pointer. .LEHB0: st %r2, 0x20(%r1) # Save the new stack boundary. brasl %r14, __morestack_unblock_signals lr %r0, %r8 # Static chain. lm %r2, %r6, 0x8(%r11) # Parameter registers. # Third parameter is address of function meat - address of parameter # block.
a %r10, 0x8(%r10) # Leave vararg pointer in %r1, in case function uses it la %r1, 0x60(%r11) # State of registers: # %r0: Static chain from entry. # %r1: Vararg pointer. # %r2-%r6: Parameters from entry. # %r7-%r10: Indeterminate. # %r11: Frame pointer (%r15 from entry). # %r12-%r13: Indeterminate. # %r14: Return address. # %r15: Stack pointer. basr %r14, %r10 # Call our caller. stm %r2, %r3, 0x8(%r11) # Save return registers. brasl %r14, __morestack_block_signals # We need a stack slot now, but have no good way to get it - the frame # on new stack had to be exactly 0x60 bytes, or stack parameters would # be passed wrong. Abuse fpr save area in caller's frame (we don't # save actual fprs). la %r2, 0x40(%r11) brasl %r14, __generic_releasestack s %r2, 0x40(%r11) # Subtract available space. ahi %r2, BACKOFF # Back off a bit. ear %r1, %a0 # Extract thread pointer. .LEHE0: st %r2, 0x20(%r1) # Save the new stack boundary. # We need to restore the old stack pointer before unblocking signals. # We also need 0x60 bytes for a stack frame. Since we had a stack # frame at this place before the stack switch, there's no need to # write the back chain again. lr %r15, %r11 ahi %r15, -0x60 brasl %r14, __morestack_unblock_signals lm %r2, %r15, 0x8(%r11) # Restore all registers. .cfi_remember_state .cfi_restore %r15 .cfi_restore %r14 .cfi_restore %r13 .cfi_restore %r12 .cfi_restore %r11 .cfi_restore %r10 .cfi_restore %r9 .cfi_restore %r8 .cfi_restore %r7 .cfi_restore %r6 .cfi_def_cfa_register %r15 br %r14 # Return to caller's caller. # Executed if no new stack allocation is needed. .Lnoalloc: .cfi_restore_state # We may need to copy stack parameters. l %r9, 0x4(%r10) # Load stack parameter size. ltr %r9, %r9 # And check if it's 0. je .Lnostackparm # Skip the copy if not needed. sr %r15, %r9 # Make space on the stack. la %r8, 0x60(%r15) # Destination. la %r12, 0x60(%r11) # Source. lr %r13, %r9 # Source size. .Lcopy: mvcle %r8, %r12, 0 # Copy. jo .Lcopy .Lnostackparm: # Third parameter is address of function meat - address of parameter # block. a %r10, 0x8(%r10) # Leave vararg pointer in %r1, in case function uses it la %r1, 0x60(%r11) # OK, no stack allocation needed. We still follow the protocol and # call our caller - it doesn't cost much and makes sure vararg works. # No need to set any registers here - %r0 and %r2-%r6 weren't modified. basr %r14, %r10 # Call our caller. lm %r6, %r15, 0x18(%r11) # Restore all callee-saved registers. .cfi_remember_state .cfi_restore %r15 .cfi_restore %r14 .cfi_restore %r13 .cfi_restore %r12 .cfi_restore %r11 .cfi_restore %r10 .cfi_restore %r9 .cfi_restore %r8 .cfi_restore %r7 .cfi_restore %r6 .cfi_def_cfa_register %r15 br %r14 # Return to caller's caller. # This is the cleanup code called by the stack unwinder when unwinding # through the code between .LEHB0 and .LEHE0 above. .L1: .cfi_restore_state lr %r2, %r11 # Stack pointer after resume. brasl %r14, __generic_findstack lr %r3, %r11 # Get the stack pointer. sr %r3, %r2 # Subtract available space. ahi %r3, BACKOFF # Back off a bit. ear %r1, %a0 # Extract thread pointer. st %r3, 0x20(%r1) # Save the new stack boundary. # We need GOT pointer in %r12 for PLT entry. larl %r12,_GLOBAL_OFFSET_TABLE_ lr %r2, %r6 # Exception header. #ifdef __PIC__ brasl %r14, _Unwind_Resume@PLT #else brasl %r14, _Unwind_Resume #endif #else /* defined(__s390x__) */ # The 64-bit __morestack function. # We use a cleanup to restore the stack guard if an exception # is thrown through this code. 
#ifndef __PIC__ .cfi_personality 0x3,__gcc_personality_v0 .cfi_lsda 0x3,.LLSDA1 #else .cfi_personality 0x9b,DW.ref.__gcc_personality_v0 .cfi_lsda 0x1b,.LLSDA1 #endif stmg %r2, %r15, 0x10(%r15) # Save %r2-%r15. .cfi_offset %r6, -0x70 .cfi_offset %r7, -0x68 .cfi_offset %r8, -0x60 .cfi_offset %r9, -0x58 .cfi_offset %r10, -0x50 .cfi_offset %r11, -0x48 .cfi_offset %r12, -0x40 .cfi_offset %r13, -0x38 .cfi_offset %r14, -0x30 .cfi_offset %r15, -0x28 lgr %r11, %r15 # Make frame pointer for vararg. .cfi_def_cfa_register %r11 aghi %r15, -0xa0 # 0xa0 for standard frame. stg %r11, 0(%r15) # Save back chain. lgr %r8, %r0 # Save %r0 (static chain). lgr %r10, %r1 # Save %r1 (address of parameter block). lg %r7, 0(%r10) # Required frame size to %r7 ear %r1, %a0 sllg %r1, %r1, 32 ear %r1, %a1 # Extract thread pointer. lg %r1, 0x38(%r1) # Get stack boundary agr %r1, %r7 # Stack boundary + frame size ag %r1, 8(%r10) # + stack param size clgr %r1, %r15 # Compare with current stack pointer jle .Lnoalloc # guard > sp - frame-size: need alloc brasl %r14, __morestack_block_signals # We abuse one of caller's fpr save slots (which we don't use for fprs) # as a local variable. Not needed here, but done to be consistent with # the below use. aghi %r7, BACKOFF # Bump requested size a bit. stg %r7, 0x80(%r11) # Stuff frame size on stack. la %r2, 0x80(%r11) # Pass its address as parameter. la %r3, 0xa0(%r11) # Caller's stack parameters. lg %r4, 8(%r10) # Size of stack parameters. brasl %r14, __generic_morestack lgr %r15, %r2 # Switch to the new stack. aghi %r15, -0xa0 # Make a stack frame on it. stg %r11, 0(%r15) # Save back chain. sg %r2, 0x80(%r11) # The end of stack space. aghi %r2, BACKOFF # Back off a bit. ear %r1, %a0 sllg %r1, %r1, 32 ear %r1, %a1 # Extract thread pointer. .LEHB0: stg %r2, 0x38(%r1) # Save the new stack boundary. brasl %r14, __morestack_unblock_signals lgr %r0, %r8 # Static chain. lmg %r2, %r6, 0x10(%r11) # Parameter registers. # Third parameter is address of function meat - address of parameter # block. ag %r10, 0x10(%r10) # Leave vararg pointer in %r1, in case function uses it la %r1, 0xa0(%r11) # State of registers: # %r0: Static chain from entry. # %r1: Vararg pointer. # %r2-%r6: Parameters from entry. # %r7-%r10: Indeterminate. # %r11: Frame pointer (%r15 from entry). # %r12-%r13: Indeterminate. # %r14: Return address. # %r15: Stack pointer. basr %r14, %r10 # Call our caller. stg %r2, 0x10(%r11) # Save return register. brasl %r14, __morestack_block_signals # We need a stack slot now, but have no good way to get it - the frame # on new stack had to be exactly 0xa0 bytes, or stack parameters would # be passed wrong. Abuse fpr save area in caller's frame (we don't # save actual fprs). la %r2, 0x80(%r11) brasl %r14, __generic_releasestack sg %r2, 0x80(%r11) # Subtract available space. aghi %r2, BACKOFF # Back off a bit. ear %r1, %a0 sllg %r1, %r1, 32 ear %r1, %a1 # Extract thread pointer. .LEHE0: stg %r2, 0x38(%r1) # Save the new stack boundary. # We need to restore the old stack pointer before unblocking signals. # We also need 0xa0 bytes for a stack frame. Since we had a stack # frame at this place before the stack switch, there's no need to # write the back chain again. lgr %r15, %r11 aghi %r15, -0xa0 brasl %r14, __morestack_unblock_signals lmg %r2, %r15, 0x10(%r11) # Restore all registers.
.cfi_remember_state .cfi_restore %r15 .cfi_restore %r14 .cfi_restore %r13 .cfi_restore %r12 .cfi_restore %r11 .cfi_restore %r10 .cfi_restore %r9 .cfi_restore %r8 .cfi_restore %r7 .cfi_restore %r6 .cfi_def_cfa_register %r15 br %r14 # Return to caller's caller. # Executed if no new stack allocation is needed. .Lnoalloc: .cfi_restore_state # We may need to copy stack parameters. lg %r9, 0x8(%r10) # Load stack parameter size. ltgr %r9, %r9 # Check if it's 0. je .Lnostackparm # Skip the copy if not needed. sgr %r15, %r9 # Make space on the stack. la %r8, 0xa0(%r15) # Destination. la %r12, 0xa0(%r11) # Source. lgr %r13, %r9 # Source size. .Lcopy: mvcle %r8, %r12, 0 # Copy. jo .Lcopy .Lnostackparm: # Third parameter is address of function meat - address of parameter # block. ag %r10, 0x10(%r10) # Leave vararg pointer in %r1, in case function uses it la %r1, 0xa0(%r11) # OK, no stack allocation needed. We still follow the protocol and # call our caller - it doesn't cost much and makes sure vararg works. # No need to set any registers here - %r0 and %r2-%r6 weren't modified. basr %r14, %r10 # Call our caller. lmg %r6, %r15, 0x30(%r11) # Restore all callee-saved registers. .cfi_remember_state .cfi_restore %r15 .cfi_restore %r14 .cfi_restore %r13 .cfi_restore %r12 .cfi_restore %r11 .cfi_restore %r10 .cfi_restore %r9 .cfi_restore %r8 .cfi_restore %r7 .cfi_restore %r6 .cfi_def_cfa_register %r15 br %r14 # Return to caller's caller. # This is the cleanup code called by the stack unwinder when unwinding # through the code between .LEHB0 and .LEHE0 above. .L1: .cfi_restore_state lgr %r2, %r11 # Stack pointer after resume. brasl %r14, __generic_findstack lgr %r3, %r11 # Get the stack pointer. sgr %r3, %r2 # Subtract available space. aghi %r3, BACKOFF # Back off a bit. ear %r1, %a0 sllg %r1, %r1, 32 ear %r1, %a1 # Extract thread pointer. stg %r3, 0x38(%r1) # Save the new stack boundary. lgr %r2, %r6 # Exception header. #ifdef __PIC__ brasl %r14, _Unwind_Resume@PLT #else brasl %r14, _Unwind_Resume #endif #endif /* defined(__s390x__) */ .cfi_endproc .size __morestack, . - __morestack # The exception table. This tells the personality routine to execute # the exception handler. .section .gcc_except_table,"a",@progbits .align 4 .LLSDA1: .byte 0xff # @LPStart format (omit) .byte 0xff # @TType format (omit) .byte 0x1 # call-site format (uleb128) .uleb128 .LLSDACSE1-.LLSDACSB1 # Call-site table length .LLSDACSB1: .uleb128 .LEHB0-.LFB1 # region 0 start .uleb128 .LEHE0-.LEHB0 # length .uleb128 .L1-.LFB1 # landing pad .uleb128 0 # action .LLSDACSE1: .global __gcc_personality_v0 #ifdef __PIC__ # Build a position independent reference to the basic # personality function. .hidden DW.ref.__gcc_personality_v0 .weak DW.ref.__gcc_personality_v0 .section .data.DW.ref.__gcc_personality_v0,"awG",@progbits,DW.ref.__gcc_personality_v0,comdat .type DW.ref.__gcc_personality_v0, @object DW.ref.__gcc_personality_v0: #ifndef __LP64__ .align 4 .size DW.ref.__gcc_personality_v0, 4 .long __gcc_personality_v0 #else .align 8 .size DW.ref.__gcc_personality_v0, 8 .quad __gcc_personality_v0 #endif #endif # Initialize the stack test value when the program starts or when a # new thread starts. We don't know how large the main stack is, so we # guess conservatively. We might be able to use getrlimit here. .text .global __stack_split_initialize .hidden __stack_split_initialize .type __stack_split_initialize, @function __stack_split_initialize: #ifndef __s390x__ ear %r1, %a0 lr %r0, %r15 ahi %r0, -0x4000 # We should have at least 16K. 
	st	%r0, 0x20(%r1)
	lr	%r2, %r15
	lhi	%r3, 0x4000
#ifdef __PIC__
	jg	__generic_morestack_set_initial_sp@PLT	# Tail call
#else
	jg	__generic_morestack_set_initial_sp	# Tail call
#endif
#else /* defined(__s390x__) */
	ear	%r1, %a0
	sllg	%r1, %r1, 32
	ear	%r1, %a1
	lgr	%r0, %r15
	aghi	%r0, -0x4000		# We should have at least 16K.
	stg	%r0, 0x38(%r1)
	lgr	%r2, %r15
	lghi	%r3, 0x4000
#ifdef __PIC__
	jg	__generic_morestack_set_initial_sp@PLT	# Tail call
#else
	jg	__generic_morestack_set_initial_sp	# Tail call
#endif
#endif /* defined(__s390x__) */
	.size	__stack_split_initialize, . - __stack_split_initialize

# Routines to get and set the guard, for __splitstack_getcontext,
# __splitstack_setcontext, and __splitstack_makecontext.

# void *__morestack_get_guard (void) returns the current stack guard.
	.text
	.global	__morestack_get_guard
	.hidden	__morestack_get_guard
	.type	__morestack_get_guard,@function
__morestack_get_guard:
#ifndef __s390x__
	ear	%r1, %a0
	l	%r2, 0x20(%r1)
#else
	ear	%r1, %a0
	sllg	%r1, %r1, 32
	ear	%r1, %a1
	lg	%r2, 0x38(%r1)
#endif
	br	%r14
	.size	__morestack_get_guard, . - __morestack_get_guard

# void __morestack_set_guard (void *) sets the stack guard.
	.global	__morestack_set_guard
	.hidden	__morestack_set_guard
	.type	__morestack_set_guard,@function
__morestack_set_guard:
#ifndef __s390x__
	ear	%r1, %a0
	st	%r2, 0x20(%r1)
#else
	ear	%r1, %a0
	sllg	%r1, %r1, 32
	ear	%r1, %a1
	stg	%r2, 0x38(%r1)
#endif
	br	%r14
	.size	__morestack_set_guard, . - __morestack_set_guard

# void *__morestack_make_guard (void *, size_t) returns the stack
# guard value for a stack.
	.global	__morestack_make_guard
	.hidden	__morestack_make_guard
	.type	__morestack_make_guard,@function
__morestack_make_guard:
#ifndef __s390x__
	sr	%r2, %r3
	ahi	%r2, BACKOFF
#else
	sgr	%r2, %r3
	aghi	%r2, BACKOFF
#endif
	br	%r14
	.size	__morestack_make_guard, . - __morestack_make_guard

# Make __stack_split_initialize a high priority constructor.
#if HAVE_INITFINI_ARRAY_SUPPORT
	.section	.init_array.00000,"aw",@progbits
#else
	.section	.ctors.65535,"aw",@progbits
#endif
#ifndef __LP64__
	.align	4
	.long	__stack_split_initialize
	.long	__morestack_load_mmap
#else
	.align	8
	.quad	__stack_split_initialize
	.quad	__morestack_load_mmap
#endif

	.section	.note.GNU-stack,"",@progbits
	.section	.note.GNU-split-stack,"",@progbits
	.section	.note.GNU-no-split-stack,"",@progbits
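
The guard arithmetic in this file is easier to follow in C. The sketch below is not part of libgcc: BACKOFF's value and the helper names are assumptions chosen only to mirror the assembly, which subtracts the stack size from the stack pointer and then backs off by BACKOFF (stacks grow downward, so the guard sits just above the lowest usable address).

    #include <stddef.h>
    #include <stdint.h>

    #define BACKOFF 0x1000  /* assumed slack; the real value is target-defined */

    /* Mirrors __morestack_make_guard (sr/sgr then ahi/aghi BACKOFF):
       the guard for a stack of `size` bytes whose top is `stack`.  */
    static void *make_guard(void *stack, size_t size)
    {
        return (char *)stack - size + BACKOFF;
    }

    /* Mirrors the entry check in __morestack (agr/ag/clgr/jle): a new
       segment is allocated when guard + frame size + stack parameter
       size would exceed the current stack pointer.  */
    static int needs_more_stack(uintptr_t sp, uintptr_t guard,
                                size_t frame_size, size_t param_size)
    {
        return guard + frame_size + param_size > sp;
    }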
4ms/metamodule-plugin-sdk
1,500
plugin-libc/libgcc/config/cr16/crtn.S
# Specialized code needed to support construction and destruction of
# file-scope objects in C++ and Java code, and to support exception handling.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.

# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

/* This file supplies function epilogues for the .init and .fini
   sections.  It is linked in after all other files.  */

	.ident	"GNU C crtn.o"

	.section	.init
#if defined (__ID_SHARED_LIB__)
	popret	$2, r12, ra
#else
	popret	ra
#endif

	.section	.fini
#if defined (__ID_SHARED_LIB__)
	popret	$2, r12, ra
#else
	popret	ra
#endif
4ms/metamodule-plugin-sdk
1,234
plugin-libc/libgcc/config/cr16/crtlibid.S
# Provide a weak definition of the library ID, for the benefit of certain
# configure scripts.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.

# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

	.ident	"GNU C crtlibid.o"

	.weak	__current_shared_library_r12_offset_
	.set	__current_shared_library_r12_offset_, 0
4ms/metamodule-plugin-sdk
1,782
plugin-libc/libgcc/config/cr16/crti.S
# Specialized code needed to support construction and destruction of
# file-scope objects in C++ and Java code, and to support exception handling.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# Contributed by KPIT Cummins Infosystems Limited.

# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any
# later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# Under Section 7 of GPL version 3, you are granted additional
# permissions described in the GCC Runtime Library Exception, version
# 3.1, as published by the Free Software Foundation.
#
# You should have received a copy of the GNU General Public License and
# a copy of the GCC Runtime Library Exception along with this program;
# see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
# <http://www.gnu.org/licenses/>.

/* This file just supplies function prologues for the .init and .fini
   sections.  It is linked in before crtbegin.o.  */

	.ident	"GNU C crti.o"

	.section	.init
	.globl	__init
	.type	__init,@function
__init:
#if defined (__ID_SHARED_LIB__)
	push	$2, r12, ra
	movd	$__current_shared_library_r12_offset_, (r1,r0)
	loadd	[r12]0(r1,r0), (r12)
#else
	push	ra
#endif

	.section	.fini
	.globl	__fini
	.type	__fini,@function
__fini:
#if defined (__ID_SHARED_LIB__)
	push	$2, r12, ra
	movd	$__current_shared_library_r12_offset_, (r1,r0)
	loadd	[r12]0(r1,r0), (r12)
#else
	push	ra
#endif
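
Taken together, crti.S and crtn.S bracket the .init and .fini sections: the linker places crti.o before and crtn.o after all other objects, so the `push ra` prologues above and the `popret ra` epilogues in crtn.S enclose whatever constructor and destructor fragments the intervening objects contribute. A minimal C-level sketch of how a runtime might drive the resulting functions; only __init and __fini come from these files, while the startup and shutdown wrappers are hypothetical:

    extern void __init(void);   /* prologue from crti.o, epilogue from crtn.o */
    extern void __fini(void);

    /* Hypothetical runtime hooks: __init runs once before main() to fire
       file-scope constructors, __fini runs at exit for destructors.  */
    static void runtime_startup(void)  { __init(); }
    static void runtime_shutdown(void) { __fini(); }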